GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kernel/ftrace_64.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2014 Steven Rostedt, Red Hat Inc
 */

#include <linux/export.h>
#include <linux/cfi_types.h>
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/ftrace.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>

.code64
.section .text, "ax"

#ifdef CONFIG_FRAME_POINTER
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 0
#endif /* CONFIG_FRAME_POINTER */

/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE (FRAME_SIZE + MCOUNT_FRAME_SIZE)

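Worked out from the definitions above: with CONFIG_FRAME_POINTER, MCOUNT_FRAME_SIZE is 8 + 16*2 = 40 bytes (one slot for the saved original %rbp plus the two synthetic rip/rbp frame pairs that save_mcount_regs builds below), so MCOUNT_REG_SIZE = FRAME_SIZE + 40; without frame pointers it is just FRAME_SIZE.
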
/*
 * gcc -pg option adds a call to 'mcount' in most functions.
 * When -mfentry is used, the call is to 'fentry' and not 'mcount'
 * and is done before the function's stack frame is set up.
 * They both require a set of regs to be saved before calling
 * any C code and restored before returning back to the function.
 *
 * On boot up, all these calls are converted into nops. When tracing
 * is enabled, the call can jump to either ftrace_caller or
 * ftrace_regs_caller. Callbacks (tracing functions) that require
 * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
 * them. For this reason, the size of the pt_regs structure will be
 * allocated on the stack and the required mcount registers will
 * be saved in the locations that pt_regs has them in.
 */

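To make the callback side concrete, here is a minimal sketch of an ftrace_ops user, assuming the register_ftrace_function() API and the FTRACE_OPS_FL_SAVE_REGS flag from linux/ftrace.h as found in current kernels; it illustrates what the trampolines below serve and is not code from this file:

#include <linux/ftrace.h>

/* ip is the patched call site in the traced function (what %rdi carries
 * below), parent_ip the traced function's return address (%rsi). */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        /* With FTRACE_OPS_FL_SAVE_REGS set, the full pt_regs snapshot built
         * by ftrace_regs_caller is reachable through fregs. */
}

static struct ftrace_ops my_ops = {
        .func  = my_trace_callback,
        .flags = FTRACE_OPS_FL_SAVE_REGS,       /* request the pt_regs path */
};

/* register_ftrace_function(&my_ops) would then route every enabled call
 * site through ftrace_regs_caller rather than ftrace_caller. */
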
/*
 * @added: the amount of stack added before calling this
 *
 * After this is called, the following registers contain:
 *
 *  %rdi - holds the address that called the trampoline
 *  %rsi - holds the parent function (traced function's return address)
 *  %rdx - holds the original %rbp
 */
.macro save_mcount_regs added=0

#ifdef CONFIG_FRAME_POINTER
        /* Save the original rbp */
        pushq %rbp

        /*
         * Stack traces will stop at the ftrace trampoline if the frame pointer
         * is not set up properly. If fentry is used, we need to save a frame
         * pointer for the parent as well as the function traced, because
         * fentry is called before the stack frame is set up, whereas mcount
         * is called afterward.
         */

        /* Save the parent pointer (skip orig rbp and our return address) */
        pushq \added+8*2(%rsp)
        pushq %rbp
        movq %rsp, %rbp
        /* Save the return address (now skip orig rbp, rbp and parent) */
        pushq \added+8*3(%rsp)
        pushq %rbp
        movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */

        /*
         * We add enough stack to save all regs.
         */
        subq $(FRAME_SIZE), %rsp
        movq %rax, RAX(%rsp)
        movq %rcx, RCX(%rsp)
        movq %rdx, RDX(%rsp)
        movq %rsi, RSI(%rsp)
        movq %rdi, RDI(%rsp)
        movq %r8, R8(%rsp)
        movq %r9, R9(%rsp)
        movq $0, ORIG_RAX(%rsp)
        /*
         * Save the original RBP. Even though the mcount ABI does not
         * require this, it helps out callers.
         */
#ifdef CONFIG_FRAME_POINTER
        movq MCOUNT_REG_SIZE-8(%rsp), %rdx
#else
        movq %rbp, %rdx
#endif
        movq %rdx, RBP(%rsp)

        /* Copy the parent address into %rsi (second parameter) */
        movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi

        /* Move RIP to its proper location */
        movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
        movq %rdi, RIP(%rsp)

        /*
         * Now %rdi (the first parameter) holds the address immediately after
         * the mcount/fentry call, but the callbacks expect the address of
         * the call instruction itself.
         */
        subq $MCOUNT_INSN_SIZE, %rdi
.endm

.macro restore_mcount_regs save=0

        /* ftrace_regs_caller or frame pointers require this */
        movq RBP(%rsp), %rbp

        movq R9(%rsp), %r9
        movq R8(%rsp), %r8
        movq RDI(%rsp), %rdi
        movq RSI(%rsp), %rsi
        movq RDX(%rsp), %rdx
        movq RCX(%rsp), %rcx
        movq RAX(%rsp), %rax

        addq $MCOUNT_REG_SIZE-\save, %rsp

.endm

SYM_TYPED_FUNC_START(ftrace_stub)
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_TYPED_FUNC_START(ftrace_stub_graph)
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(ftrace_stub_graph)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

SYM_FUNC_START(__fentry__)
        ANNOTATE_NOENDBR
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)

SYM_FUNC_START(ftrace_caller)
        ANNOTATE_NOENDBR
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

        CALL_DEPTH_ACCOUNT

        /* Stack - skipping return address of ftrace_caller */
        leaq MCOUNT_REG_SIZE+8(%rsp), %rcx
        movq %rcx, RSP(%rsp)

SYM_INNER_LABEL(ftrace_caller_op_ptr, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

        /* Only ops with REGS flag set should have CS register set */
        movq $0, CS(%rsp)

        /* Account for the function call below */
        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        call ftrace_stub

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        restore_mcount_regs

        /*
         * The code up to this label is copied into trampolines so
         * think twice before adding any new code or changing the
         * layout here.
         */
SYM_INNER_LABEL(ftrace_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        RET
SYM_FUNC_END(ftrace_caller);
STACK_FRAME_NON_STANDARD_FP(ftrace_caller)

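The "Handlers can change the RIP" reload above is what lets a callback redirect execution (live patching relies on this). A hedged sketch of such a callback; ftrace_regs_set_instruction_pointer() and FTRACE_OPS_FL_IPMODIFY are named from memory of linux/ftrace.h, so treat the details as assumptions:

/* my_new_function is a hypothetical replacement target */
static void my_new_function(void);

static void my_redirect_callback(unsigned long ip, unsigned long parent_ip,
                                 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        /* Whatever lands in the saved RIP slot is where the trampoline
         * returns to instead of the traced function. */
        ftrace_regs_set_instruction_pointer(fregs, (unsigned long)my_new_function);
}

/* Such an ops would be registered like the earlier sketch, with
 * .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY. */
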
SYM_FUNC_START(ftrace_regs_caller)
        ANNOTATE_NOENDBR
        /* Save the current flags before any operations that can change them */
        pushfq

        /* added 8 bytes to save flags */
        save_mcount_regs 8
        /* save_mcount_regs fills in first two parameters */

        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_caller_op_ptr, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        /* Load the ftrace_ops into the 3rd parameter */
        movq function_trace_op(%rip), %rdx

        /* Save the rest of pt_regs */
        movq %r15, R15(%rsp)
        movq %r14, R14(%rsp)
        movq %r13, R13(%rsp)
        movq %r12, R12(%rsp)
        movq %r11, R11(%rsp)
        movq %r10, R10(%rsp)
        movq %rbx, RBX(%rsp)
        /* Copy saved flags */
        movq MCOUNT_REG_SIZE(%rsp), %rcx
        movq %rcx, EFLAGS(%rsp)
        /* Kernel segments */
        movq $__KERNEL_DS, %rcx
        movq %rcx, SS(%rsp)
        movq $__KERNEL_CS, %rcx
        movq %rcx, CS(%rsp)
        /* Stack - skipping return address and flags */
        leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
        movq %rcx, RSP(%rsp)

        ENCODE_FRAME_POINTER

        /* regs go into 4th parameter */
        leaq (%rsp), %rcx

        /* Account for the function call below */
        CALL_DEPTH_ACCOUNT

SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        call ftrace_stub

        /* Copy flags back to SS, to restore them */
        movq EFLAGS(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        /* Handlers can change the RIP */
        movq RIP(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE+8(%rsp)

        /* restore the rest of pt_regs */
        movq R15(%rsp), %r15
        movq R14(%rsp), %r14
        movq R13(%rsp), %r13
        movq R12(%rsp), %r12
        movq R10(%rsp), %r10
        movq RBX(%rsp), %rbx

        movq ORIG_RAX(%rsp), %rax
        movq %rax, MCOUNT_REG_SIZE-8(%rsp)

        /*
         * If ORIG_RAX is anything but zero, make this a call to that.
         * See arch_ftrace_set_direct_caller().
         */
        testq %rax, %rax
SYM_INNER_LABEL(ftrace_regs_caller_jmp, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        jnz 1f

        restore_mcount_regs
        /* Restore flags */
        popfq

        /*
         * The trampoline will add the return.
         */
SYM_INNER_LABEL(ftrace_regs_caller_end, SYM_L_GLOBAL)
        ANNOTATE_NOENDBR
        RET

1:
        testb $1, %al
        jz 2f
        andq $0xfffffffffffffffe, %rax
        movq %rax, MCOUNT_REG_SIZE+8(%rsp)
        restore_mcount_regs
        /* Restore flags */
        popfq
        RET

        /* Swap the flags with orig_rax */
2:      movq MCOUNT_REG_SIZE(%rsp), %rdi
        movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
        movq %rax, MCOUNT_REG_SIZE(%rsp)

        restore_mcount_regs 8
        /* Restore flags */
        popfq
        UNWIND_HINT_FUNC

        /*
         * The above left an extra return address on the stack; effectively
         * doing a tail-call without using a register. This PUSH;RET
         * pattern unbalances the RSB, so inject a pointless CALL to rebalance it.
         */
        ANNOTATE_INTRA_FUNCTION_CALL
        CALL .Ldo_rebalance
        int3
.Ldo_rebalance:
        add $8, %rsp
        ALTERNATIVE __stringify(RET), \
                    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
                    X86_FEATURE_CALL_DEPTH

SYM_FUNC_END(ftrace_regs_caller)
STACK_FRAME_NON_STANDARD_FP(ftrace_regs_caller)

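The ORIG_RAX test above is the hook used by direct-call trampolines. A hedged sketch of what arch_ftrace_set_direct_caller() amounts to on x86-64, as I read it; the accessor name and exact behaviour are assumptions:

/* save_mcount_regs zeroes ORIG_RAX; storing a destination there makes the
 * exit path above turn its return into a call/jump to that address. */
static inline void my_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{
        struct pt_regs *regs = ftrace_get_regs(fregs);  /* assumed accessor */

        if (regs)
                regs->orig_ax = addr;
}
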
SYM_FUNC_START(ftrace_stub_direct_tramp)
        ANNOTATE_NOENDBR
        CALL_DEPTH_ACCOUNT
        RET
SYM_FUNC_END(ftrace_stub_direct_tramp)

#else /* ! CONFIG_DYNAMIC_FTRACE */

SYM_FUNC_START(__fentry__)
        ANNOTATE_NOENDBR
        CALL_DEPTH_ACCOUNT

        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
        RET

trace:
        /* save_mcount_regs fills in first two parameters */
        save_mcount_regs

        /*
         * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
         * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
         * ip and parent ip are used and the list function is called when
         * function tracing is enabled.
         */
        movq ftrace_trace_function, %r8
        CALL_NOSPEC r8
        restore_mcount_regs

        jmp ftrace_stub
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
STACK_FRAME_NON_STANDARD_FP(__fentry__)

#endif /* CONFIG_DYNAMIC_FTRACE */

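In C terms, the !CONFIG_DYNAMIC_FTRACE path above is a single global hook. A minimal sketch of the dispatch it performs, assuming ftrace_trace_function and ftrace_stub are visible as declared in linux/ftrace.h (an assumption on my part):

/* kernel/trace/ftrace.c keeps one global hook, ftrace_stub when idle */
extern ftrace_func_t ftrace_trace_function;

static inline void fentry_dispatch(unsigned long ip, unsigned long parent_ip)
{
        /* mirrors the "cmpq $ftrace_stub, ftrace_trace_function" above;
         * only ip and parent_ip are meaningful on this path */
        if (ftrace_trace_function != ftrace_stub)
                ftrace_trace_function(ip, parent_ip, NULL, NULL);
}
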
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_CODE_START(return_to_handler)
        UNWIND_HINT_UNDEFINED
        ANNOTATE_NOENDBR

        /* Store original rsp for pt_regs.sp value. */
        movq %rsp, %rdi

        /* Restore the return_to_handler value that got eaten by the previous ret instruction. */
        subq $8, %rsp
        UNWIND_HINT_FUNC

        /* Save ftrace_regs for function exit context */
        subq $(FRAME_SIZE), %rsp

        movq %rax, RAX(%rsp)
        movq %rdx, RDX(%rsp)
        movq %rbp, RBP(%rsp)
        movq %rdi, RSP(%rsp)
        movq %rsp, %rdi

        call ftrace_return_to_handler

        movq %rax, %rdi
        movq RDX(%rsp), %rdx
        movq RAX(%rsp), %rax

        addq $(FRAME_SIZE) + 8, %rsp

        /*
         * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
         * since IBT would demand that the target contain ENDBR, which simply
         * isn't so for return addresses. Use a retpoline here to keep the RSB
         * balanced.
         */
        ANNOTATE_INTRA_FUNCTION_CALL
        call .Ldo_rop
        int3
.Ldo_rop:
        mov %rdi, (%rsp)
        ALTERNATIVE __stringify(RET), \
                    __stringify(ANNOTATE_UNRET_SAFE; ret; int3), \
                    X86_FEATURE_CALL_DEPTH
SYM_CODE_END(return_to_handler)
#endif

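For context on who consumes return_to_handler: the function-graph tracer swaps the real return address for it at function entry. A conceptual sketch under stated assumptions; push_return_trace() is a hypothetical stand-in for the real shadow-stack helpers in kernel/trace/fgraph.c:

extern void return_to_handler(void);
void push_return_trace(unsigned long ret);      /* hypothetical helper */

/* At entry, the graph tracer rewrites the traced function's on-stack return
 * address so that returning lands in return_to_handler above. */
static void hijack_return_address(unsigned long *ret_slot)
{
        push_return_trace(*ret_slot);           /* remember the real address */
        *ret_slot = (unsigned long)return_to_handler;
}

/* On exit, return_to_handler calls ftrace_return_to_handler(), which pops the
 * real address back off the per-task return stack, runs the exit callback,
 * and hands the address back in %rax for the final retpoline-style jump. */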