/* Source: torvalds/linux, arch/arm64/kernel/entry-ftrace.S (master branch). */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/kernel/entry-ftrace.S
 *
 * Copyright (C) 2013 Linaro Limited
 * Author: AKASHI Takahiro <[email protected]>
 */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/ftrace.h>
#include <asm/insn.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/*
 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
 * ftrace_make_call() have patched those NOPs to:
 *
 *	MOV	X9, LR
 *	BL	ftrace_caller
 *
 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
 * clobber.
 *
 * We save the callsite's context into a struct ftrace_regs before invoking any
 * ftrace callbacks. So that we can get a sensible backtrace, we create frame
 * records for the callsite and the ftrace entry assembly. This is not
 * sufficient for reliable stacktrace: until we create the callsite stack
 * record, its caller is missing from the LR and existing chain of frame
 * records.
 */
SYM_CODE_START(ftrace_caller)
	bti	c

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
	/*
	 * The literal pointer to the ops is at an 8-byte aligned boundary
	 * which is either 12 or 16 bytes before the BL instruction in the call
	 * site. See ftrace_call_adjust() for details.
	 *
	 * Therefore here the LR points at `literal + 16` or `literal + 20`,
	 * and we can find the address of the literal in either case by
	 * aligning to an 8-byte boundary and subtracting 16. We do the
	 * alignment first as this allows us to fold the subtraction into the
	 * LDR.
	 */
	bic	x11, x30, 0x7
	ldr	x11, [x11, #-(4 * AARCH64_INSN_SIZE)]	// op

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/*
	 * If the op has a direct call, handle it immediately without
	 * saving/restoring registers.
	 */
	ldr	x17, [x11, #FTRACE_OPS_DIRECT_CALL]	// op->direct_call
	cbnz	x17, ftrace_caller_direct
#endif
#endif

	/* Save original SP */
	mov	x10, sp

	/* Make room for ftrace regs, plus two frame records */
	sub	sp, sp, #(FREGS_SIZE + 32)

	/* Save function arguments */
	stp	x0, x1, [sp, #FREGS_X0]
	stp	x2, x3, [sp, #FREGS_X2]
	stp	x4, x5, [sp, #FREGS_X4]
	stp	x6, x7, [sp, #FREGS_X6]
	str	x8, [sp, #FREGS_X8]

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/* Zero the direct-trampoline slot; it is checked again after the callbacks run. */
	str	xzr, [sp, #FREGS_DIRECT_TRAMP]
#endif

	/* Save the callsite's FP, LR, SP */
	str	x29, [sp, #FREGS_FP]
	str	x9, [sp, #FREGS_LR]
	str	x10, [sp, #FREGS_SP]

	/* Save the PC after the ftrace callsite */
	str	x30, [sp, #FREGS_PC]

	/* Create a frame record for the callsite above the ftrace regs */
	stp	x29, x9, [sp, #FREGS_SIZE + 16]
	add	x29, sp, #FREGS_SIZE + 16

	/* Create our frame record above the ftrace regs */
	stp	x29, x30, [sp, #FREGS_SIZE]
	add	x29, sp, #FREGS_SIZE

	/* Prepare arguments for the tracer func */
	sub	x0, x30, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
	mov	x1, x9				// parent_ip (callsite's LR)
	mov	x3, sp				// regs

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS
	mov	x2, x11				// op
	ldr	x4, [x2, #FTRACE_OPS_FUNC]	// op->func
	blr	x4				// op->func(ip, parent_ip, op, regs)

#else
	ldr_l	x2, function_trace_op		// op

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
	bl	ftrace_stub			// func(ip, parent_ip, op, regs)
#endif

	/*
	 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
	 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
	 * to restore x0-x8, x29, and x30.
	 */
	/* Restore function arguments */
	ldp	x0, x1, [sp, #FREGS_X0]
	ldp	x2, x3, [sp, #FREGS_X2]
	ldp	x4, x5, [sp, #FREGS_X4]
	ldp	x6, x7, [sp, #FREGS_X6]
	ldr	x8, [sp, #FREGS_X8]

	/* Restore the callsite's FP */
	ldr	x29, [sp, #FREGS_FP]

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
	/* A callback may have installed a direct trampoline; take it if so. */
	ldr	x17, [sp, #FREGS_DIRECT_TRAMP]
	cbnz	x17, ftrace_caller_direct_late
#endif

	/* Restore the callsite's LR and PC */
	ldr	x30, [sp, #FREGS_LR]
	ldr	x9, [sp, #FREGS_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #FREGS_SIZE + 32

	ret	x9

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_INNER_LABEL(ftrace_caller_direct_late, SYM_L_LOCAL)
	/*
	 * Head to a direct trampoline in x17 after having run other tracers.
	 * The ftrace_regs are live, and x0-x8 and FP have been restored. The
	 * LR, PC, and SP have not been restored.
	 */

	/*
	 * Restore the callsite's LR and PC matching the trampoline calling
	 * convention.
	 */
	ldr	x9, [sp, #FREGS_LR]
	ldr	x30, [sp, #FREGS_PC]

	/* Restore the callsite's SP */
	add	sp, sp, #FREGS_SIZE + 32

SYM_INNER_LABEL(ftrace_caller_direct, SYM_L_LOCAL)
	/*
	 * Head to a direct trampoline in x17.
	 *
	 * We use `BR X17` as this can safely land on a `BTI C` or `PACIASP` in
	 * the trampoline, and will not unbalance any return stack.
	 */
	br	x17
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
SYM_CODE_END(ftrace_caller)
171
172
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * Trivial direct-call trampoline: restore the instrumented function's
 * original LR from x9 into x30, then return to the patched callsite
 * (the incoming x30), leaving the return stack balanced.
 */
SYM_CODE_START(ftrace_stub_direct_tramp)
	bti	c
	mov	x10, x30
	mov	x30, x9
	ret	x10
SYM_CODE_END(ftrace_stub_direct_tramp)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

/*
 * Gcc with -pg will put the following code in the beginning of each function:
 *	mov	x0, x30
 *	bl	_mcount
 *	[function's body ...]
 * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic
 * ftrace is enabled.
 *
 * Please note that x0 as an argument will not be used here because we can
 * get lr(x30) of instrumented function at any time by winding up call stack
 * as long as the kernel is compiled without -fomit-frame-pointer.
 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
 *
 * stack layout after mcount_enter in _mcount():
 *
 * current sp/fp =>  0:+-----+
 * in _mcount()        | x29 | -> instrumented function's fp
 *                     +-----+
 *                     | x30 | -> _mcount()'s lr (= instrumented function's pc)
 * old sp       => +16:+-----+
 * when instrumented   |     |
 * function calls      | ... |
 * _mcount()           |     |
 *                     |     |
 * instrumented =>  +xx:+-----+
 * function's fp       | x29 | -> parent's fp
 *                     +-----+
 *                     | x30 | -> instrumented function's lr (= parent's pc)
 *                     +-----+
 *                     | ... |
 */
/* Push a frame record for the mcount/ftrace entry code itself. */
.macro mcount_enter
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp
.endm

/* Pop the frame record pushed by mcount_enter and return. */
.macro mcount_exit
	ldp	x29, x30, [sp], #16
	ret
.endm

/* Convert a return address into the address of the branch instruction. */
.macro mcount_adjust_addr rd, rn
	sub	\rd, \rn, #AARCH64_INSN_SIZE
.endm

/* for instrumented function's parent */
.macro mcount_get_parent_fp reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg]
.endm

/* for instrumented function */
.macro mcount_get_pc0 reg
	mcount_adjust_addr	\reg, x30
.endm

/* PC of the instrumented function, read from our saved LR slot. */
.macro mcount_get_pc reg
	ldr	\reg, [x29, #8]
	mcount_adjust_addr	\reg, \reg
.endm

/* LR of the instrumented function, read from its own frame record. */
.macro mcount_get_lr reg
	ldr	\reg, [x29]
	ldr	\reg, [\reg, #8]
.endm

/* Address of the instrumented function's saved LR slot. */
.macro mcount_get_lr_addr reg
	ldr	\reg, [x29]
	add	\reg, \reg, #8
.endm

/*
 * _mcount() is used to build the kernel with -pg option, but all the branch
 * instructions to _mcount() are replaced to NOP initially at kernel start up,
 * and later on, NOP to branch to ftrace_caller() when enabled or branch to
 * NOP when disabled per-function base.
 */
SYM_FUNC_START(_mcount)
	ret			/* deliberately a no-op; callsites are patched at runtime */
SYM_FUNC_END(_mcount)
EXPORT_SYMBOL(_mcount)
NOKPROBE(_mcount)

/*
 * void ftrace_caller(unsigned long return_address)
 * @return_address: return address to instrumented function
 *
 * This function is a counterpart of _mcount() in 'static' ftrace, and
 * makes calls to:
 *  - tracer function to probe instrumented function's entry,
 *  - ftrace_graph_caller to set up an exit hook
 */
SYM_FUNC_START(ftrace_caller)
	mcount_enter

	mcount_get_pc0	x0		//     function's pc
	mcount_get_lr	x1		//     function's lr

SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)	// tracer(pc, lr);
	nop				// This will be replaced with "bl xxx"
					// where xxx can be any kind of tracer.

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
	nop				// If enabled, this will be replaced
					// "b ftrace_graph_caller"
#endif

	mcount_exit
SYM_FUNC_END(ftrace_caller)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * void ftrace_graph_caller(void)
 *
 * Called from _mcount() or ftrace_caller() when function_graph tracer is
 * selected.
 * This function w/ prepare_ftrace_return() fakes link register's value on
 * the call stack in order to intercept instrumented function's return path
 * and run return_to_handler() later on its exit.
 */
SYM_FUNC_START(ftrace_graph_caller)
	mcount_get_pc		x0	//     function's pc
	mcount_get_lr_addr	x1	//     pointer to function's saved lr
	mcount_get_parent_fp	x2	//     parent's fp
	bl	prepare_ftrace_return	// prepare_ftrace_return(pc, &lr, fp)

	mcount_exit
SYM_FUNC_END(ftrace_graph_caller)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */

/* No-op tracer callback; carries a CFI type (SYM_TYPED_FUNC_START). */
SYM_TYPED_FUNC_START(ftrace_stub)
	ret
SYM_FUNC_END(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* No-op graph-tracer callback; carries a CFI type (SYM_TYPED_FUNC_START). */
SYM_TYPED_FUNC_START(ftrace_stub_graph)
	ret
SYM_FUNC_END(ftrace_stub_graph)

/*
 * void return_to_handler(void)
 *
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
SYM_CODE_START(return_to_handler)
	/* Make room for ftrace_regs */
	sub	sp, sp, #FREGS_SIZE

	/* Save return value regs */
	stp	x0, x1, [sp, #FREGS_X0]
	stp	x2, x3, [sp, #FREGS_X2]
	stp	x4, x5, [sp, #FREGS_X4]
	stp	x6, x7, [sp, #FREGS_X6]

	/* Save the callsite's FP */
	str	x29, [sp, #FREGS_FP]

	mov	x0, sp
	bl	ftrace_return_to_handler	// addr = ftrace_return_to_handler(fregs);
	mov	x30, x0				// restore the original return address

	/* Restore return value regs */
	ldp	x0, x1, [sp, #FREGS_X0]
	ldp	x2, x3, [sp, #FREGS_X2]
	ldp	x4, x5, [sp, #FREGS_X4]
	ldp	x6, x7, [sp, #FREGS_X6]
	add	sp, sp, #FREGS_SIZE

	ret
SYM_CODE_END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */