GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kernel/ftrace_dyn.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Based on arch/arm64/kernel/ftrace.c
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */

#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>

#include <asm/inst.h>
#include <asm/module.h>

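/*
 * Patch one 4-byte instruction at 'pc'. When 'validate' is set, read back
 * the current instruction first and refuse to patch unless it still matches
 * 'old', catching unexpected modification of the call site.
 */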
static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
{
        u32 replaced;

        if (validate) {
                if (larch_insn_read((void *)pc, &replaced))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }

        if (larch_insn_patch_text((void *)pc, new))
                return -EPERM;

        return 0;
}

#ifdef CONFIG_MODULES
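/*
 * 'bl' encodes a 26-bit signed word offset, so a direct call can only reach
 * targets within +/-128MB (SZ_128M) of the call site.
 */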
static bool reachable_by_bl(unsigned long addr, unsigned long pc)
{
        long offset = (long)addr - (long)pc;

        return offset >= -SZ_128M && offset < SZ_128M;
}

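/*
 * Each module carries PLT entries for the ftrace trampolines in
 * mod->arch.ftrace_trampolines; return the entry matching 'addr', or NULL
 * if 'addr' is not one of the known trampolines.
 */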
static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
{
        struct plt_entry *plt = mod->arch.ftrace_trampolines;

        if (addr == FTRACE_ADDR)
                return &plt[FTRACE_PLT_IDX];
        if (addr == FTRACE_REGS_ADDR &&
            IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
                return &plt[FTRACE_REGS_PLT_IDX];

        return NULL;
}

/*
 * Find the address the callsite must branch to in order to reach '*addr'.
 *
 * Due to the limited range of the 'bl' instruction, modules may be placed
 * too far away to branch directly, so we must use a PLT.
 *
 * Returns true when '*addr' contains a reachable target address, or has
 * been modified to contain a PLT address. Returns false otherwise.
 */
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
        unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
        struct plt_entry *plt;

        /*
         * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
         * trampoline which knows how to indirectly reach that trampoline through
         * ops->direct_call.
         */
        if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
                *addr = FTRACE_REGS_ADDR;

        /*
         * When the target is within range of the 'bl' instruction, use 'addr'
         * as-is and branch to it directly.
         */
        if (reachable_by_bl(*addr, pc))
                return true;

        /*
         * 'mod' is only set at module load time, but if we end up
         * dealing with an out-of-range condition, we can assume it
         * is due to a module being loaded far away from the kernel.
         *
         * NOTE: __module_text_address() must be called within an RCU read
         * section, but we can rely on ftrace_lock to ensure that 'mod'
         * retains its validity throughout the remainder of this code.
         */
        if (!mod) {
                scoped_guard(rcu)
                        mod = __module_text_address(pc);
        }

        if (WARN_ON(!mod))
                return false;

        plt = get_ftrace_plt(mod, *addr);
        if (!plt) {
                pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
                return false;
        }

        *addr = (unsigned long)plt;
        return true;
}
#else /* !CONFIG_MODULES */
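/*
 * Without module support, every patch target is in the kernel image and is
 * assumed to be within 'bl' range, so no fixup is ever needed.
 */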
static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
{
        return true;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
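/*
 * Retarget a live call site from 'old_addr' to 'addr', validating that the
 * old 'bl' is still in place before patching in the new one.
 */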
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
                return -EINVAL;

        new = larch_insn_gen_bl(pc, addr);
        old = larch_insn_gen_bl(pc, old_addr);

        return ftrace_modify_code(pc, old, new, true);
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */

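/*
 * 'ftrace_call' is a fixed slot inside the ftrace trampoline, so it is
 * patched unconditionally (validate == false) to branch to the new tracer
 * function 'func'.
 */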
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        u32 new;
        unsigned long pc;

        pc = (unsigned long)&ftrace_call;
        new = larch_insn_gen_bl(pc, (unsigned long)func);

        return ftrace_modify_code(pc, 0, new, false);
}

/*
 * The compiler has inserted 2 NOPs before the regular function prologue.
 * T series registers are available and safe because of LoongArch's psABI.
 *
 * At runtime, we can replace nop with bl to enable the ftrace call and
 * replace bl with nop to disable it. The bl clobbers RA, so the original
 * RA value is saved to t0 first.
 *
 * In detail:
 *
 * | Compiled   | Disabled               | Enabled                |
 * +------------+------------------------+------------------------+
 * | nop        | move t0, ra            | move t0, ra            |
 * | nop        | nop                    | bl ftrace_caller       |
 * | func_body  | func_body              | func_body              |
 *
 * The RA value will be recovered by ftrace_regs_entry and restored into RA
 * before returning to the regular function prologue. When a function is not
 * being traced, the "move t0, ra" is harmless.
 */

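/*
 * Called once per call site at boot or module load: turn the first
 * compiler-generated nop into "move t0, ra" (see the table above).
 */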
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip;
        old = larch_insn_gen_nop();
        new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);

        return ftrace_modify_code(pc, old, new, true);
}

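/*
 * Enable tracing for 'rec': turn the second nop into "bl addr", fixing up
 * 'addr' to a trampoline or PLT entry first if it is out of 'bl' range.
 */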
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        old = larch_insn_gen_nop();
        new = larch_insn_gen_bl(pc, addr);

        return ftrace_modify_code(pc, old, new, true);
}

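/*
 * Disable tracing for 'rec': validate that the old 'bl' is still in place,
 * then restore the nop.
 */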
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
        u32 old, new;
        unsigned long pc;

        pc = rec->ip + LOONGARCH_INSN_SIZE;

        if (!ftrace_find_callable_addr(rec, NULL, &addr))
                return -EINVAL;

        new = larch_insn_gen_nop();
        old = larch_insn_gen_bl(pc, addr);

        return ftrace_modify_code(pc, old, new, true);
}

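/*
 * FTRACE_MAY_SLEEP lets the core code reschedule between updates when
 * patching a large number of call sites.
 */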
void arch_ftrace_update_code(int command)
{
        command |= FTRACE_MAY_SLEEP;
        ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
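/*
 * Hook a traced function's return: '*parent' holds the saved return
 * address; if the graph core accepts the frame, redirect it to
 * return_to_handler so the function's exit can be recorded.
 */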
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
{
        unsigned long old;
        unsigned long return_hooker = (unsigned long)&return_to_handler;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;

        if (!function_graph_enter(old, self_addr, 0, parent))
                *parent = return_hooker;
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        struct pt_regs *regs = &arch_ftrace_regs(fregs)->regs;
        unsigned long *parent = (unsigned long *)&regs->regs[1];
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;

        if (!function_graph_enter_regs(old, ip, 0, parent, fregs))
                *parent = return_hooker;
}
#else
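/*
 * Toggle the ftrace_graph_call site inside the trampoline between a nop
 * and a direct branch to ftrace_graph_caller.
 */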
static int ftrace_modify_graph_caller(bool enable)
{
        u32 branch, nop;
        unsigned long pc, func;
        extern void ftrace_graph_call(void);

        pc = (unsigned long)&ftrace_graph_call;
        func = (unsigned long)&ftrace_graph_caller;

        nop = larch_insn_gen_nop();
        branch = larch_insn_gen_b(pc, func);

        if (enable)
                return ftrace_modify_code(pc, nop, branch, true);
        else
                return ftrace_modify_code(pc, branch, nop, true);
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KPROBES_ON_FTRACE
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
                           struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
        int bit;
        struct pt_regs *regs;
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;

        if (unlikely(kprobe_ftrace_disabled))
                return;

        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        p = get_kprobe((kprobe_opcode_t *)ip);
        if (unlikely(!p) || kprobe_disabled(p))
                goto out;

        regs = ftrace_get_regs(fregs);
        if (!regs)
                goto out;

        kcb = get_kprobe_ctlblk();
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(p);
        } else {
                unsigned long orig_ip = instruction_pointer(regs);

                instruction_pointer_set(regs, ip);

                __this_cpu_write(current_kprobe, p);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        /*
                         * Emulate singlestep (and also recover regs->csr_era)
                         * as if there were a nop.
                         */
                        instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
                        if (unlikely(p->post_handler)) {
                                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                                p->post_handler(p, regs, 0);
                        }
                        instruction_pointer_set(regs, orig_ip);
                }

                /*
                 * If pre_handler returns !0, it changes regs->csr_era, so we
                 * have to skip emulating post_handler.
                 */
                __this_cpu_write(current_kprobe, NULL);
        }
out:
        ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

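/*
 * No out-of-line single-step slot is needed when the probe rides on an
 * ftrace call site, so 'ainsn.insn' is left unset.
 */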
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
        p->ainsn.insn = NULL;
        return 0;
}
#endif /* CONFIG_KPROBES_ON_FTRACE */