// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <[email protected]>
 * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
 * Author: Wu Zhangjin <[email protected]>
 *
 * Thanks go to Steven Rostedt for writing the original x86 version.
 */

#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>
#include <asm/uasm.h>
#include <asm/unistd.h>

#include <asm-generic/sections.h>

#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

/* Arch override because MIPS doesn't need to run this from stop_machine() */
void arch_ftrace_update_code(int command)
{
        ftrace_modify_all_code(command);
}

#define JAL 0x0c000000          /* jump & link: ip --> ra, jump to target */
#define ADDR_MASK 0x03ffffff    /* op_code|addr : 31...26|25 ....0 */
#define JUMP_RANGE_MASK ((1UL << 28) - 1)

#define INSN_NOP 0x00000000     /* nop */
#define INSN_JAL(addr) \
        ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))

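/*
 * Worked example (illustrative; the target address is hypothetical):
 * for addr = 0x80100400,
 *
 *      INSN_JAL(0x80100400) = JAL | ((0x80100400 >> 2) & ADDR_MASK)
 *                           = 0x0c000000 | 0x00040100
 *                           = 0x0c040100
 *
 * i.e. a "jal" word carrying the 26-bit word index of the target; the
 * CPU supplies the upper 4 bits of the PC itself, hence the 256 MB
 * window that JUMP_RANGE_MASK expresses.
 */
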
static unsigned int insn_jal_ftrace_caller __read_mostly;
static unsigned int insn_la_mcount[2] __read_mostly;
static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;

static inline void ftrace_dyn_arch_init_insns(void)
{
        u32 *buf;
        unsigned int v1;

        /* If we are not in compat space, the number of generated
         * instructions would exceed the expected maximum of 2, so to
         * prevent a buffer overflow we do not generate them at all;
         * insn_la_mcount is then never used by ftrace_make_call().
         */
        if (uasm_in_compat_space_p(MCOUNT_ADDR)) {
                /* la v1, _mcount */
                v1 = 3;
                buf = (u32 *)&insn_la_mcount[0];
                UASM_i_LA(&buf, v1, MCOUNT_ADDR);
        } else {
                pr_warn("ftrace: mcount address beyond 32 bits is not supported (%lX)\n",
                        MCOUNT_ADDR);
        }

        /* jal (ftrace_caller + 8), jumping over the first two instructions */
        buf = (u32 *)&insn_jal_ftrace_caller;
        uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
        uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
#endif
}

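/*
 * Note (inferred, not stated in the original): in compat space
 * UASM_i_LA expands to the two-word "lui v1, hi; addiu v1, v1, lo"
 * sequence pictured in the call-site comment further below, which is
 * why insn_la_mcount[] has room for exactly two words.
 */
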
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
        int faulted;

        /* *(unsigned int *)ip = new_code; */
        safe_store_code(new_code, ip, faulted);

        if (unlikely(faulted))
                return -EFAULT;

        flush_icache_range(ip, ip + 8);

        return 0;
}

#ifndef CONFIG_64BIT
static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
                                unsigned int new_code2)
{
        int faulted;

        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        flush_icache_range(ip, ip + 8);

        return 0;
}

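/*
 * The "r" (reversed) variant stores the second word -- the delay-slot
 * instruction -- before the first.  A plausible reading (inferred,
 * not stated in the original): when ftrace_make_call() re-arms a call
 * site, the delay slot must hold its final instruction before the
 * branch word becomes visible, so the pair is never live in a mixed
 * old/new state.
 */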
static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
                                 unsigned int new_code2)
{
        int faulted;

        ip += 4;
        safe_store_code(new_code2, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        ip -= 4;
        safe_store_code(new_code1, ip, faulted);
        if (unlikely(faulted))
                return -EFAULT;

        flush_icache_range(ip, ip + 8);

        return 0;
}
#endif

/*
 * The details about the calling site of mcount on MIPS
 *
 * 1. For kernel:
 *
 * move at, ra
 * jal _mcount          --> nop
 * sub sp, sp, 8        --> nop  (CONFIG_32BIT)
 *
 * 2. For modules:
 *
 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
 *
 * lui v1, hi_16bit_of_mcount           --> b 1f (0x10000005)
 * addiu v1, v1, low_16bit_of_mcount    --> nop  (CONFIG_32BIT)
 * move at, ra
 * move $12, ra_address
 * jalr v1
 * sub sp, sp, 8
 *                                      1: offset = 5 instructions
 * 2.2 For the other situations
 *
 * lui v1, hi_16bit_of_mcount           --> b 1f (0x10000004)
 * addiu v1, v1, low_16bit_of_mcount    --> nop  (CONFIG_32BIT)
 * move at, ra
 * jalr v1
 * nop | move $12, ra_address | sub sp, sp, 8
 *                                      1: offset = 4 instructions
 */

#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

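/*
 * Decoding note (illustrative): 0x10000000 is "beq $0, $0, offset",
 * an always-taken branch, so 0x10000000 | 5 == 0x10000005 is "b 1f"
 * with a branch offset of 5 words, counted relative to the instruction
 * in the delay slot -- exactly the module layouts pictured above.
 */
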
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * If ip is in kernel space, no long call is needed; otherwise a
         * long call is required.
         */
        new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;

#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        /*
         * On 32 bit MIPS platforms, gcc adds a stack adjust
         * instruction in the delay slot after the branch to
         * mcount and expects mcount to restore the sp on return.
         * This is based on a legacy API and does nothing but
         * waste instructions, so it is being removed at runtime.
         */
        return ftrace_modify_code_2(ip, new, INSN_NOP);
#endif
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int new;
        unsigned long ip = rec->ip;

        /*
         * When the code to patch does not belong to the kernel text, we
         * must use insn_la_mcount.  However, if MCOUNT_ADDR is not in
         * compat space, insn_la_mcount was never generated and cannot
         * be used.
         */
        if (!core_kernel_text(ip) && !uasm_in_compat_space_p(MCOUNT_ADDR))
                return -EFAULT;

        new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
#ifdef CONFIG_64BIT
        return ftrace_modify_code(ip, new);
#else
        return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
                                     INSN_NOP : insn_la_mcount[1]);
#endif
}

#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))

int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned int new;

        new = INSN_JAL((unsigned long)func);

        return ftrace_modify_code(FTRACE_CALL_IP, new);
}

int __init ftrace_dyn_arch_init(void)
{
        /* Encode the instructions when booting */
        ftrace_dyn_arch_init_insns();

        /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
        ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);

        return 0;
}
#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

extern void ftrace_graph_call(void);
#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
                                  insn_j_ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
}

#endif  /* CONFIG_DYNAMIC_FTRACE */

#ifndef KBUILD_MCOUNT_RA_ADDRESS

#define S_RA_SP         (0xafbf << 16)  /* s{d,w} ra, offset(sp) */
#define S_R_SP          (0xafb0 << 16)  /* s{d,w} R, offset(sp) */
#define OFFSET_MASK     0xffff          /* stack offset range: 0 ~ PT_SIZE */

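/*
 * Worked example (illustrative): on MIPS32, "sw ra, 24(sp)" encodes
 * as 0xafbf0018 -- opcode 0x2b (sw), base $sp (29), rt $ra (31),
 * offset 0x18 -- so it matches (code & S_RA_SP) == S_RA_SP, and its
 * spill offset is recovered as (code & OFFSET_MASK) == 0x18.  The
 * looser S_R_SP test matches a store of any of $16..$31 to the stack.
 */
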
static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra,
                unsigned long old_parent_ra, unsigned long parent_ra_addr,
                unsigned long fp)
{
        unsigned long sp, ip, tmp;
        unsigned int code;
        int faulted;

        /*
         * For a module, move ip back from the return address to just
         * after the instruction "lui v1, hi_16bit_of_mcount" (an offset
         * of 24); for the kernel, move back to just after the
         * instruction "move at, ra" (an offset of 16).
         */
        ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);

        /*
         * Search the text backwards until we find either a non-store
         * instruction or the "s{d,w} ra, offset(sp)" instruction.
         */
        do {
                /* get the code at "ip": code = *(unsigned int *)ip; */
                safe_load_code(code, ip, faulted);

                if (unlikely(faulted))
                        return 0;
                /*
                 * If we hit a non-store instruction before finding where
                 * ra is stored, then this is a leaf function and it does
                 * not store ra on the stack.
                 */
                if ((code & S_R_SP) != S_R_SP)
                        return parent_ra_addr;

                /* Move on to the previous instruction */
                ip -= 4;
        } while ((code & S_RA_SP) != S_RA_SP);

        sp = fp + (code & OFFSET_MASK);

        /* tmp = *(unsigned long *)sp; */
        safe_load_stack(tmp, sp, faulted);
        if (unlikely(faulted))
                return 0;

        if (tmp == old_parent_ra)
                return sp;
        return 0;
}

#endif  /* !KBUILD_MCOUNT_RA_ADDRESS */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
{
        unsigned long old_parent_ra;
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        int faulted, insns;

        if (unlikely(ftrace_graph_is_dead()))
                return;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        /*
         * "parent_ra_addr" is the stack address where the return address
         * of the caller of _mcount is saved.
         *
         * If gcc < 4.5, a leaf function does not save the return address
         * on the stack, so we "emulate" one in _mcount's own stack space
         * and hijack that directly.
         * A non-leaf function does save the return address in its own
         * stack space, so we cannot hijack it directly; we must find the
         * real stack address, which is done by ftrace_get_parent_ra_addr().
         *
         * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
         * non-leaf function the location of the return address is saved
         * in $12 for us.
         * For a leaf function, gcc just puts a zero into $12, and we
         * handle that case in ftrace_graph_caller() of mcount.S.
         */

        /* old_parent_ra = *parent_ra_addr; */
        safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;
#ifndef KBUILD_MCOUNT_RA_ADDRESS
        parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
                        old_parent_ra, (unsigned long)parent_ra_addr, fp);
        /*
         * If getting the stack address of the non-leaf function's ra
         * fails, stop the function graph tracer and return.
         */
        if (parent_ra_addr == NULL)
                goto out;
#endif
        /* *parent_ra_addr = return_hooker; */
        safe_store_stack(return_hooker, parent_ra_addr, faulted);
        if (unlikely(faulted))
                goto out;

        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which is used to filter the function
         * entries configured through the tracing/set_graph_function
         * interface.
         */

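        /*
         * Worked out from the call-site layouts above: with
         * MCOUNT_INSN_SIZE == 4, kernel text rewinds self_ra by 8 bytes
         * to the "jal _mcount" word, while a module rewinds by
         * (MCOUNT_OFFSET_INSNS + 1) * 4 bytes to the leading "lui v1"
         * word; either way self_ra lands on the ip recorded in
         * __mcount_loc.
         */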
        insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
        self_ra -= (MCOUNT_INSN_SIZE * insns);

        if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
                *parent_ra_addr = old_parent_ra;
        return;
out:
        ftrace_graph_stop();
        WARN_ON(1);
}
#endif  /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_FTRACE_SYSCALLS

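/*
 * Illustrative note (numbers recalled, not from this file): MIPS
 * syscall numbers are ABI-biased.  On O32, __NR_O32_Linux is 4000 and
 * write() is syscall 4004, so arch_syscall_addr(4004) would return
 * sys_call_table[4].
 */
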
#ifdef CONFIG_32BIT
unsigned long __init arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
}
#endif

#ifdef CONFIG_64BIT

unsigned long __init arch_syscall_addr(int nr)
{
#ifdef CONFIG_MIPS32_N32
        if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
                return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
#endif
        if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
                return (unsigned long)sys_call_table[nr - __NR_64_Linux];
#ifdef CONFIG_MIPS32_O32
        if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
                return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
#endif

        return (unsigned long) &sys_ni_syscall;
}
#endif

#endif /* CONFIG_FTRACE_SYSCALLS */