GitHub Repository: torvalds/linux
Path: blob/master/arch/sh/kernel/kprobes.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel probes (kprobes) for SuperH
 *
 * Copyright (C) 2007 Chris Smith <[email protected]>
 * Copyright (C) 2006 Lineo Solutions, Inc.
 */
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

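/*
 * Per-CPU state for the breakpoint-based single-step emulation (see
 * prepare_singlestep() below): saved_current_opcode records the address
 * of the probe currently being stepped over, while saved_next_opcode and
 * saved_next_opcode2 describe the temporary breakpoint(s) planted on the
 * possible next instruction(s).
 */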
static DEFINE_PER_CPU(struct kprobe, saved_current_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode);
static DEFINE_PER_CPU(struct kprobe, saved_next_opcode2);

#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)

#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)

#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)

#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)

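/*
 * Prepare a new probe: copy out the original instruction so it can be
 * restored later and remember its opcode.  Probing an RTE (return from
 * exception) is refused here, since the breakpoint-based single-step
 * emulation below does not follow it.
 */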
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t opcode = *p->addr;

	if (OPCODE_RTE(opcode))
		return -EFAULT;	/* Bad breakpoint */

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = opcode;

	return 0;
}

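/*
 * Arming swaps the probed instruction for the breakpoint opcode;
 * disarming puts the saved original opcode back.  The instruction
 * cache is flushed for the modified word in both cases.
 */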
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (*p->addr == BREAKPOINT_INSTRUCTION)
		return 1;

	return 0;
}

/**
 * If an illegal slot instruction exception occurs for an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk("Warning: removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}

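/*
 * Tear down a probe: if temporary single-step breakpoints are still
 * armed for it on this CPU (saved_next_opcode/saved_next_opcode2),
 * disarm them along with the probe itself and clear the slots.
 */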
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	struct kprobe *saved = this_cpu_ptr(&saved_next_opcode);

	if (saved->addr) {
		arch_disarm_kprobe(p);
		arch_disarm_kprobe(saved);

		saved->addr = NULL;
		saved->opcode = 0;

		saved = this_cpu_ptr(&saved_next_opcode2);
		if (saved->addr) {
			arch_disarm_kprobe(saved);

			saved->addr = NULL;
			saved->opcode = 0;
		}
	}
}

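/*
 * Re-entrancy support: when a probe is hit while another one is being
 * handled, the current kprobe and its status are stashed in the per-CPU
 * control block by save_previous_kprobe() and put back afterwards by
 * restore_previous_kprobe().
 */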
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Singlestep is implemented by disabling the current kprobe and setting one
 * on the next instruction, following branches. Two probes are set if the
 * branch is conditional.
 */
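/*
 * The next-instruction address is derived from the probed opcode:
 * JMP/JSR jump to Rn and RTS returns to PR, BRAF/BSRF add Rn to pc + 4,
 * while BRA/BSR and the conditional BF/BT variants are PC-relative with
 * the target computed as pc + 4 + 2 * disp, exactly as done below.
 * Conditional branches get two breakpoints: one on the fall-through
 * instruction and one on the taken target.
 */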
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	__this_cpu_write(saved_current_opcode.addr, (kprobe_opcode_t *)regs->pc);

	if (p != NULL) {
		struct kprobe *op1, *op2;

		arch_disarm_kprobe(p);

		op1 = this_cpu_ptr(&saved_next_opcode);
		op2 = this_cpu_ptr(&saved_next_opcode2);

		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr = (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
			unsigned long disp = (p->opcode & 0x0FFF);
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);

		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			op1->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 +
						 regs->regs[reg_nr]);

		} else if (OPCODE_RTS(p->opcode)) {
			op1->addr = (kprobe_opcode_t *) regs->pr;

		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
			unsigned long disp = (p->opcode & 0x00FF);
			/* case 1 */
			op1->addr = p->addr + 1;
			/* case 2 */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
			unsigned long disp = (p->opcode & 0x00FF);
			/* case 1 */
			op1->addr = p->addr + 2;
			/* case 2 */
			op2->addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			op2->opcode = *(op2->addr);
			arch_arm_kprobe(op2);

		} else {
			op1->addr = p->addr + 1;
		}

		op1->opcode = *(op1->addr);
		arch_arm_kprobe(op1);
	}
}

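/*
 * On SH the return address of a call lives in the PR register.  The
 * kretprobe setup saves it in the instance and then points PR at the
 * trampoline, so the probed function "returns" into
 * __kretprobe_trampoline instead of its caller.
 */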
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->pr;
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->pr = (unsigned long)__kretprobe_trampoline;
}

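/*
 * Main breakpoint trap handler, reached via kprobe_exceptions_notify().
 * It deals with re-entry (a second probe hit while one is already being
 * handled), runs the user pre_handler and then sets up the single-step
 * emulation for the probed instruction.
 */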
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (regs->pc);

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				goto no_kprobe;
			}
			/* We have re-entered kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Save the original kprobe variables here and
			 * just single-step the new probe's instruction
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		/* Not one of ours: let kernel handle it */
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (".globl __kretprobe_trampoline\n"
		      "__kretprobe_trampoline:\n\t"
		      "nop\n");
}

/*
 * Called when we hit the probe point at __kretprobe_trampoline
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	regs->pc = __kretprobe_trampoline_handler(regs, NULL);

	return 1;
}

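/*
 * Runs when the temporary next-instruction breakpoint planted by
 * prepare_singlestep() is hit: call the post_handler, remove the
 * temporary breakpoint(s), re-arm the original probe and, for a
 * re-entered probe, restore the previously saved kprobe state.
 */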
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kprobe_opcode_t *addr = NULL;
	struct kprobe *p = NULL;

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	p = this_cpu_ptr(&saved_next_opcode);
	if (p->addr) {
		arch_disarm_kprobe(p);
		p->addr = NULL;
		p->opcode = 0;

		addr = __this_cpu_read(saved_current_opcode.addr);
		__this_cpu_write(saved_current_opcode.addr, NULL);

		p = get_kprobe(addr);
		arch_arm_kprobe(p);

		p = this_cpu_ptr(&saved_next_opcode2);
		if (p->addr) {
			arch_disarm_kprobe(p);
			p->addr = NULL;
			p->opcode = 0;
		}
	}

	/* Restore the previously saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();

	return 1;
}

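/*
 * Called from the fault path when a fault is taken while a kprobe is
 * active.  For faults raised by the single-stepped instruction the
 * probe state is rolled back and the PC is pointed back at the probe
 * address; otherwise an exception-table fixup is attempted.
 */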
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the pc back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			regs->pc = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (val == DIE_TRAP &&
	    args->trapnr == (BREAKPOINT_INSTRUCTION & 0xff)) {
		if (!kprobe_running()) {
			if (kprobe_handler(args->regs)) {
				ret = NOTIFY_STOP;
			} else {
				/* Not a kprobe trap */
				ret = NOTIFY_DONE;
			}
		} else {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
			    (kcb->kprobe_status == KPROBE_REENTER)) {
				if (post_kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			} else {
				if (kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			}
		}
	}

	return ret;
}

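/*
 * The kretprobe trampoline itself is probed at init time;
 * trampoline_probe_handler() then runs whenever a kretprobed function
 * returns through it.
 */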
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&__kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}