Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/s390/kernel/kprobes.c
26442 views
1
// SPDX-License-Identifier: GPL-2.0+
2
/*
3
* Kernel Probes (KProbes)
4
*
5
* Copyright IBM Corp. 2002, 2006
6
*
7
* s390 port, used ppc64 as template. Mike Grundy <[email protected]>
8
*/
9
10
#define pr_fmt(fmt) "kprobes: " fmt
11
12
#include <linux/kprobes.h>
13
#include <linux/ptrace.h>
14
#include <linux/preempt.h>
15
#include <linux/stop_machine.h>
16
#include <linux/cpufeature.h>
17
#include <linux/kdebug.h>
18
#include <linux/uaccess.h>
19
#include <linux/extable.h>
20
#include <linux/module.h>
21
#include <linux/slab.h>
22
#include <linux/hardirq.h>
23
#include <linux/ftrace.h>
24
#include <linux/execmem.h>
25
#include <asm/text-patching.h>
26
#include <asm/set_memory.h>
27
#include <asm/sections.h>
28
#include <asm/dis.h>
29
#include "entry.h"
30
31
/* Per-CPU pointer to the kprobe currently being handled (NULL if none). */
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
/* Per-CPU kprobe handling state (status, saved ctl regs and psw mask). */
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* No functions are blacklisted for kretprobes on s390. */
struct kretprobe_blackpoint kretprobe_blacklist[] = { };
35
36
void *alloc_insn_page(void)
37
{
38
void *page;
39
40
page = execmem_alloc(EXECMEM_KPROBES, PAGE_SIZE);
41
if (!page)
42
return NULL;
43
set_memory_rox((unsigned long)page, 1);
44
return page;
45
}
46
47
/*
 * Copy the probed instruction into its out-of-line single-step slot
 * (p->ainsn.insn) and remember the original first halfword in
 * p->opcode. For pc-relative RIL-b/RIL-c instructions the displacement
 * is rewritten so the copy still targets the original destination.
 */
static void copy_instruction(struct kprobe *p)
{
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	s64 disp, new_disp;
	u64 addr, new_addr;
	unsigned int len;

	/* Instruction length is derived from the opcode's first byte. */
	len = insn_length(*p->addr >> 8);
	memcpy(&insn, p->addr, len);
	p->opcode = insn[0];
	if (probe_is_insn_relative_long(&insn[0])) {
		/*
		 * For pc-relative instructions in RIL-b or RIL-c format patch
		 * the RI2 displacement field. The insn slot for the to be
		 * patched instruction is within the same 4GB area like the
		 * original instruction. Therefore the new displacement will
		 * always fit.
		 */
		disp = *(s32 *)&insn[1];
		addr = (u64)(unsigned long)p->addr;
		new_addr = (u64)(unsigned long)p->ainsn.insn;
		/* RI2 displacements are counted in halfwords (2 bytes). */
		new_disp = ((addr + (disp * 2)) - new_addr) / 2;
		*(s32 *)&insn[1] = new_disp;
	}
	/* The slot is mapped rox, so write via the kernel patching API. */
	s390_kernel_write(p->ainsn.insn, &insn, len);
}
NOKPROBE_SYMBOL(copy_instruction);
74
75
/* Check if paddr is at an instruction boundary */
76
static bool can_probe(unsigned long paddr)
77
{
78
unsigned long addr, offset = 0;
79
kprobe_opcode_t insn;
80
struct kprobe *kp;
81
82
if (paddr & 0x01)
83
return false;
84
85
if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
86
return false;
87
88
/* Decode instructions */
89
addr = paddr - offset;
90
while (addr < paddr) {
91
if (copy_from_kernel_nofault(&insn, (void *)addr, sizeof(insn)))
92
return false;
93
94
if (insn >> 8 == 0) {
95
if (insn != BREAKPOINT_INSTRUCTION) {
96
/*
97
* Note that QEMU inserts opcode 0x0000 to implement
98
* software breakpoints for guests. Since the size of
99
* the original instruction is unknown, stop following
100
* instructions and prevent setting a kprobe.
101
*/
102
return false;
103
}
104
/*
105
* Check if the instruction has been modified by another
106
* kprobe, in which case the original instruction is
107
* decoded.
108
*/
109
kp = get_kprobe((void *)addr);
110
if (!kp) {
111
/* not a kprobe */
112
return false;
113
}
114
insn = kp->opcode;
115
}
116
addr += insn_length(insn >> 8);
117
}
118
return addr == paddr;
119
}
120
121
int arch_prepare_kprobe(struct kprobe *p)
122
{
123
if (!can_probe((unsigned long)p->addr))
124
return -EINVAL;
125
/* Make sure the probe isn't going on a difficult instruction */
126
if (probe_is_prohibited_opcode(p->addr))
127
return -EINVAL;
128
p->ainsn.insn = get_insn_slot();
129
if (!p->ainsn.insn)
130
return -ENOMEM;
131
copy_instruction(p);
132
return 0;
133
}
134
NOKPROBE_SYMBOL(arch_prepare_kprobe);
135
136
struct swap_insn_args {
137
struct kprobe *p;
138
unsigned int arm_kprobe : 1;
139
};
140
141
static int swap_instruction(void *data)
142
{
143
struct swap_insn_args *args = data;
144
struct kprobe *p = args->p;
145
u16 opc;
146
147
opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
148
s390_kernel_write(p->addr, &opc, sizeof(opc));
149
return 0;
150
}
151
NOKPROBE_SYMBOL(swap_instruction);
152
153
void arch_arm_kprobe(struct kprobe *p)
154
{
155
struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
156
157
if (cpu_has_seq_insn()) {
158
swap_instruction(&args);
159
text_poke_sync();
160
} else {
161
stop_machine_cpuslocked(swap_instruction, &args, NULL);
162
}
163
}
164
NOKPROBE_SYMBOL(arch_arm_kprobe);
165
166
void arch_disarm_kprobe(struct kprobe *p)
167
{
168
struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
169
170
if (cpu_has_seq_insn()) {
171
swap_instruction(&args);
172
text_poke_sync();
173
} else {
174
stop_machine_cpuslocked(swap_instruction, &args, NULL);
175
}
176
}
177
NOKPROBE_SYMBOL(arch_disarm_kprobe);
178
179
void arch_remove_kprobe(struct kprobe *p)
180
{
181
if (!p->ainsn.insn)
182
return;
183
free_insn_slot(p->ainsn.insn, 0);
184
p->ainsn.insn = NULL;
185
}
186
NOKPROBE_SYMBOL(arch_remove_kprobe);
187
188
/*
 * Program the PER (program event recording) hardware to raise an
 * instruction-fetch event for exactly the address @ip and point the
 * psw there, so the instruction in the out-of-line slot is single
 * stepped. The previous %cr9-%cr11 contents and the interrupt bits of
 * the psw mask are saved in @kcb for disable_singlestep() to restore.
 */
static void enable_singlestep(struct kprobe_ctlblk *kcb,
			      struct pt_regs *regs,
			      unsigned long ip)
{
	union {
		struct ctlreg regs[3];
		struct {
			struct ctlreg control;	/* %cr9: PER event mask */
			struct ctlreg start;	/* %cr10: PER range start */
			struct ctlreg end;	/* %cr11: PER range end */
		};
	} per_kprobe;

	/* Set up the PER control registers %cr9-%cr11 */
	per_kprobe.control.val = PER_EVENT_IFETCH;
	per_kprobe.start.val = ip;
	per_kprobe.end.val = ip;

	/* Save control regs and psw mask */
	__local_ctl_store(9, 11, kcb->kprobe_saved_ctl);
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

	/* Set PER control regs, turns on single step for the given address */
	__local_ctl_load(9, 11, per_kprobe.regs);
	regs->psw.mask |= PSW_MASK_PER;
	/* Keep I/O and external interrupts disabled while single stepping. */
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);
218
219
/*
 * Undo enable_singlestep(): restore %cr9-%cr11 and the saved interrupt
 * mask bits, clear PER mode and continue execution at @ip.
 */
static void disable_singlestep(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs,
			       unsigned long ip)
{
	/* Restore control regs and psw mask, set new psw address */
	__local_ctl_load(9, 11, kcb->kprobe_saved_ctl);
	regs->psw.mask &= ~PSW_MASK_PER;
	regs->psw.mask |= kcb->kprobe_saved_imask;
	regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);
230
231
/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
	/* Save the currently active kprobe (may be NULL) and its status. */
	kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
	kcb->prev_kprobe.status = kcb->kprobe_status;
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);
243
244
/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	/* Clear the slot so a stale pointer cannot be popped again. */
	kcb->prev_kprobe.kp = NULL;
}
NOKPROBE_SYMBOL(pop_kprobe);
256
257
static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
258
{
259
switch (kcb->kprobe_status) {
260
case KPROBE_HIT_SSDONE:
261
case KPROBE_HIT_ACTIVE:
262
kprobes_inc_nmissed_count(p);
263
break;
264
case KPROBE_HIT_SS:
265
case KPROBE_REENTER:
266
default:
267
/*
268
* A kprobe on the code path to single step an instruction
269
* is a BUG. The code path resides in the .kprobes.text
270
* section and is executed with interrupts disabled.
271
*/
272
pr_err("Failed to recover from reentered kprobes.\n");
273
dump_kprobe(p);
274
BUG();
275
}
276
}
277
NOKPROBE_SYMBOL(kprobe_reenter_check);
278
279
/*
 * Handle the breakpoint trap raised by an armed kprobe: record the hit,
 * run the pre handler and arrange for the saved original instruction to
 * be single stepped out of line.
 * Returns 1 if the trap was consumed, 0 if no kprobe is responsible.
 */
static int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p;

	/*
	 * We want to disable preemption for the entire duration of kprobe
	 * processing. That includes the calls to the pre/post handlers
	 * and single stepping the kprobe instruction.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	/* psw.addr points past the 2-byte breakpoint instruction. */
	p = get_kprobe((void *)(regs->psw.addr - 2));

	if (p) {
		if (kprobe_running()) {
			/*
			 * We have hit a kprobe while another is still
			 * active. This can happen in the pre and post
			 * handler. Single step the instruction of the
			 * new probe but do not call any handler function
			 * of this secondary kprobe.
			 * push_kprobe and pop_kprobe saves and restores
			 * the currently active kprobe.
			 */
			kprobe_reenter_check(kcb, p);
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_REENTER;
		} else {
			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with single stepping. If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for changing execution path, so get out doing
			 * nothing more here.
			 */
			push_kprobe(kcb, p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;
			if (p->pre_handler && p->pre_handler(p, regs)) {
				pop_kprobe(kcb);
				preempt_enable_no_resched();
				return 1;
			}
			kcb->kprobe_status = KPROBE_HIT_SS;
		}
		enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
		return 1;
	} /* else:
	   * No kprobe at this address and no active kprobe. The trap has
	   * not been caused by a kprobe breakpoint. The race of breakpoint
	   * vs. kprobe remove does not exist because on s390 as we use
	   * stop_machine to arm/disarm the breakpoints.
	   */
	preempt_enable_no_resched();
	return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);
336
337
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long ip = regs->psw.addr;
	int fixup = probe_get_fixup_type(p->ainsn.insn);

	/* Translate the psw address from the insn slot back to p->addr. */
	if (fixup & FIXUP_PSW_NORMAL)
		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(p->ainsn.insn[0] >> 8);
		/* Branch not taken: execution fell through the slot copy. */
		if (ip - (unsigned long) p->ainsn.insn == ilen)
			ip = (unsigned long) p->addr + ilen;
	}

	if (fixup & FIXUP_RETURN_REGISTER) {
		/* Fix the link register written relative to the insn slot. */
		int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
		regs->gprs[reg] += (unsigned long) p->addr -
				   (unsigned long) p->ainsn.insn;
	}

	disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);
369
370
/*
 * Second half of kprobe handling, invoked once the out-of-line single
 * step has completed: fix up the psw, run the post handler and restore
 * the previous kprobe state.
 * Returns 1 if the single-step trap was consumed, 0 otherwise.
 */
static int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	if (!p)
		return 0;

	resume_execution(p, regs);
	/* No post handler for the single step of a reentered kprobe. */
	if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	pop_kprobe(kcb);
	/* Matches the preempt_disable() in kprobe_handler(). */
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);
397
398
/*
 * Handle a fault that occurred while a kprobe was active.
 * Returns 1 when the fault was fixed up via an exception table entry,
 * 0 to let the normal fault handling continue.
 */
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct kprobe *p = kprobe_running();

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		disable_singlestep(kcb, regs, (unsigned long) p->addr);
		pop_kprobe(kcb);
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);
436
437
/*
 * Entry point for faults taken while a kprobe is active. Interrupts are
 * disabled around kprobe_trap_handler() when the psw would allow them.
 * Note that psw.mask is deliberately re-read after the call:
 * kprobe_trap_handler() may have changed it via disable_singlestep().
 */
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	int ret;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();
	ret = kprobe_trap_handler(regs, trapnr);
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
	return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
449
450
/*
 * Wrapper routine for handling exceptions: dispatch breakpoint, single
 * step and trap dies to the kprobe handlers.
 * Returns NOTIFY_STOP when the event was consumed, NOTIFY_DONE otherwise.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *) data;
	struct pt_regs *regs = args->regs;
	int ret = NOTIFY_DONE;

	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_disable();

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		if (!preemptible() && kprobe_running() &&
		    kprobe_trap_handler(regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	/* Re-read psw.mask: the handlers above may have modified it. */
	if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
		local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

	return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);
487
488
/* No architecture specific initialization is needed for kprobes on s390. */
int __init arch_init_kprobes(void)
{
	return 0;
}
492
493
/* Blacklist the interrupt entry code for kprobes. */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					 (unsigned long)__irqentry_text_end);
}
498
499
/* Always reports "not a trampoline probe" on s390. */
int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);
504
505