GitHub Repository: torvalds/linux
Path: blob/master/arch/csky/kernel/probes/kprobes.c
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

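/*
 * Live instruction patching is done under stop_machine(): every online
 * CPU spins in patch_text_cb(), the last CPU to arrive performs the
 * 16-bit store and writes back the D-cache, then all CPUs invalidate
 * their I-cache for the patched range before resuming.
 */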
struct csky_insn_patch {
	kprobe_opcode_t *addr;
	u32 opcode;
	atomic_t cpu_count;
};

static int __kprobes patch_text_cb(void *priv)
{
	struct csky_insn_patch *param = priv;
	unsigned int addr = (unsigned int)param->addr;

	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
		*(u16 *) addr = cpu_to_le16(param->opcode);
		dcache_wb_range(addr, addr + 2);
		atomic_inc(&param->cpu_count);
	} else {
		while (atomic_read(&param->cpu_count) <= num_online_cpus())
			cpu_relax();
	}

	icache_inv_range(addr, addr + 2);

	return 0;
}

static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };

	return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}

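/*
 * Copy the probed instruction into the out-of-line single-step slot and
 * record the address of the instruction that follows the probe point,
 * so execution can resume there once the step completes.
 */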
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	p->ainsn.api.restore = (unsigned long)p->addr + offset;

	patch_text(p->ainsn.api.insn, p->opcode);
}

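/*
 * Instructions that cannot be stepped out of line (such as branches)
 * are simulated in software instead: the decoder supplies a handler,
 * and no restore address is needed.
 */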
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	post_kprobe_handler(kcb, regs);
}

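/*
 * Called by the core kprobes code to validate and prepare a probe:
 * reject misaligned addresses, save the original opcode, then either
 * allocate a single-step slot or fall back to simulation, depending on
 * what the decoder says about the instruction.
 */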
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x1)
		return -EILSEQ;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	/* decode instruction */
	switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, USR_BKPT);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

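/*
 * A kprobe handler can itself hit another probe. These helpers stash
 * and restore the per-CPU "current kprobe" state so that one level of
 * nesting can be unwound.
 */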
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * re-enabled until after single-step mode ends. Without disabling
 * interrupts on the local CPU, an interrupt could arrive between the
 * exception return and the start of the out-of-line single step, and we
 * would then wrongly single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_sr = regs->sr;
	regs->sr &= ~BIT(6);
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	regs->sr = kcb->saved_sr;
}

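/*
 * Record the PC we expect to see once the single step has completed:
 * the address just past the instruction in the slot. The single-step
 * handler only claims the trap when this address matches.
 */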
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
	unsigned long offset = is_insn32(p->opcode) ? 4 : 2;

	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + offset;
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

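/*
 * The two-bit trace-mode field of the status register sits at the bits
 * cleared by TRACE_MODE_MASK: TRACE_MODE_SI selects single-instruction
 * tracing (trap after each instruction), TRACE_MODE_RUN is normal
 * execution.
 */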
#define TRACE_MODE_SI		BIT(14)
#define TRACE_MODE_MASK		~(0x3 << 14)
#define TRACE_MODE_RUN		0

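/*
 * Arrange for the probed instruction to be executed: either point the
 * PC at the out-of-line slot with single-instruction tracing enabled,
 * or simulate the instruction directly. When reenter is set, the
 * currently running kprobe is saved first so it can be restored later.
 */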
static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot, p);	/* mark pending ss */

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

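/*
 * A breakpoint was hit while another kprobe is active. Recursion from a
 * handler (HIT_ACTIVE/HIT_SSDONE) is counted as missed and then stepped
 * through; recursion from within the single-step path itself is
 * unrecoverable.
 */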
static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

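/*
 * Finish a probe hit after the single step (or simulation) is done:
 * restore the PC to the instruction after the probe point, unwind any
 * nested kprobe, and invoke the user's post handler.
 */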
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* restore the return addr if it was a non-branching insn */
	if (cur->ainsn.api.restore != 0)
		regs->pc = cur->ainsn.api.restore;

	/* restore the original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}

	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/* The post_handler can hit a breakpoint and single-step
		 * again, so the status is set to HIT_SSDONE first to let
		 * the recursive exception be handled.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

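/*
 * Called from the page-fault path when a fault happens with a kprobe
 * active: recover from faults raised while single stepping, and give
 * the exception tables a chance to fix up faults from the handlers.
 */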
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long) cur->addr;
		BUG_ON(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

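/*
 * Entry point from the trap handler when a USR_BKPT planted by
 * arch_arm_kprobe() is hit. Returns 1 if the trap was consumed by
 * kprobes, 0 if it was not ours.
 */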
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it has
			 * modified the execution path and there is no
			 * need to single-step; just reset the current
			 * kprobe and exit.
			 *
			 * The pre_handler can itself hit a breakpoint
			 * and single-step before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			else
				reset_current_kprobe();
		}
		return 1;
	}

	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return to the original instruction and continue.
	 */
	return 0;
}

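/*
 * Entry point from the trace trap once the slot instruction has
 * executed. The trap is only claimed when a single step is pending and
 * the PC matches the recorded address; trace mode and the IRQ state are
 * then restored and the probe is completed.
 */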
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
		clear_ss_context(kcb);	/* clear pending ss */

		kprobes_restore_local_irqflag(kcb, regs);
		regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;

		post_kprobe_handler(kcb, regs);
		return 1;
	}
	return 0;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	return ret;
}

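/*
 * Kretprobe support: arch_prepare_kretprobe() hijacks the link register
 * so that the probed function returns into __kretprobe_trampoline,
 * whose probe handler below recovers the real return address.
 */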
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, NULL);
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->lr;
	ri->fp = NULL;
	regs->lr = (unsigned long) &__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}