Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kernel/hw_breakpoint.c
26424 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
/*
3
* HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
4
* using the CPU's debug registers.
5
*
6
* Copyright (C) 2012 ARM Limited
7
* Author: Will Deacon <[email protected]>
8
*/
9
10
#define pr_fmt(fmt) "hw-breakpoint: " fmt
11
12
#include <linux/compat.h>
13
#include <linux/cpu_pm.h>
14
#include <linux/errno.h>
15
#include <linux/hw_breakpoint.h>
16
#include <linux/kprobes.h>
17
#include <linux/perf_event.h>
18
#include <linux/ptrace.h>
19
#include <linux/smp.h>
20
#include <linux/uaccess.h>
21
22
#include <asm/current.h>
23
#include <asm/debug-monitors.h>
24
#include <asm/esr.h>
25
#include <asm/exception.h>
26
#include <asm/hw_breakpoint.h>
27
#include <asm/traps.h>
28
#include <asm/cputype.h>
29
#include <asm/system_misc.h>
30
31
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/*
 * Currently stepping a per-CPU kernel breakpoint: holds one of the
 * ARM_KERNEL_STEP_* states consumed by the single-step handling below.
 */
static DEFINE_PER_CPU(int, stepping_kernel_bp);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_wrps;
43
44
int hw_breakpoint_slots(int type)
45
{
46
/*
47
* We can be called early, so don't rely on
48
* our static variables being initialised.
49
*/
50
switch (type) {
51
case TYPE_INST:
52
return get_num_brps();
53
case TYPE_DATA:
54
return get_num_wrps();
55
default:
56
pr_warn("unknown slot type: %d\n", type);
57
return 0;
58
}
59
}
60
61
/*
 * The debug register number must be encoded as an immediate in the
 * underlying mrs/msr instruction, so a runtime register index can only be
 * handled by expanding one switch case per register. These macros generate
 * those cases; the GEN_* variants emit all 16 possible BRP/WRP indices.
 */
#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_READ(N, REG, VAL);	\
		break

#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
	case (OFF + N):				\
		AARCH64_DBG_WRITE(N, REG, VAL);	\
		break

#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
	READ_WB_REG_CASE(OFF, 0, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 1, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 2, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 3, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 4, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 5, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 6, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 7, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 8, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 9, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
	READ_WB_REG_CASE(OFF, 15, REG, VAL)

#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
	WRITE_WB_REG_CASE(OFF, 0, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 1, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 2, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 3, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 4, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 5, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 6, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 7, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 8, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 9, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
106
107
/*
 * Read debug register @n from the bank selected by @reg (one of the
 * AARCH64_DBG_REG_{BVR,BCR,WVR,WCR} bases). Returns 0 and warns for a
 * register outside the generated cases.
 */
static u64 read_wb_reg(int reg, int n)
{
	u64 val = 0;

	switch (reg + n) {
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to read from unknown breakpoint register %d\n", n);
	}

	return val;
}
NOKPROBE_SYMBOL(read_wb_reg);
123
124
/*
 * Write @val to debug register @n in the bank selected by @reg (one of
 * the AARCH64_DBG_REG_{BVR,BCR,WVR,WCR} bases). Writes to a register
 * outside the generated cases are dropped with a warning.
 */
static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warn("attempt to write to unknown breakpoint register %d\n", n);
	}
	/* Synchronise the context so the register write takes effect. */
	isb();
}
NOKPROBE_SYMBOL(write_wb_reg);
137
138
/*
 * Convert a breakpoint privilege level to the corresponding exception
 * level.
 */
static enum dbg_active_el debug_exception_level(int privilege)
{
	switch (privilege) {
	case AARCH64_BREAKPOINT_EL0:
		return DBG_ACTIVE_EL0;
	case AARCH64_BREAKPOINT_EL1:
		return DBG_ACTIVE_EL1;
	default:
		pr_warn("invalid breakpoint privilege level %d\n", privilege);
		/*
		 * NOTE(review): -EINVAL is returned through the enum return
		 * type; callers appear to compare only against the
		 * DBG_ACTIVE_* values — confirm no caller treats this as a
		 * valid level.
		 */
		return -EINVAL;
	}
}
NOKPROBE_SYMBOL(debug_exception_level);
155
156
/* Operations that can be performed on a breakpoint/watchpoint slot. */
enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,		/* claim a free slot */
	HW_BREAKPOINT_UNINSTALL,	/* release the slot owned by this event */
	HW_BREAKPOINT_RESTORE		/* find the owned slot (e.g. after CPU PM) */
};
161
162
static int is_compat_bp(struct perf_event *bp)
163
{
164
struct task_struct *tsk = bp->hw.target;
165
166
/*
167
* tsk can be NULL for per-cpu (non-ptrace) breakpoints.
168
* In this case, use the native interface, since we don't have
169
* the notion of a "compat CPU" and could end up relying on
170
* deprecated behaviour if we use unaligned watchpoints in
171
* AArch64 state.
172
*/
173
return tsk && is_compat_thread(task_thread_info(tsk));
174
}
175
176
/**
177
* hw_breakpoint_slot_setup - Find and setup a perf slot according to
178
* operations
179
*
180
* @slots: pointer to array of slots
181
* @max_slots: max number of slots
182
* @bp: perf_event to setup
183
* @ops: operation to be carried out on the slot
184
*
185
* Return:
186
* slot index on success
187
* -ENOSPC if no slot is available/matches
188
* -EINVAL on wrong operations parameter
189
*/
190
static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
191
struct perf_event *bp,
192
enum hw_breakpoint_ops ops)
193
{
194
int i;
195
struct perf_event **slot;
196
197
for (i = 0; i < max_slots; ++i) {
198
slot = &slots[i];
199
switch (ops) {
200
case HW_BREAKPOINT_INSTALL:
201
if (!*slot) {
202
*slot = bp;
203
return i;
204
}
205
break;
206
case HW_BREAKPOINT_UNINSTALL:
207
if (*slot == bp) {
208
*slot = NULL;
209
return i;
210
}
211
break;
212
case HW_BREAKPOINT_RESTORE:
213
if (*slot == bp)
214
return i;
215
break;
216
default:
217
pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
218
return -EINVAL;
219
}
220
}
221
return -ENOSPC;
222
}
223
224
/*
 * Install, uninstall or restore a breakpoint/watchpoint on this CPU.
 *
 * Selects the register bank (BCR/BVR vs WCR/WVR) from the breakpoint
 * type, resolves the matching slot and then programs or clears the
 * debug registers accordingly. Returns 0 on success or the negative
 * error from the slot lookup.
 */
static int hw_breakpoint_control(struct perf_event *bp,
				 enum hw_breakpoint_ops ops)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slots;
	struct debug_info *debug_info = &current->thread.debug;
	int i, max_slots, ctrl_reg, val_reg, reg_enable;
	enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
	u32 ctrl;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_reg = AARCH64_DBG_REG_BCR;
		val_reg = AARCH64_DBG_REG_BVR;
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		/* Keep the enable bit clear while this class is suspended. */
		reg_enable = !debug_info->bps_disabled;
	} else {
		/* Watchpoint */
		ctrl_reg = AARCH64_DBG_REG_WCR;
		val_reg = AARCH64_DBG_REG_WVR;
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		reg_enable = !debug_info->wps_disabled;
	}

	i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);

	if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
		return i;

	switch (ops) {
	case HW_BREAKPOINT_INSTALL:
		/*
		 * Ensure debug monitors are enabled at the correct exception
		 * level.
		 */
		enable_debug_monitors(dbg_el);
		fallthrough;
	case HW_BREAKPOINT_RESTORE:
		/* Setup the address register. */
		write_wb_reg(val_reg, i, info->address);

		/* Setup the control register. */
		ctrl = encode_ctrl_reg(info->ctrl);
		write_wb_reg(ctrl_reg, i,
			     reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
		break;
	case HW_BREAKPOINT_UNINSTALL:
		/* Reset the control register. */
		write_wb_reg(ctrl_reg, i, 0);

		/*
		 * Release the debug monitors for the correct exception
		 * level.
		 */
		disable_debug_monitors(dbg_el);
		break;
	}

	return 0;
}
286
287
/*
288
* Install a perf counter breakpoint.
289
*/
290
int arch_install_hw_breakpoint(struct perf_event *bp)
291
{
292
return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
293
}
294
295
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
296
{
297
hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
298
}
299
300
/*
 * Map an ARM_BREAKPOINT_LEN_* encoding to its length in bytes.
 * Unrecognised encodings map to 0.
 */
static int get_hbp_len(u8 hbp_len)
{
	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		return 1;
	case ARM_BREAKPOINT_LEN_2:
		return 2;
	case ARM_BREAKPOINT_LEN_3:
		return 3;
	case ARM_BREAKPOINT_LEN_4:
		return 4;
	case ARM_BREAKPOINT_LEN_5:
		return 5;
	case ARM_BREAKPOINT_LEN_6:
		return 6;
	case ARM_BREAKPOINT_LEN_7:
		return 7;
	case ARM_BREAKPOINT_LEN_8:
		return 8;
	}

	return 0;
}
333
334
/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	unsigned int len;
	unsigned long va;

	va = hw->address;
	len = get_hbp_len(hw->ctrl.len);

	/*
	 * Both the first and the last watched byte must lie at or above
	 * TASK_SIZE for this to count as a kernel breakpoint.
	 */
	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
347
348
/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 *
 * @ctrl:     architectural control word to decode.
 * @gen_len:  out: one of HW_BREAKPOINT_LEN_*.
 * @gen_type: out: one of HW_BREAKPOINT_{X,R,W,RW}.
 * @offset:   out: position of the lowest bit set in the byte-address-select
 *            mask, i.e. the offset of the first watched byte.
 *
 * Returns 0 on success, -EINVAL for an unrecognised type or length.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type, int *offset)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* An empty byte-address-select mask watches nothing. */
	if (!ctrl.len)
		return -EINVAL;
	*offset = __ffs(ctrl.len);

	/* Len: decode the mask relative to its first set bit. */
	switch (ctrl.len >> *offset) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_3:
		*gen_len = HW_BREAKPOINT_LEN_3;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_5:
		*gen_len = HW_BREAKPOINT_LEN_5;
		break;
	case ARM_BREAKPOINT_LEN_6:
		*gen_len = HW_BREAKPOINT_LEN_6;
		break;
	case ARM_BREAKPOINT_LEN_7:
		*gen_len = HW_BREAKPOINT_LEN_7;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
410
411
/*
 * Construct an arch_hw_breakpoint from a perf_event.
 *
 * Translates the generic attr->bp_type/bp_len/bp_addr fields into the
 * architectural control/address encoding in @hw. Returns 0 on success,
 * -EINVAL for an unsupported type or length combination.
 */
static int arch_build_bp_info(struct perf_event *bp,
			      const struct perf_event_attr *attr,
			      struct arch_hw_breakpoint *hw)
{
	/* Type */
	switch (attr->bp_type) {
	case HW_BREAKPOINT_X:
		hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		hw->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (attr->bp_len) {
	case HW_BREAKPOINT_LEN_1:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_3:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_3;
		break;
	case HW_BREAKPOINT_LEN_4:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_5:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_5;
		break;
	case HW_BREAKPOINT_LEN_6:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_6;
		break;
	case HW_BREAKPOINT_LEN_7:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_7;
		break;
	case HW_BREAKPOINT_LEN_8:
		hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * On AArch64, we only permit breakpoints of length 4, whereas
	 * AArch32 also requires breakpoints of length 2 for Thumb.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes.
	 */
	if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		if (is_compat_bp(bp)) {
			if (hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
			    hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
				return -EINVAL;
		} else if (hw->ctrl.len != ARM_BREAKPOINT_LEN_4) {
			/*
			 * FIXME: Some tools (I'm looking at you perf) assume
			 *	  that breakpoints should be sizeof(long). This
			 *	  is nonsense. For now, we fix up the parameter
			 *	  but we should probably return -EINVAL instead.
			 */
			hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
		}
	}

	/* Address */
	hw->address = attr->bp_addr;

	/*
	 * Privilege
	 * Note that we disallow combined EL0/EL1 breakpoints because
	 * that would complicate the stepping code.
	 */
	if (arch_check_bp_in_kernelspace(hw))
		hw->ctrl.privilege = AARCH64_BREAKPOINT_EL1;
	else
		hw->ctrl.privilege = AARCH64_BREAKPOINT_EL0;

	/* Enabled? */
	hw->ctrl.enabled = !attr->disabled;

	return 0;
}
505
506
/*
 * Validate the arch-specific HW Breakpoint register settings.
 *
 * Builds @hw from @attr via arch_build_bp_info(), then applies the
 * alignment rules: the stored address is aligned down and the byte-
 * address-select mask (ctrl.len) is shifted up by the offset so that the
 * original bytes are still the ones watched. Returns 0 on success or a
 * negative errno.
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp, attr, hw);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_bp(bp)) {
		if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = hw->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;

			fallthrough;
		case 3:
			/* Allow single byte watchpoint. */
			if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;

			fallthrough;
		default:
			return -EINVAL;
		}
	} else {
		if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		offset = hw->address & alignment_mask;
	}

	/* Align the address down and shift the BAS mask up to compensate. */
	hw->address &= ~alignment_mask;
	hw->ctrl.len <<= offset;

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (hw->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target)
		return -EINVAL;

	return 0;
}
576
577
/*
 * Enable/disable all of the breakpoints active at the specified
 * exception level at the register level.
 * This is used when single-stepping after a breakpoint exception.
 *
 * @reg:    AARCH64_DBG_REG_BCR for breakpoints or AARCH64_DBG_REG_WCR for
 *          watchpoints; anything else is silently ignored.
 * @el:     only slots whose privilege maps to this exception level are
 *          touched.
 * @enable: non-zero sets the enable bit (bit 0) of each control register,
 *          zero clears it.
 */
static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
	int i, max_slots, privilege;
	u32 ctrl;
	struct perf_event **slots;

	switch (reg) {
	case AARCH64_DBG_REG_BCR:
		slots = this_cpu_ptr(bp_on_reg);
		max_slots = core_num_brps;
		break;
	case AARCH64_DBG_REG_WCR:
		slots = this_cpu_ptr(wp_on_reg);
		max_slots = core_num_wrps;
		break;
	default:
		return;
	}

	for (i = 0; i < max_slots; ++i) {
		if (!slots[i])
			continue;

		/* Skip slots programmed for a different exception level. */
		privilege = counter_arch_bp(slots[i])->ctrl.privilege;
		if (debug_exception_level(privilege) != el)
			continue;

		ctrl = read_wb_reg(reg, i);
		if (enable)
			ctrl |= 0x1;
		else
			ctrl &= ~0x1;
		write_wb_reg(reg, i, ctrl);
	}
}
NOKPROBE_SYMBOL(toggle_bp_registers);
618
619
/*
 * Debug exception handlers.
 */

/*
 * Breakpoint exception handler: match the faulting PC against each
 * programmed BRP, report hits to perf, and — for breakpoints using the
 * default overflow handler — disable the relevant breakpoints and enable
 * single-step so execution can move past the breakpointed instruction.
 */
void do_breakpoint(unsigned long esr, struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches (word-aligned PC). */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (is_default_overflow_handler(bp))
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return;

	if (user_mode(regs)) {
		/* Suspend EL0 breakpoints until the step completes. */
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-side hit: track the step state per CPU. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}
}
NOKPROBE_SYMBOL(do_breakpoint);
695
696
/*
697
* Arm64 hardware does not always report a watchpoint hit address that matches
698
* one of the watchpoints set. It can also report an address "near" the
699
* watchpoint if a single instruction access both watched and unwatched
700
* addresses. There is no straight-forward way, short of disassembling the
701
* offending instruction, to map that address back to the watchpoint. This
702
* function computes the distance of the memory access from the watchpoint as a
703
* heuristic for the likelihood that a given access triggered the watchpoint.
704
*
705
* See Section D2.10.5 "Determining the memory location that caused a Watchpoint
706
* exception" of ARMv8 Architecture Reference Manual for details.
707
*
708
* The function returns the distance of the address from the bytes watched by
709
* the watchpoint. In case of an exact match, it returns 0.
710
*/
711
static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
712
struct arch_hw_breakpoint_ctrl *ctrl)
713
{
714
u64 wp_low, wp_high;
715
u32 lens, lene;
716
717
addr = untagged_addr(addr);
718
719
lens = __ffs(ctrl->len);
720
lene = __fls(ctrl->len);
721
722
wp_low = val + lens;
723
wp_high = val + lene;
724
if (addr < wp_low)
725
return wp_low - addr;
726
else if (addr > wp_high)
727
return addr - wp_high;
728
else
729
return 0;
730
}
731
732
static int watchpoint_report(struct perf_event *wp, unsigned long addr,
733
struct pt_regs *regs)
734
{
735
int step = is_default_overflow_handler(wp);
736
struct arch_hw_breakpoint *info = counter_arch_bp(wp);
737
738
info->trigger = addr;
739
740
/*
741
* If we triggered a user watchpoint from a uaccess routine, then
742
* handle the stepping ourselves since userspace really can't help
743
* us with this.
744
*/
745
if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
746
step = 1;
747
else
748
perf_bp_event(wp, regs);
749
750
return step;
751
}
752
753
/*
 * Watchpoint exception handler: report every exact match to perf and, if
 * none matched exactly, attribute the hit to the nearest watchpoint (see
 * get_distance_from_watchpoint()). Then set up single-stepping with the
 * relevant watchpoints disabled where the kernel owns the event.
 */
void do_watchpoint(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access, closest_match = 0;
	u64 min_dist = -1, dist;	/* -1 == U64_MAX: "no candidate yet" */
	u32 ctrl_reg;
	u64 val;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = this_cpu_ptr(wp_on_reg);
	debug_info = &current->thread.debug;

	/*
	 * Find all watchpoints that match the reported address. If no exact
	 * match is found. Attribute the hit to the closest watchpoint.
	 */
	rcu_read_lock();
	for (i = 0; i < core_num_wrps; ++i) {
		wp = slots[i];
		if (wp == NULL)
			continue;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & ESR_ELx_WNR) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			continue;

		/* Check if the watchpoint value and byte select match. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		dist = get_distance_from_watchpoint(addr, val, &ctrl);
		if (dist < min_dist) {
			min_dist = dist;
			closest_match = i;
		}
		/* Is this an exact match? */
		if (dist != 0)
			continue;

		step = watchpoint_report(wp, addr, regs);
	}

	/* No exact match found? Report the closest candidate instead. */
	if (min_dist > 0 && min_dist != -1)
		step = watchpoint_report(slots[closest_match], addr, regs);

	rcu_read_unlock();

	if (!step)
		return;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-side hit: track the step state per CPU. */
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = this_cpu_ptr(&stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}
}
NOKPROBE_SYMBOL(do_watchpoint);
843
844
/*
 * Handle single-step exception.
 *
 * Re-enables any breakpoints/watchpoints that were disabled while stepping
 * over a hit, and tears down the single-step state that do_breakpoint()/
 * do_watchpoint() installed.
 */
bool try_step_suspended_breakpoints(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int *kernel_step = this_cpu_ptr(&stepping_kernel_bp);
	bool handled_exception = false;

	/*
	 * Called from single-step exception entry.
	 * Return true if we stepped a breakpoint and can resume execution,
	 * false if we need to handle a single-step.
	 */
	if (user_mode(regs)) {
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = true;
		}

		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = true;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				/* The task was already single-stepping. */
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = false;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		/* Restore the EL1 breakpoints/watchpoints we disabled. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = true;
		} else {
			/* Someone else was already stepping: let them handle it. */
			handled_exception = false;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return handled_exception;
}
NOKPROBE_SYMBOL(try_step_suspended_breakpoints);
900
901
/*
 * Context-switcher for restoring suspended breakpoints.
 *
 * Only the EL0 enable state is toggled here; per-task breakpoints
 * themselves are managed by perf.
 */
void hw_breakpoint_thread_switch(struct task_struct *next)
{
	/*
	 *           current        next
	 * disabled: 0              0     => The usual case, NOTIFY_DONE
	 *           0              1     => Disable the registers
	 *           1              0     => Enable the registers
	 *           1              1     => NOTIFY_DONE. per-task bps will
	 *                                   get taken care of by perf.
	 */

	struct debug_info *current_debug_info, *next_debug_info;

	current_debug_info = &current->thread.debug;
	next_debug_info = &next->thread.debug;

	/* Update breakpoints. */
	if (current_debug_info->bps_disabled != next_debug_info->bps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_BCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->bps_disabled);

	/* Update watchpoints. */
	if (current_debug_info->wps_disabled != next_debug_info->wps_disabled)
		toggle_bp_registers(AARCH64_DBG_REG_WCR,
				    DBG_ACTIVE_EL0,
				    !next_debug_info->wps_disabled);
}
932
933
/*
 * CPU initialisation.
 *
 * Reset (or, on warm boot, restore) the debug registers of @cpu.
 * Returns 0 (required by the cpuhp callback signature).
 */
static int hw_breakpoint_reset(unsigned int cpu)
{
	int i;
	struct perf_event **slots;
	/*
	 * When a CPU goes through cold-boot, it does not have any installed
	 * slot, so it is safe to share the same function for restoring and
	 * resetting breakpoints; when a CPU is hotplugged in, it goes
	 * through the slots, which are all empty, hence it just resets control
	 * and value for debug registers.
	 * When this function is triggered on warm-boot through a CPU PM
	 * notifier some slots might be initialized; if so they are
	 * reprogrammed according to the debug slots content.
	 */
	for (slots = this_cpu_ptr(bp_on_reg), i = 0; i < core_num_brps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
		}
	}

	for (slots = this_cpu_ptr(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
		if (slots[i]) {
			hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
		} else {
			write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
			write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
		}
	}

	return 0;
}
970
971
/*
 * One-time initialisation.
 *
 * Counts the available BRP/WRP registers, registers the CPU hotplug
 * callback that resets/restores the debug registers, and hooks the same
 * routine into the cpu_suspend path.
 */
static int __init arch_hw_breakpoint_init(void)
{
	int ret;

	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
			  "perf/arm64/hw_breakpoint:starting",
			  hw_breakpoint_reset, NULL);
	if (ret)
		pr_err("failed to register CPU hotplug notifier: %d\n", ret);

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return ret;
}
arch_initcall(arch_hw_breakpoint_init);
1000
1001
/* Required by the hw_breakpoint core; nothing to read on arm64. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
1004
1005
/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
1013
1014