GitHub Repository: torvalds/linux
Path: blob/master/arch/loongarch/kernel/ptrace.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 1999 MIPS Technologies, Inc.
 * Copyright (C) 2000 Ulf Carlsson
 */
#include <linux/kernel.h>
#include <linux/audit.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/seccomp.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/syscall.h>

static void init_fp_ctx(struct task_struct *target)
{
	/* The target already has context */
	if (tsk_used_math(target))
		return;

	/* Begin with data registers set to all 1s... */
	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
	set_stopped_child_used_math(target);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;
	struct pt_regs *regs = task_pt_regs(target);

	r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
	r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
	r = membuf_write(&to, &regs->csr_era, sizeof(u64));
	r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));

	return r;
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err;
	int a0_start = sizeof(u64) * GPR_NUM;
	int era_start = a0_start + sizeof(u64);
	int badvaddr_start = era_start + sizeof(u64);
	struct pt_regs *regs = task_pt_regs(target);

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs->regs,
				 0, a0_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->orig_a0,
				  a0_start, a0_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_era,
				  era_start, era_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &regs->csr_badvaddr,
				  badvaddr_start, badvaddr_start + sizeof(u64));

	return err;
}
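
/*
 * Layout note: the NT_PRSTATUS payload assembled by gpr_get() above is
 * the 32 GPRs followed by orig_a0, csr_era and csr_badvaddr, each 64 bits
 * wide; gpr_set() accepts the same layout. A debugger would fetch it
 * roughly as follows (hypothetical userspace sketch, not part of this
 * file):
 *
 *	struct user_pt_regs uregs;
 *	struct iovec iov = { .iov_base = &uregs, .iov_len = sizeof(uregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */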

/*
 * Get the general floating-point registers.
 */
static int gfpr_get(struct task_struct *target, struct membuf *to)
{
	return membuf_write(to, &target->thread.fpu.fpr,
			    sizeof(elf_fpreg_t) * NUM_FPU_REGS);
}

static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
{
	int i, r;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++) {
		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
		r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
	}

	return r;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC and FCSR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	save_fpu_regs(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		r = gfpr_get(target, &to);
	else
		r = gfpr_get_simd(target, &to);

	r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
	r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));

	return r;
}

static int gfpr_set(struct task_struct *target,
		    unsigned int *pos, unsigned int *count,
		    const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu.fpr,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

static int gfpr_set_simd(struct task_struct *target,
			 unsigned int *pos, unsigned int *count,
			 const void **kbuf, const void __user **ubuf)
{
	int i, err;
	u64 fpr_val;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Choose the appropriate helper for general registers, and then copy
 * the FCC register separately.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fcsr_start = fcc_start + sizeof(u64);
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));
	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
	else
		err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcc, fcc_start,
				  fcc_start + sizeof(u64));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.fcsr, fcsr_start,
				  fcsr_start + sizeof(u32));

	return err;
}
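
/*
 * Layout note: the NT_PRFPREG view above is the 32 64-bit FP registers
 * (the low 64 bits of each vector register when the build has LSX/LASX),
 * then the 8-byte fcc block at offset 256 and the 32-bit fcsr at offset
 * 264, matching fcc_start and fcsr_start in fpr_set().
 */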

static int cfg_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int i, r;
	u32 cfg_val;

	i = 0;
	while (to.left > 0) {
		cfg_val = read_cpucfg(i++);
		r = membuf_write(&to, &cfg_val, sizeof(u32));
	}

	return r;
}

/*
 * CFG registers are read-only.
 */
static int cfg_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	return 0;
}
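
/*
 * Returning 0 without copying anything makes a write to this regset a
 * silent no-op rather than an error, so setting every regset in one
 * sweep does not fail on the read-only CPUCFG view.
 */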

#ifdef CONFIG_CPU_HAS_LSX

static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to, unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
			membuf_store(to, fill);
		}
	}
}

static int simd_get(struct task_struct *target,
		    const struct user_regset *regset,
		    struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;

	save_fpu_regs(target);

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or LSX, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
#ifdef CONFIG_CPU_HAS_LASX
	} else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
		/* Copy LSX 128 Bit context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 16);
#endif
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
	}

	return 0;
}

static int simd_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_LSX */
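
/*
 * The LSX (128-bit) and LASX (256-bit) regsets above share storage with
 * the scalar FPRs: fpr[i] holds the whole vector register, regset->size
 * (16 or 32 bytes) says how much of it the view exposes, and
 * copy_pad_fprs() fills whatever is not live with 0xff bytes.
 */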

#ifdef CONFIG_CPU_HAS_LBT
static int lbt_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	int r;

	r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
	r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
	r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
	r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
	r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
	r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));

	return r;
}

static int lbt_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int err = 0;
	const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
	const int ftop_start = eflags_start + sizeof(u32);

	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.scr0,
				  0, 4 * sizeof(target->thread.lbt.scr0));
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.lbt.eflags,
				  eflags_start, ftop_start);
	err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpu.ftop,
				  ftop_start, ftop_start + sizeof(u32));

	return err;
}
#endif /* CONFIG_CPU_HAS_LBT */
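
/*
 * The NT_LOONGARCH_LBT payload above packs scr0..scr3 (4 x u64) followed
 * by the 32-bit eflags and the 32-bit ftop, 40 bytes in all, which is
 * why the regset table below declares it as five u64-sized slots.
 */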

#ifdef CONFIG_HAVE_HW_BREAKPOINT

/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
		if (current->thread.hbp_break[i] == bp)
			break;

	for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
		if (current->thread.hbp_watch[i] == bp)
			break;

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}

static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
					       struct task_struct *tsk,
					       unsigned long idx)
{
	struct perf_event *bp;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		bp = tsk->thread.hbp_break[idx];
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return ERR_PTR(-EINVAL);
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		bp = tsk->thread.hbp_watch[idx];
		break;
	}

	return bp;
}

static int ptrace_hbp_set_event(unsigned int note_type,
				struct task_struct *tsk,
				unsigned long idx,
				struct perf_event *bp)
{
	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		if (idx >= LOONGARCH_MAX_BRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
		tsk->thread.hbp_break[idx] = bp;
		break;
	case NT_LOONGARCH_HW_WATCH:
		if (idx >= LOONGARCH_MAX_WRP)
			return -EINVAL;
		idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
		tsk->thread.hbp_watch[idx] = bp;
		break;
	}

	return 0;
}

static struct perf_event *ptrace_hbp_create(unsigned int note_type,
					    struct task_struct *tsk,
					    unsigned long idx)
{
	int err, type;
	struct perf_event *bp;
	struct perf_event_attr attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		type = HW_BREAKPOINT_X;
		break;
	case NT_LOONGARCH_HW_WATCH:
		type = HW_BREAKPOINT_RW;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ptrace_breakpoint_init(&attr);

	/*
	 * Initialise fields to sane defaults
	 * (i.e. values that will pass validation).
	 */
	attr.bp_addr = 0;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = type;
	attr.disabled = 1;

	bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
	if (IS_ERR(bp))
		return bp;

	err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
	if (err)
		return ERR_PTR(err);

	return bp;
}

static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
				     struct arch_hw_breakpoint_ctrl ctrl,
				     struct perf_event_attr *attr)
{
	int err, len, type;

	err = arch_bp_generic_fields(ctrl, &len, &type);
	if (err)
		return err;

	attr->bp_len = len;
	attr->bp_type = type;

	return 0;
}

static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
{
	u8 num;
	u64 reg = 0;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		num = hw_breakpoint_slots(TYPE_INST);
		break;
	case NT_LOONGARCH_HW_WATCH:
		num = hw_breakpoint_slots(TYPE_DATA);
		break;
	default:
		return -EINVAL;
	}

	*info = reg | num;

	return 0;
}

static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}

static int ptrace_hbp_get_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 *ctrl)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;

	return 0;
}

static int ptrace_hbp_get_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *mask)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*mask = bp ? counter_arch_bp(bp)->mask : 0;

	return 0;
}

static int ptrace_hbp_get_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 *addr)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (IS_ERR(bp))
		return PTR_ERR(bp);

	*addr = bp ? counter_arch_bp(bp)->address : 0;

	return 0;
}

static int ptrace_hbp_set_ctrl(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u32 uctrl)
{
	int err;
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct thread_info *ti = task_thread_info(tsk);

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;

	switch (note_type) {
	case NT_LOONGARCH_HW_BREAK:
		ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
		ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
		break;
	case NT_LOONGARCH_HW_WATCH:
		decode_ctrl_reg(uctrl, &ctrl);
		break;
	default:
		return -EINVAL;
	}

	if (uctrl & CTRL_PLV_ENABLE) {
		err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
		if (err)
			return err;
		attr.disabled = 0;
		set_ti_thread_flag(ti, TIF_LOAD_WATCH);
	} else {
		attr.disabled = 1;
		clear_ti_thread_flag(ti, TIF_LOAD_WATCH);
	}

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_mask(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 mask)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	info = counter_arch_bp(bp);
	info->mask = mask;

	return modify_user_hw_breakpoint(bp, &attr);
}

static int ptrace_hbp_set_addr(unsigned int note_type,
			       struct task_struct *tsk,
			       unsigned long idx, u64 addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;

	/* Kernel-space address cannot be monitored by user-space */
#ifdef CONFIG_32BIT
	if ((unsigned long)addr >= KPRANGE0)
		return -EINVAL;
#else
	if ((unsigned long)addr >= XKPRANGE)
		return -EINVAL;
#endif

	bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
	if (IS_ERR(bp))
		return PTR_ERR(bp);

	attr = bp->attr;
	attr.bp_addr = addr;

	return modify_user_hw_breakpoint(bp, &attr);
}

#define PTRACE_HBP_ADDR_SZ	sizeof(u64)
#define PTRACE_HBP_MASK_SZ	sizeof(u64)
#define PTRACE_HBP_CTRL_SZ	sizeof(u32)
#define PTRACE_HBP_PAD_SZ	sizeof(u32)

static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			struct membuf to)
{
	u64 info;
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	membuf_write(&to, &info, sizeof(info));

	/* (address, mask, ctrl) registers */
	while (to.left) {
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
		if (ret)
			return ret;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;

		membuf_store(&to, addr);
		membuf_store(&to, mask);
		membuf_store(&to, ctrl);
		membuf_zero(&to, sizeof(u32));
		idx++;
	}

	return 0;
}

static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	u32 ctrl;
	u64 addr, mask;
	int ret, idx = 0, offset, limit;
	unsigned int note_type = regset->core_note_type;

	/* Resource info */
	offset = offsetof(struct user_watch_state_v2, dbg_regs);
	user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);

	/* (address, mask, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		if (count < PTRACE_HBP_ADDR_SZ)
			return -EINVAL;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		if (!count)
			break;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
					 offset, offset + PTRACE_HBP_MASK_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
		if (ret)
			return ret;
		offset += PTRACE_HBP_MASK_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;

		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;

		user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					  offset, offset + PTRACE_HBP_PAD_SZ);
		offset += PTRACE_HBP_PAD_SZ;

		idx++;
	}

	return 0;
}
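
/*
 * Both hardware breakpoint regsets use the user_watch_state_v2 layout:
 * one u64 of resource info, then per-slot records of {u64 addr, u64 mask,
 * u32 ctrl, u32 pad}, which is what the PTRACE_HBP_*_SZ offsets above
 * step through.
 */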

#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(orig_a0, orig_a0),
	REG_OFFSET_NAME(csr_era, csr_era),
	REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
	REG_OFFSET_NAME(csr_crmd, csr_crmd),
	REG_OFFSET_NAME(csr_prmd, csr_prmd),
	REG_OFFSET_NAME(csr_euen, csr_euen),
	REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
	REG_OFFSET_NAME(csr_estat, csr_estat),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name: the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

enum loongarch_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_CPUCFG,
#ifdef CONFIG_CPU_HAS_LSX
	REGSET_LSX,
#endif
#ifdef CONFIG_CPU_HAS_LASX
	REGSET_LASX,
#endif
#ifdef CONFIG_CPU_HAS_LBT
	REGSET_LBT,
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};

static const struct user_regset loongarch64_regsets[] = {
	[REGSET_GPR] = {
		USER_REGSET_NOTE_TYPE(PRSTATUS),
		.n		= ELF_NGREG,
		.size		= sizeof(elf_greg_t),
		.align		= sizeof(elf_greg_t),
		.regset_get	= gpr_get,
		.set		= gpr_set,
	},
	[REGSET_FPR] = {
		USER_REGSET_NOTE_TYPE(PRFPREG),
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_CPUCFG] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_CPUCFG),
		.n		= 64,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= cfg_get,
		.set		= cfg_set,
	},
#ifdef CONFIG_CPU_HAS_LSX
	[REGSET_LSX] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_LSX),
		.n		= NUM_FPU_REGS,
		.size		= 16,
		.align		= 16,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LASX
	[REGSET_LASX] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_LASX),
		.n		= NUM_FPU_REGS,
		.size		= 32,
		.align		= 32,
		.regset_get	= simd_get,
		.set		= simd_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_LBT
	[REGSET_LBT] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_LBT),
		.n		= 5,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= lbt_get,
		.set		= lbt_set,
	},
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_HW_BREAK),
		.n		= sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		USER_REGSET_NOTE_TYPE(LOONGARCH_HW_WATCH),
		.n		= sizeof(struct user_watch_state_v2) / sizeof(u32),
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= hw_break_get,
		.set		= hw_break_set,
	},
#endif
};

static const struct user_regset_view user_loongarch64_view = {
	.name		= "loongarch64",
	.e_machine	= ELF_ARCH,
	.regsets	= loongarch64_regsets,
	.n		= ARRAY_SIZE(loongarch64_regsets),
};


const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_loongarch64_view;
}

static inline int read_user(struct task_struct *target, unsigned long addr,
			    unsigned long __user *data)
{
	unsigned long tmp = 0;

	switch (addr) {
	case 0 ... 31:
		tmp = task_pt_regs(target)->regs[addr];
		break;
	case ARG0:
		tmp = task_pt_regs(target)->orig_a0;
		break;
	case PC:
		tmp = task_pt_regs(target)->csr_era;
		break;
	case BADVADDR:
		tmp = task_pt_regs(target)->csr_badvaddr;
		break;
	default:
		return -EIO;
	}

	return put_user(tmp, data);
}

static inline int write_user(struct task_struct *target, unsigned long addr,
			     unsigned long data)
{
	switch (addr) {
	case 0 ... 31:
		task_pt_regs(target)->regs[addr] = data;
		break;
	case ARG0:
		task_pt_regs(target)->orig_a0 = data;
		break;
	case PC:
		task_pt_regs(target)->csr_era = data;
		break;
	case BADVADDR:
		task_pt_regs(target)->csr_badvaddr = data;
		break;
	default:
		return -EIO;
	}

	return 0;
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (void __user *) data;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = write_user(child, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
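
/*
 * Note that read_user() stores its result through the data pointer, so
 * PTRACE_PEEKUSR here hands the value back via the fourth ptrace()
 * argument. A hypothetical userspace sketch (not part of this file),
 * using the PC index decoded by read_user():
 *
 *	unsigned long pc;
 *	ptrace(PTRACE_PEEKUSR, pid, PC, &pc);
 */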

#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}

static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	struct arch_hw_breakpoint *info;
	struct thread_struct *thread = &tsk->thread;

	bp = thread->hbp_break[0];
	if (!bp) {
		ptrace_breakpoint_init(&attr);

		attr.bp_addr = addr;
		attr.bp_len = HW_BREAKPOINT_LEN_8;
		attr.bp_type = HW_BREAKPOINT_X;

		bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
						 NULL, tsk);
		if (IS_ERR(bp))
			return PTR_ERR(bp);

		thread->hbp_break[0] = bp;
	} else {
		int err;

		attr = bp->attr;
		attr.bp_addr = addr;

		/* Reenable breakpoint */
		attr.disabled = false;
		err = modify_user_hw_breakpoint(bp, &attr);
		if (unlikely(err))
			return err;

		csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
	}
	info = counter_arch_bp(bp);
	info->mask = TASK_SIZE - 1;

	return 0;
}
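
/*
 * Informally: there is no hardware single-step flag here, so
 * set_single_step() repurposes instruction breakpoint 0 and widens its
 * address mask to TASK_SIZE - 1, so that it matches (nearly) any user
 * address and thus fires on the next instruction the task executes.
 */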

/* ptrace API */
void user_enable_single_step(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);

	set_single_step(task, task_pt_regs(task)->csr_era);
	task->thread.single_step = task_pt_regs(task)->csr_era;
	set_ti_thread_flag(ti, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
#endif