GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/tile/kernel/single_step.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__ /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>

#define signExtend17(val) sign_extend((val), 17)
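/*
 * TILE_X1_MASK selects the portion of a bundle that move_X1() and addi_X1()
 * clear before OR'ing in a replacement X1 instruction.
 */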
#define TILE_X1_MASK (0xffffffffULL << 31)

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

unsigned int unaligned_fixup_count;

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};

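/*
 * Bundle-rewriting helpers: each takes a 64-bit instruction bundle and
 * returns a copy with its X1 slot rewritten (a new branch offset, a
 * register move, a nop, or an addi).
 */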
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;
	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}

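/*
 * Emulate an unaligned load or store with copy_{from,to}_user(), recording
 * any destination-register update in *state, and return a replacement
 * bundle (a prefetch, an addi, or a nop) to run in the single-step buffer.
 * Accesses we don't handle come back unchanged; a return value of zero
 * means a signal was posted and nothing should be executed.
 */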
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg. For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target. For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image. This allows us
 * to reset the info state. Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction. If the instruction
 * sets the lr, then that is noted. If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state. We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */

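	/*
	 * Template bundles, assembled into .rodata below; they are patched
	 * as needed (destination/source registers, immediates, jump offset)
	 * and copied into the per-thread step buffer.
	 */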
	asm(
"	.pushsection .rodata.single_step\n"
"	.align 8\n"
"	.globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"	ill\n"
"	.globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"	{ nop; addli r0, zero, 0 }\n"
"	.globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"	{ nop; auli r0, r0, 0 }\n"
"	.globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
"	j .\n"
"	.popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
					PROT_EXEC | PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS,
					0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;

		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction. So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken. We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken. We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction. The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken. If the pc is the
			 * second ill instruction, then the branch was
			 * taken. The new pc is computed for these cases, and
			 * inserted into the registers for the thread. If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored. This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again. Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register. We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
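			/*
			 * addli sign-extends its 16-bit immediate, so bias
			 * the high half by 0x8000 so that auli's (ha16 << 16)
			 * plus the sign-extended low half adds back up to
			 * update_value.
			 */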
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else
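/*
 * tilegx: the hardware provides single-step support directly, driven
 * through the SPR_SINGLE_STEP_* registers used below.
 */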
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

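/*
 * PC at which single-stepping was most recently armed on this cpu;
 * gx_singlestep_handle() compares it with the current PC (together with
 * the CANCELED bit) to decide whether the stepped instruction actually
 * executed before reporting SIGTRAP.
 */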
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);


/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction. Fortunately
 * it changes the PC. If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */

void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}


/*
 * Called from need_singlestep. Set up the control registers and the enable
 * register, then return back.
 */

void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */