GitHub Repository: torvalds/linux
Path: blob/master/arch/hexagon/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel traps/events for Hexagon processor
 *
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <asm/traps.h>
#include <asm/vm_fault.h>
#include <asm/syscall.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/sections.h>
#ifdef CONFIG_KGDB
# include <linux/kgdb.h>
#endif

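/* trap0 cause codes dispatched in do_trap0() below */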
#define TRAP_SYSCALL 1
#define TRAP_DEBUG 0xdb

#ifdef CONFIG_GENERIC_BUG
/* Maybe should resemble arch/sh/kernel/traps.c ?? */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_GENERIC_BUG */

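/*
 * Map a hypervisor general-exception cause code to a human-readable
 * description for the stack dumper and oops output below.
 */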
static const char *ex_name(int ex)
{
	switch (ex) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		return "Execute protection fault";
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		return "Read protection fault";
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		return "Write protection fault";
	case HVM_GE_C_XMAL:
		return "Misaligned instruction";
	case HVM_GE_C_WREG:
		return "Multiple writes to same register in packet";
	case HVM_GE_C_PCAL:
		return "Program counter values that are not properly aligned";
	case HVM_GE_C_RMAL:
		return "Misaligned data load";
	case HVM_GE_C_WMAL:
		return "Misaligned data store";
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		return "Illegal instruction";
	case HVM_GE_C_BUS:
		return "Precise bus error";
	case HVM_GE_C_CACHE:
		return "Cache error";

	case 0xdb:
		return "Debugger trap";

	default:
		return "Unrecognized exception";
	}
}

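/*
 * Walk the frame-pointer chain and print one line per frame, resolving
 * return addresses with kallsyms. A NULL saved frame pointer marks an
 * exception record (struct pt_regs) on the stack, which is decoded and,
 * where possible, stepped past so the trace can continue.
 */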
static void do_show_stack(struct task_struct *task, unsigned long *fp,
			  unsigned long ip, const char *loglvl)
{
	int kstack_depth_to_print = 24;
	unsigned long offset, size;
	const char *name = NULL;
	unsigned long *newfp;
	unsigned long low, high;
	char tmpstr[128];
	char *modname;
	int i;

	if (task == NULL)
		task = current;

	printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
	       task->comm, task_pid_nr(task));

	if (fp == NULL) {
		if (task == current) {
			asm("%0 = r30" : "=r" (fp));
		} else {
			fp = (unsigned long *)
			     ((struct hexagon_switch_stack *)
			     task->thread.switch_sp)->fp;
		}
	}

	if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
		printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
		return;
	}

	/* Saved link reg is one word above FP */
	if (!ip)
		ip = *(fp+1);

	/* Expect kernel stack to be in-bounds */
	low = (unsigned long)task_stack_page(task);
	high = low + THREAD_SIZE - 8;
	low += sizeof(struct thread_info);

	for (i = 0; i < kstack_depth_to_print; i++) {

		name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);

		printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
		if (((unsigned long) fp < low) || (high < (unsigned long) fp))
			printk(KERN_CONT " (FP out of bounds!)");
		if (modname)
			printk(KERN_CONT " [%s] ", modname);
		printk(KERN_CONT "\n");

		newfp = (unsigned long *) *fp;

		if (((unsigned long) newfp) & 0x3) {
			printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
			break;
		}

		/* Attempt to continue past exception. */
		if (!newfp) {
			struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
						+ 8);

			if (regs->syscall_nr != -1) {
				printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
					regs->syscall_nr);
				printk(KERN_CONT " psp: %lx elr: %lx\n",
					pt_psp(regs), pt_elr(regs));
				break;
			} else {
				/* really want to see more ... */
				kstack_depth_to_print += 6;
				printk("%s-- %s (0x%lx) badva: %lx\n", loglvl,
					ex_name(pt_cause(regs)), pt_cause(regs),
					pt_badva(regs));
			}

			newfp = (unsigned long *) regs->r30;
			ip = pt_elr(regs);
		} else {
			ip = *(newfp + 1);
		}

		/* If link reg is null, we are done. */
		if (ip == 0x0)
			break;

		/* If newfp isn't larger, we're tracing garbage. */
		if (newfp > fp)
			fp = newfp;
		else
			break;
	}
}

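/*
 * Arch entry point for stack dumps (e.g. via dump_stack()); a NULL fp
 * means "derive the starting frame from the task itself".
 */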
void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
	/* Saved link reg is one word above FP */
	do_show_stack(task, fp, 0, loglvl);
}

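/*
 * Oops path: serialize concurrent oopses, dump registers and the kernel
 * stack, taint the kernel, then kill the current task (or panic if the
 * fault was taken in interrupt context or panic_on_oops is set).
 */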
int die(const char *str, struct pt_regs *regs, long err)
{
	static struct {
		spinlock_t lock;
		int counter;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.counter = 0
	};

	console_verbose();
	oops_enter();

	spin_lock_irq(&die.lock);
	bust_spinlocks(1);
	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);

	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
	    NOTIFY_STOP) {
		spin_unlock_irq(&die.lock);
		return 1;
	}

	print_modules();
	show_regs(regs);
	do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	spin_unlock_irq(&die.lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	make_task_dead(err);
	return 0;
}

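/* Oops only for faults taken in kernel mode; callers handle the user-mode case with a signal. */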
int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		return die(str, regs, err);
	else
		return 0;
}

/*
 * It's not clear that misaligned fetches are ever recoverable.
 */
static void misaligned_instruction(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Instruction", regs, 0);
	force_sig(SIGBUS);
}

/*
 * Misaligned loads and stores, on the other hand, can be
 * emulated, and probably should be, some day. But for now
 * they will be considered fatal.
 */
static void misaligned_data_load(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Load", regs, 0);
	force_sig(SIGBUS);
}

static void misaligned_data_store(struct pt_regs *regs)
{
	die_if_kernel("Misaligned Data Store", regs, 0);
	force_sig(SIGBUS);
}

static void illegal_instruction(struct pt_regs *regs)
{
	die_if_kernel("Illegal Instruction", regs, 0);
	force_sig(SIGILL);
}

/*
 * Precise bus errors may be recoverable with a retry,
 * but for now, treat them as irrecoverable.
 */
static void precise_bus_error(struct pt_regs *regs)
{
	die_if_kernel("Precise Bus Error", regs, 0);
	force_sig(SIGBUS);
}

/*
 * If anything is to be done here other than panic,
 * it will probably be complex and migrate to another
 * source module. For now, just die.
 */
static void cache_error(struct pt_regs *regs)
{
	die("Cache Error", regs, 0);
}

/*
 * General exception handler
 */
void do_genex(struct pt_regs *regs);
void do_genex(struct pt_regs *regs)
{
	/*
	 * Decode Cause and Dispatch
	 */
	switch (pt_cause(regs)) {
	case HVM_GE_C_XPROT:
	case HVM_GE_C_XUSER:
		execute_protection_fault(regs);
		break;
	case HVM_GE_C_RPROT:
	case HVM_GE_C_RUSER:
		read_protection_fault(regs);
		break;
	case HVM_GE_C_WPROT:
	case HVM_GE_C_WUSER:
		write_protection_fault(regs);
		break;
	case HVM_GE_C_XMAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_WREG:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_PCAL:
		misaligned_instruction(regs);
		break;
	case HVM_GE_C_RMAL:
		misaligned_data_load(regs);
		break;
	case HVM_GE_C_WMAL:
		misaligned_data_store(regs);
		break;
	case HVM_GE_C_INVI:
	case HVM_GE_C_PRIVI:
		illegal_instruction(regs);
		break;
	case HVM_GE_C_BUS:
		precise_bus_error(regs);
		break;
	case HVM_GE_C_CACHE:
		cache_error(regs);
		break;
	default:
		/* Halt and catch fire */
		panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
		break;
	}
}

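/*
 * Trap0 handler: cause 1 (TRAP_SYSCALL) is the system call trap,
 * cause 0xdb (TRAP_DEBUG) is the debugger breakpoint trap.
 */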
void do_trap0(struct pt_regs *regs);
void do_trap0(struct pt_regs *regs)
{
	syscall_fn syscall;

	switch (pt_cause(regs)) {
	case TRAP_SYSCALL:
		/* System call is trap0 #1 */

		/* allow strace to catch syscall args */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
			ptrace_report_syscall_entry(regs)))
			return; /* return -ENOSYS somewhere? */

		/* Interrupts should be re-enabled for syscall processing */
		__vmsetie(VM_INT_ENABLE);

		/*
		 * System call number is in r6, arguments in r0..r5.
		 * Fortunately, no Linux syscall has more than 6 arguments,
		 * and Hexagon ABI passes first 6 arguments in registers.
		 * 64-bit arguments are passed in odd/even register pairs.
		 * Fortunately, we have no system calls that take more
		 * than three arguments with more than one 64-bit value.
		 * Should that change, we'd need to redesign to copy
		 * between user and kernel stacks.
		 */
		regs->syscall_nr = regs->r06;

		/*
		 * GPR R0 carries the first parameter, and is also used
		 * to report the return value. We need a backup of
		 * the user's value in case we need to do a late restart
		 * of the system call.
		 */
		regs->restart_r0 = regs->r00;

		if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
			regs->r00 = -1;
		} else {
			syscall = (syscall_fn)
				  (sys_call_table[regs->syscall_nr]);
			regs->r00 = syscall(regs->r00, regs->r01,
					    regs->r02, regs->r03,
					    regs->r04, regs->r05);
		}

		/* allow strace to get the syscall return state */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
			ptrace_report_syscall_exit(regs, 0);

		break;
	case TRAP_DEBUG:
		/* Trap0 0xdb is debug breakpoint */
		if (user_mode(regs)) {
			/*
			 * Some architectures add some per-thread state
			 * to distinguish between breakpoint traps and
			 * trace traps. We may want to do that, and
			 * set the si_code value appropriately, or we
			 * may want to use a different trap0 flavor.
			 */
			force_sig_fault(SIGTRAP, TRAP_BRKPT,
					(void __user *) pt_elr(regs));
		} else {
#ifdef CONFIG_KGDB
			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
					      TRAP_BRKPT, regs);
#endif
		}
		break;
	}
	/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}

/*
 * Machine check exception handler
 */
void do_machcheck(struct pt_regs *regs);
void do_machcheck(struct pt_regs *regs)
{
	/* Halt and catch fire */
	__vmstop();
}

/*
 * Treat this like the old 0xdb trap.
 */

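/*
 * Rewrite the cause field of the saved VM event record to TRAP_DEBUG
 * so the breakpoint path in do_trap0() above handles it.
 */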
void do_debug_exception(struct pt_regs *regs);
void do_debug_exception(struct pt_regs *regs)
{
	regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
	regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
	do_trap0(regs);
}