GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/sparc/mm/fault_32.c
/*
 * fault.c:  Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller ([email protected])
 * Copyright (C) 1996 Eddie C. Dost ([email protected])
 * Copyright (C) 1997 Jakub Jelinek ([email protected])
 */

#include <asm/head.h>

#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>

extern int prom_node_root;

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
 * up the segment maps and page table entries (pte's).
 */

int num_segmaps, num_contexts;
int invalid_segment;

/* various Virtual Address Cache parameters we find at boot time... */

int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; sp_banks[i].num_bytes; i++)
		total += sp_banks[i].num_bytes;

	return total;
}

extern void sun4c_complete_all_stores(void);

/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
				unsigned long svaddr, unsigned long aerr,
				unsigned long avaddr)
{
	sun4c_complete_all_stores();
	printk("FAULT: NMI received\n");
	printk("SREGS: Synchronous Error %08lx\n", serr);
	printk("       Synchronous Vaddr %08lx\n", svaddr);
	printk("      Asynchronous Error %08lx\n", aerr);
	printk("      Asynchronous Vaddr %08lx\n", avaddr);
	if (sun4c_memerr_reg)
		printk("     Memory Parity Error %08lx\n", *sun4c_memerr_reg);
	printk("REGISTER DUMP:\n");
	show_regs(regs);
	prom_halt();
}

static void unhandled_fault(unsigned long, struct task_struct *,
		struct pt_regs *) __attribute__ ((noreturn));

static void unhandled_fault(unsigned long address, struct task_struct *tsk,
			    struct pt_regs *regs)
{
	if((unsigned long) address < PAGE_SIZE) {
		printk(KERN_ALERT
		    "Unable to handle kernel NULL pointer dereference\n");
	} else {
		printk(KERN_ALERT "Unable to handle kernel paging request "
		       "at virtual address %08lx\n", address);
	}
	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
		(tsk->mm ? (unsigned long) tsk->mm->pgd :
			(unsigned long) tsk->active_mm->pgd));
	die_if_kernel("Oops", regs);
}

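/* Entered from assembler stubs when a kernel access faults; judging from
 * the case comments below, this serves the _to_/_from_ user-copy macros.
 * The return value selects the fixup strategy for the caller.
 */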
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
			    unsigned long address)
{
	struct pt_regs regs;
	unsigned long g2;
	unsigned int insn;
	int i;

	i = search_extables_range(ret_pc, &g2);
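	/* The decoding below assumes the SPARC format-3 load/store
	 * encoding: instruction bit 21 (op3 bit 2) is set for stores,
	 * and op3 == 0x0f is the SWAP instruction.
	 */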
	switch (i) {
	case 3:
		/* load & store will be handled by fixup */
		return 3;

	case 1:
		/* store will be handled by fixup, load will bump out */
		/* for _to_ macros */
		insn = *((unsigned int *) pc);
		if ((insn >> 21) & 1)
			return 1;
		break;

	case 2:
		/* load will be handled by fixup, store will bump out */
		/* for _from_ macros */
		insn = *((unsigned int *) pc);
		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
			return 2;
		break;

	default:
		break;
	}

	memset(&regs, 0, sizeof (regs));
	regs.pc = pc;
	regs.npc = pc + 4;
	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop\n\t"
		"nop\n\t"
		"nop\n" : "=r" (regs.psr));
	unhandled_fault(address, current, &regs);

	/* Not reached */
	return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, sig))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p (rpc %p) sp %p error %x",
	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
	       tsk->comm, task_pid_nr(tsk), address,
	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
	       (void *)regs->u_regs[UREG_FP], code);

	print_vma_addr(KERN_CONT " in ", regs->pc);

	printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
			       unsigned long addr)
{
	siginfo_t info;

	info.si_signo = sig;
	info.si_code = code;
	info.si_errno = 0;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;

	if (unlikely(show_unhandled_signals))
		show_signal_msg(regs, sig, info.si_code,
				addr, current);

	force_sig_info (sig, &info, current);
}

extern unsigned long safe_compute_effective_address(struct pt_regs *,
						    unsigned int);

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
	unsigned int insn;

	if (text_fault)
		return regs->pc;

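	/* PSR_PS set means the fault came from supervisor mode, so the
	 * faulting instruction can be read directly; otherwise fetch it
	 * from user space.
	 */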
	if (regs->psr & PSR_PS) {
		insn = *(unsigned int *) regs->pc;
	} else {
		__get_user(insn, (unsigned int *) regs->pc);
	}

	return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
				      int text_fault)
{
	unsigned long addr = compute_si_addr(regs, text_fault);

	__do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	unsigned int fixup;
	unsigned long g2;
	int from_user = !(regs->psr & PSR_PS);
	int fault, code;

	if(text_fault)
		address = regs->pc;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	code = SEGV_MAPERR;
	if (!ARCH_SUN4C && address >= TASK_SIZE)
		goto vmalloc_fault;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_atomic() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	down_read(&mm->mmap_sem);

	/*
	 * The kernel referencing a bad kernel pointer can lock up
	 * a sun4c machine completely, so we must attempt recovery.
	 */
	if(!from_user && address >= PAGE_OFFSET)
		goto bad_area;

	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* Allow reads even for write-only mappings */
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR) {
		current->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
			      regs, address);
	} else {
		current->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
			      regs, address);
	}
	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (from_user) {
		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
		return;
	}

	/* Is this in ex_table? */
no_context:
	g2 = regs->u_regs[UREG_G2];
	if (!from_user) {
		fixup = search_extables_range(regs->pc, &g2);
		if (fixup > 10) { /* Values below are reserved for other things */
			extern const unsigned __memset_start[];
			extern const unsigned __memset_end[];
			extern const unsigned __csum_partial_copy_start[];
			extern const unsigned __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
				regs->pc, fixup, g2);
#endif
			if ((regs->pc >= (unsigned long)__memset_start &&
			     regs->pc < (unsigned long)__memset_end) ||
			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
				regs->u_regs[UREG_I4] = address;
				regs->u_regs[UREG_I5] = regs->pc;
			}
			regs->u_regs[UREG_G2] = g2;
			regs->pc = fixup;
			regs->npc = regs->pc + 4;
			return;
		}
	}

	unhandled_fault (address, tsk, regs);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (from_user) {
		pagefault_out_of_memory();
		return;
	}
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
	if (!from_user)
		goto no_context;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pmd_t *pmd, *pmd_k;

		pgd = tsk->active_mm->pgd + offset;
		pgd_k = init_mm.pgd + offset;

		if (!pgd_present(*pgd)) {
			if (!pgd_present(*pgd_k))
				goto bad_area_nosemaphore;
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}

		pmd = pmd_offset(pgd, address);
		pmd_k = pmd_offset(pgd_k, address);

		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_area_nosemaphore;
		*pmd = *pmd_k;
		return;
	}
}

asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
			       unsigned long address)
{
	extern void sun4c_update_mmu_cache(struct vm_area_struct *,
					   unsigned long,pte_t *);
	extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	pgd_t *pgdp;
	pte_t *ptep;

	if (text_fault) {
		address = regs->pc;
	} else if (!write &&
		   !(regs->psr & PSR_PS)) {
		unsigned int insn, __user *ip;

		ip = (unsigned int __user *)regs->pc;
		if (!get_user(insn, ip)) {
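			/* This mask/match pair appears to pick out the
			 * SPARC atomic read-modify-write instructions
			 * (ldstub/swap and their alternate-space forms),
			 * which fault as reads but must be handled as
			 * writes.
			 */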
			if ((insn & 0xc1680000) == 0xc0680000)
				write = 1;
		}
	}

	if (!mm) {
		/* We are oopsing. */
		do_sparc_fault(regs, text_fault, write, address);
		BUG();	/* P3 Oops already, you bitch */
	}

	pgdp = pgd_offset(mm, address);
	ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);

	if (pgd_val(*pgdp)) {
	    if (write) {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_MODIFIED |
				      _SUN4C_PAGE_VALID |
				      _SUN4C_PAGE_DIRTY);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    } else {
		if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
				   == (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
			unsigned long flags;

			*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
				      _SUN4C_PAGE_VALID);

			local_irq_save(flags);
			if (sun4c_get_segmap(address) != invalid_segment) {
				sun4c_put_pte(address, pte_val(*ptep));
				local_irq_restore(flags);
				return;
			}
			local_irq_restore(flags);
		}
	    }
	}

	/* This conditional is 'interesting'. */
	if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
	    && (pte_val(*ptep) & _SUN4C_PAGE_VALID))
		/* Note: It is safe to not grab the MMAP semaphore here because
		 *       we know that update_mmu_cache() will not sleep for
		 *       any reason (at least not in the current implementation)
		 *       and therefore there is no danger of another thread getting
		 *       on the CPU and doing a shrink_mmap() on this vma.
		 */
		sun4c_update_mmu_cache (find_vma(current->mm, address), address,
					ptep);
	else
		do_sparc_fault(regs, text_fault, write, address);
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code;

	code = SEGV_MAPERR;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if(!vma)
		goto bad_area;
	if(vma->vm_start <= address)
		goto good_area;
	if(!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if(expand_stack(vma, address))
		goto bad_area;
good_area:
	code = SEGV_ACCERR;
	if(write) {
		if(!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}
	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
	case VM_FAULT_SIGBUS:
	case VM_FAULT_OOM:
		goto do_sigbus;
	}
	up_read(&mm->mmap_sem);
	return;
bad_area:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
	return;

do_sigbus:
	up_read(&mm->mmap_sem);
	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
	if (sp & 0x7UL)
		force_sig(SIGILL, current);
}
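
/* A register window save area is 16 words (64 bytes); given the 8-byte
 * stack alignment enforced above, sp + 0x38 is its last doubleword.
 * When that lies on a different page than sp itself, the save area
 * straddles a page boundary and both pages must be faulted in.
 */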

void window_overflow_fault(void)
{
	unsigned long sp;

	sp = current_thread_info()->rwbuf_stkptrs[0];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 1);
	force_user_fault(sp, 1);

	check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
	unsigned long sp;

	sp = regs->u_regs[UREG_FP];
	if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
		force_user_fault(sp + 0x38, 0);
	force_user_fault(sp, 0);

	check_stack_aligned(sp);
}