GitHub Repository: torvalds/linux
Path: blob/master/arch/arc/mm/fault.c
// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/entry.h>
#include <asm/mmu.h>

/*
 * Handle a fault on a kernel virtual address, as required to implement
 * vmalloc/pkmap/fixmap (refer to asm/processor.h for the System Memory Map).
 *
 * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
 * from the swapper pgdir to the task pgdir; the 2nd level table/page is thus
 * shared between the two.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
        /*
         * Synchronize this task's top level page-table
         * with the 'reference' page table.
         */
        pgd_t *pgd, *pgd_k;
        p4d_t *p4d, *p4d_k;
        pud_t *pud, *pud_k;
        pmd_t *pmd, *pmd_k;

        pgd = pgd_offset(current->active_mm, address);
        pgd_k = pgd_offset_k(address);

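        /*
         * Walk both tables in lockstep, one level at a time: a missing
         * entry in the master (kernel) table means the address is bogus,
         * while a missing entry in the task's table is filled by copying
         * the master's, so the lower-level table ends up shared.
         */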
        if (pgd_none(*pgd_k))
                goto bad_area;
        if (!pgd_present(*pgd))
                set_pgd(pgd, *pgd_k);

        p4d = p4d_offset(pgd, address);
        p4d_k = p4d_offset(pgd_k, address);
        if (p4d_none(*p4d_k))
                goto bad_area;
        if (!p4d_present(*p4d))
                set_p4d(p4d, *p4d_k);

        pud = pud_offset(p4d, address);
        pud_k = pud_offset(p4d_k, address);
        if (pud_none(*pud_k))
                goto bad_area;
        if (!pud_present(*pud))
                set_pud(pud, *pud_k);

        pmd = pmd_offset(pud, address);
        pmd_k = pmd_offset(pud_k, address);
        if (pmd_none(*pmd_k))
                goto bad_area;
        if (!pmd_present(*pmd))
                set_pmd(pmd, *pmd_k);

        /* XXX: create the TLB entry here */
        return 0;

bad_area:
        return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int sig, si_code = SEGV_MAPERR;
        unsigned int write = 0, exec = 0, mask;
        vm_fault_t fault = VM_FAULT_SIGSEGV;    /* handle_mm_fault() output */
        unsigned int flags;                     /* handle_mm_fault() input */

        /*
         * NOTE! We MUST NOT take any locks for this case. We may be in
         * an interrupt or a critical region, and should only copy the
         * information from the master page table, nothing more.
         */
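        /*
         * Only kernel-mode faults are fixed up here: a user-mode access
         * to a kernel virtual address falls through to the regular path
         * below, finds no VMA, and ends in SIGSEGV.
         */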
        if (address >= VMALLOC_START && !user_mode(regs)) {
                if (unlikely(handle_kernel_vaddr_fault(address)))
                        goto no_context;
                else
                        return;
        }

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

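        /*
         * Classify the access from the ARC Exception Cause Register:
         * a protection violation on a store (or atomic ST/EX) counts as
         * a write, one on an instruction fetch as an exec; anything
         * else is treated as a read.
         */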
        if (regs->ecr.cause & ECR_C_PROTV_STORE)        /* ST/EX */
                write = 1;
        else if ((regs->ecr.vec == ECR_V_PROTV) &&
                 (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
                exec = 1;

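        /*
         * Assemble the handle_mm_fault() input flags: FAULT_FLAG_DEFAULT
         * permits retries and killable/interruptible waits, while USER
         * and WRITE refine the core mm's permission checks and accounting.
         */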
        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
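
        /*
         * Retried faults re-enter here with the mmap lock dropped.
         * lock_mm_and_find_vma() (re)takes the mmap read lock and looks
         * up, possibly expanding, the VMA covering @address; on failure
         * it returns NULL with the lock already released, hence the
         * _nosemaphore bail-out below.
         */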
retry:
        vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
                goto bad_area_nosemaphore;

        /*
         * vm_area is good, now check permissions for this memory access
         */
        mask = VM_READ;
        if (write)
                mask = VM_WRITE;
        if (exec)
                mask = VM_EXEC;

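        /*
         * A VMA that exists but lacks the demanded right is an access
         * error (SEGV_ACCERR), as opposed to the SEGV_MAPERR default
         * used for unmapped addresses.
         */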
        if (!(vma->vm_flags & mask)) {
                si_code = SEGV_ACCERR;
                goto bad_area;
        }

        fault = handle_mm_fault(vma, address, flags, regs);

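        /*
         * From here on @fault encodes the outcome: completion, a retry
         * request, or a VM_FAULT_ERROR bit. On several of these paths
         * the core mm has already dropped the mmap lock.
         */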
        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        /*
         * Fault retry nuances, mmap_lock already relinquished by core mm
         */
        if (unlikely(fault & VM_FAULT_RETRY)) {
                flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

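        /*
         * Every remaining case, successful or not, falls through: the
         * mmap lock is still held and is released at bad_area, then the
         * error bits decide between a clean return and signal delivery.
         */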
bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:
        /*
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
         */
        if (likely(!(fault & VM_FAULT_ERROR)))
                /* Normal return path: fault handled gracefully */
                return;

        if (!user_mode(regs))
                goto no_context;

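        /*
         * A user-mode OOM is handed to the OOM killer via
         * pagefault_out_of_memory() rather than answered with a signal.
         */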
        if (fault & VM_FAULT_OOM) {
                pagefault_out_of_memory();
                return;
        }

        if (fault & VM_FAULT_SIGBUS) {
                sig = SIGBUS;
                si_code = BUS_ADRERR;
        } else {
                sig = SIGSEGV;
        }

        tsk->thread.fault_address = address;
        force_sig_fault(sig, si_code, (void __user *)address);
        return;

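        /*
         * Kernel-mode fault with no way to satisfy it: consult the
         * exception tables (uaccess helpers such as copy_to_user()
         * install fixups there); if no fixup matches, oops.
         */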
no_context:
        if (fixup_exception(regs))
                return;

        die("Oops", regs, address);
}