GitHub Repository: torvalds/linux
Path: blob/master/arch/microblaze/mm/fault.c
/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;        /* updated by do_page_fault() */
static unsigned long pte_errors;        /* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1.
 */
static int store_updates_sp(struct pt_regs *regs)
{
        unsigned int inst;

        if (get_user(inst, (unsigned int __user *)regs->pc))
                return 0;
        /* check for 1 in the rD field */
        if (((inst >> 21) & 0x1f) != 1)
                return 0;
        /* check for store opcodes */
        if ((inst & 0xd0000000) == 0xd0000000)
                return 1;
        return 0;
}
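
/*
 * Illustrative walk-through (hypothetical bit pattern, not checked
 * against the MicroBlaze ISA manual): for inst = 0xd8210004,
 *   (inst >> 21) & 0x1f == 1           -> the rD field names r1
 *   (inst & 0xd0000000) == 0xd0000000  -> opcode bits 31, 30 and 28 are
 *                                         set, matching the store group
 *                                         tested above
 * so store_updates_sp() would return 1 for this word.
 */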

/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
        const struct exception_table_entry *fixup;
        /* MS: no context */
        /* Are we prepared to handle this fault? */
        fixup = search_exception_tables(regs->pc);
        if (fixup) {
                regs->pc = fixup->fixup;
                return;
        }

        /* kernel has accessed a bad area */
        die("kernel access of bad area", regs, sig);
}
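
/*
 * Note: this fixup path is what lets uaccess helpers such as get_user()
 * survive a fault on a bad user pointer: the exception table pairs the
 * address of each potentially faulting instruction with a fixup
 * address, and rewriting regs->pc resumes execution in the helper's
 * error path (which typically returns -EFAULT) instead of oopsing.
 */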

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
                   unsigned long error_code)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        int code = SEGV_MAPERR;
        int is_write = error_code & ESR_S;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        regs->ear = address;
        regs->esr = error_code;

        /* On a kernel SLB miss we can only check for a valid exception entry */
        if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
                pr_warn("kernel task_size exceed");
                _exception(SIGSEGV, regs, code, address);
        }

        /* for instr TLB miss and instr storage exception ESR_S is undefined */
        if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
                is_write = 0;

        if (unlikely(faulthandler_disabled() || !mm)) {
                if (kernel_mode(regs))
                        goto bad_area_nosemaphore;

                /* faulthandler_disabled() in user mode is really bad,
                   as is current->mm == NULL. */
                pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
                         mm);
                pr_emerg("r15 = %lx MSR = %lx\n",
                         regs->r15, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
        }

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

        /* When running in the kernel we expect faults to occur only to
         * addresses in user space. All other faults represent errors in the
         * kernel and should generate an OOPS. Unfortunately, in the case of an
         * erroneous fault occurring in a code path which already holds mmap_lock
         * we will deadlock attempting to validate the fault against the
         * address space. Luckily the kernel only validly references user
         * space from well defined areas of code, which are listed in the
         * exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
         * the source reference check when there is a possibility of a deadlock.
         * Attempt to lock the address space, if we cannot we then validate the
         * source. If this is invalid we can skip the address space check,
         * thus avoiding the deadlock.
         */
        if (unlikely(!mmap_read_trylock(mm))) {
                if (kernel_mode(regs) && !search_exception_tables(regs->pc))
                        goto bad_area_nosemaphore;

retry:
                mmap_read_lock(mm);
        }
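
        /*
         * Note: the retry label above sits inside the trylock-failure
         * branch, so a later VM_FAULT_RETRY pass re-takes the lock with
         * a blocking mmap_read_lock(); the exception-table check is not
         * repeated on that pass.
         */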

        vma = find_vma(mm, address);
        if (unlikely(!vma))
                goto bad_area;

        if (vma->vm_start <= address)
                goto good_area;

        if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
                goto bad_area;

        if (unlikely(!is_write))
                goto bad_area;

        /*
         * N.B. The ABI allows programs to access up to
         * a few hundred bytes below the stack pointer (TBD).
         * The kernel signal delivery code writes up to about 1.5kB
         * below the stack pointer (r1) before decrementing it.
         * The exec code can write slightly over 640kB to the stack
         * before setting the user r1. Thus we allow the stack to
         * expand to 1MB without further checks.
         */
        if (unlikely(address + 0x100000 < vma->vm_end)) {

                /* get user regs even if this fault is in kernel mode */
                struct pt_regs *uregs = current->thread.regs;
                if (uregs == NULL)
                        goto bad_area;

                /*
                 * A user-mode access to an address a long way below
                 * the stack pointer is only valid if the instruction
                 * is one which would update the stack pointer to the
                 * address accessed if the instruction completed,
                 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
                 * (or the byte, halfword, float or double forms).
                 *
                 * If we don't check this then any write to the area
                 * between the last mapped region and the stack will
                 * expand the stack rather than segfaulting.
                 */
                if (address + 2048 < uregs->r1
                        && (kernel_mode(regs) || !store_updates_sp(regs)))
                        goto bad_area;
        }
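
        /*
         * Illustrative numbers (hypothetical): with vma->vm_end =
         * 0xb0000000, a fault at address = 0xafe00000 lies more than
         * 1MB below the end of the stack vma, so the check above runs.
         * If the saved user r1 is 0xaff00000, address + 2048 is still
         * well below r1, and unless the faulting instruction is a store
         * that updates r1 the access is rejected instead of being used
         * to grow the stack.
         */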
        vma = expand_stack(mm, address);
        if (!vma)
                goto bad_area_nosemaphore;

good_area:
        code = SEGV_ACCERR;

        /* a write */
        if (unlikely(is_write)) {
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
                if (unlikely(error_code & 0x08000000))
                        goto bad_area;
                if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
                        goto bad_area;
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        bad_page_fault(regs, address, SIGBUS);
                return;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                flags |= FAULT_FLAG_TRIED;

                /*
                 * No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */

                goto retry;
        }

        mmap_read_unlock(mm);

        /*
         * keep track of tlb+htab misses that are good addrs but
         * just need pte's created via handle_mm_fault()
         * -- Cort
         */
        pte_misses++;
        return;

bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:
        pte_errors++;

        /* User mode accesses cause a SIGSEGV */
        if (user_mode(regs)) {
                _exception(SIGSEGV, regs, code, address);
                return;
        }

        bad_page_fault(regs, address, SIGSEGV);
        return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                bad_page_fault(regs, address, SIGKILL);
        else
                pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);
        if (user_mode(regs)) {
                force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
                return;
        }
        bad_page_fault(regs, address, SIGBUS);
}
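
/*
 * Summary of the outcomes implemented above: a resolvable fault ends in
 * handle_mm_fault() and bumps pte_misses; an unmapped or forbidden user
 * access raises SIGSEGV; a hardware-reported bad address raises SIGBUS;
 * and out-of-memory is handed to pagefault_out_of_memory() for user
 * mode, or escalated through bad_page_fault() with SIGKILL for kernel
 * mode.
 */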