GitHub Repository: script3r/os161
Path: blob/master/kern/arch/mips/locore/trap.c

/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <signal.h>
#include <lib.h>
#include <mips/specialreg.h>
#include <mips/trapframe.h>
#include <cpu.h>
#include <spl.h>
#include <thread.h>
#include <current.h>
#include <vm.h>
#include <mainbus.h>
#include <syscall.h>


/* in exception.S */
extern void asm_usermode(struct trapframe *tf);

/* called only from assembler, so not declared in a header */
void mips_trap(struct trapframe *tf);


/* Names for trap codes */
#define NTRAPCODES 13
static const char *const trapcodenames[NTRAPCODES] = {
	"Interrupt",
	"TLB modify trap",
	"TLB miss on load",
	"TLB miss on store",
	"Address error on load",
	"Address error on store",
	"Bus error on code",
	"Bus error on data",
	"System call",
	"Break instruction",
	"Illegal instruction",
	"Coprocessor unusable",
	"Arithmetic overflow",
};
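
/*
 * The table above is indexed by the MIPS exception code taken from the
 * Cause register: the EX_* constants run from EX_IRQ (0) through
 * EX_OVF (12), so trapcodenames[code] is valid exactly when
 * code < NTRAPCODES.
 */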

/*
 * Function called when user-level code hits a fatal fault.
 */
static
void
kill_curthread(vaddr_t epc, unsigned code, vaddr_t vaddr)
{
	int sig = 0;

	KASSERT(code < NTRAPCODES);
	switch (code) {
	    case EX_IRQ:
	    case EX_IBE:
	    case EX_DBE:
	    case EX_SYS:
		/* should not be seen */
		KASSERT(0);
		sig = SIGABRT;
		break;
	    case EX_MOD:
	    case EX_TLBL:
	    case EX_TLBS:
		sig = SIGSEGV;
		break;
	    case EX_ADEL:
	    case EX_ADES:
		sig = SIGBUS;
		break;
	    case EX_BP:
		sig = SIGTRAP;
		break;
	    case EX_RI:
		sig = SIGILL;
		break;
	    case EX_CPU:
		sig = SIGSEGV;
		break;
	    case EX_OVF:
		sig = SIGFPE;
		break;
	}

	/*
	 * Instead of reporting the fault and panicking, terminate the
	 * offending user process with an exit code of -1. The stock
	 * report-and-panic path below is retained but is unreachable
	 * once sys__exit() has been called.
	 */
	sys__exit( -1 );
	return;

	/* NOTREACHED */
	kprintf("Fatal user mode trap %u sig %d (%s, epc 0x%x, vaddr 0x%x)\n",
		code, sig, trapcodenames[code], epc, vaddr);
	panic("I don't know how to handle this\n");
}

/*
 * General trap (exception) handling function for mips.
 * This is called by the assembly-language exception handler once
 * the trapframe has been set up.
 */
void
mips_trap(struct trapframe *tf)
{
	uint32_t code;
	bool isutlb, iskern;
	int spl;

	/* The trap frame is supposed to be 37 registers long. */
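	/* (Its layout must match the frame saved and restored by exception.S.) */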
	KASSERT(sizeof(struct trapframe)==(37*4));

	/*
	 * Extract the exception code info from the register fields.
	 */
	code = (tf->tf_cause & CCA_CODE) >> CCA_CODESHIFT;
	isutlb = (tf->tf_cause & CCA_UTLB) != 0;
	iskern = (tf->tf_status & CST_KUp) == 0;
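	/*
	 * code is the ExcCode field of the Cause register; isutlb
	 * distinguishes traps that arrived via the dedicated UTLB-miss
	 * vector; iskern is true when the "previous" kernel/user bit of
	 * the Status register indicates the trap was taken from kernel
	 * mode.
	 */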

	KASSERT(code < NTRAPCODES);

	/* Make sure we haven't run off our stack */
	if (curthread != NULL && curthread->t_stack != NULL) {
		KASSERT((vaddr_t)tf > (vaddr_t)curthread->t_stack);
		KASSERT((vaddr_t)tf < (vaddr_t)(curthread->t_stack
						+ STACK_SIZE));
	}

	/* Interrupt? Call the interrupt handler and return. */
	if (code == EX_IRQ) {
		int old_in;
		bool doadjust;

		old_in = curthread->t_in_interrupt;
		curthread->t_in_interrupt = 1;

		/*
		 * The processor has turned interrupts off; if the
		 * currently recorded interrupt state is interrupts on
		 * (spl of 0), adjust the recorded state to match, and
		 * restore after processing the interrupt.
		 *
		 * How can we get an interrupt if the recorded state
		 * is interrupts off? Well, as things currently stand
		 * when the CPU finishes idling it flips interrupts on
		 * and off to allow things to happen, but leaves
		 * curspl high while doing so.
		 *
		 * While we're here, assert that the interrupt
		 * handling code hasn't leaked a spinlock or an
		 * splhigh().
		 */

		if (curthread->t_curspl == 0) {
			KASSERT(curthread->t_curspl == 0);
			KASSERT(curthread->t_iplhigh_count == 0);
			curthread->t_curspl = IPL_HIGH;
			curthread->t_iplhigh_count++;
			doadjust = true;
		}
		else {
			doadjust = false;
		}

		mainbus_interrupt(tf);

		if (doadjust) {
			KASSERT(curthread->t_curspl == IPL_HIGH);
			KASSERT(curthread->t_iplhigh_count == 1);
			curthread->t_iplhigh_count--;
			curthread->t_curspl = 0;
		}

		curthread->t_in_interrupt = old_in;
		goto done2;
	}

	/*
	 * The processor turned interrupts off when it took the trap.
	 *
	 * While we're in the kernel, and not actually handling an
	 * interrupt, restore the interrupt state to where it was in
	 * the previous context, which may be low (interrupts on).
	 *
	 * Do this by forcing splhigh(), which may do a redundant
	 * cpu_irqoff() but forces the stored MI interrupt state into
	 * sync, then restoring the previous state.
	 */
	spl = splhigh();
	splx(spl);

	/* Syscall? Call the syscall handler and return. */
	if (code == EX_SYS) {
		/* Interrupts should have been on while in user mode. */
		KASSERT(curthread->t_curspl == 0);
		KASSERT(curthread->t_iplhigh_count == 0);

		DEBUG(DB_SYSCALL, "syscall: #%d, args %x %x %x %x\n",
		      tf->tf_v0, tf->tf_a0, tf->tf_a1, tf->tf_a2, tf->tf_a3);

		syscall(tf);
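		/*
		 * syscall() (see syscall.c) is expected to advance
		 * tf->tf_epc past the syscall instruction before it
		 * returns, so the instruction is not re-executed on the
		 * way back to user mode.
		 */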
		goto done;
	}

	/*
	 * Ok, it wasn't any of the really easy cases.
	 * Call vm_fault on the TLB exceptions.
	 * Panic on the bus error exceptions.
	 */
	switch (code) {
	    case EX_MOD:
		if (vm_fault(VM_FAULT_READONLY, tf->tf_vaddr)==0) {
			goto done;
		}
		break;
	    case EX_TLBL:
		if (vm_fault(VM_FAULT_READ, tf->tf_vaddr)==0) {
			goto done;
		}
		break;
	    case EX_TLBS:
		if (vm_fault(VM_FAULT_WRITE, tf->tf_vaddr)==0) {
			goto done;
		}
		break;
	    case EX_IBE:
	    case EX_DBE:
		/*
		 * This means you loaded invalid TLB entries, or
		 * touched invalid parts of the direct-mapped
		 * segments. These are serious kernel errors, so
		 * panic.
		 *
		 * The MIPS won't even tell you what invalid address
		 * caused the bus error.
		 */
		panic("Bus error exception, PC=0x%x\n", tf->tf_epc);
		break;
	}

	/*
	 * If we get to this point, it's a fatal fault - either it's
	 * one of the other exceptions, like illegal instruction, or
	 * it was a page fault we couldn't handle.
	 */

	if (!iskern) {
		/*
		 * Fatal fault in user mode.
		 * Kill the current user process.
		 */
		kill_curthread(tf->tf_epc, code, tf->tf_vaddr);
		goto done;
	}

	/*
	 * Fatal fault in kernel mode.
	 *
	 * If pcb_badfaultfunc is set, we do not panic; badfaultfunc is
	 * set by copyin/copyout and related functions to signify that
	 * the addresses they're accessing are userlevel-supplied and
	 * not trustable. What we actually want to do is resume
	 * execution at the function pointed to by badfaultfunc. That's
	 * going to be "copyfail" (see copyinout.c), which longjmps
	 * back to copyin/copyout or wherever and returns EFAULT.
	 *
	 * Note that we do not just *call* this function, because that
	 * won't necessarily do anything. We want the control flow
	 * that is currently executing in copyin (or whichever), and
	 * is stopped while we process the exception, to *teleport* to
	 * copyfail.
	 *
	 * This is accomplished by changing tf->tf_epc and returning
	 * from the exception handler.
	 */

	if (curthread != NULL &&
	    curthread->t_machdep.tm_badfaultfunc != NULL) {
		tf->tf_epc = (vaddr_t) curthread->t_machdep.tm_badfaultfunc;
		goto done;
	}

	/*
	 * Really fatal kernel-mode fault.
	 */

	kprintf("panic: Fatal exception %u (%s) in kernel mode\n", code,
		trapcodenames[code]);
	kprintf("panic: EPC 0x%x, exception vaddr 0x%x\n",
		tf->tf_epc, tf->tf_vaddr);

	panic("I can't handle this... I think I'll just die now...\n");

 done:
	/*
	 * Turn interrupts off on the processor, without affecting the
	 * stored interrupt state.
	 */
	cpu_irqoff();
 done2:

	/*
	 * The boot thread can get here (e.g. on interrupt return) but
	 * since it doesn't go to userlevel, it can't be returning to
	 * userlevel, so there's no need to set cputhreads[] and
	 * cpustacks[]. Just return.
	 */
	if (curthread->t_stack == NULL) {
		return;
	}

	cputhreads[curcpu->c_number] = (vaddr_t)curthread;
	cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE;
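	/*
	 * cpustacks[] and cputhreads[] are what the exception entry code
	 * in exception.S consults on the next trap from user mode to
	 * find this CPU's kernel stack and current thread, so they must
	 * be up to date before we return to user level.
	 */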

	/*
	 * This assertion will fail if either
	 * (1) curthread->t_stack is corrupted, or
	 * (2) the trap frame is somehow on the wrong kernel stack.
	 *
	 * If cpustacks[] is corrupted, the next trap back to the
	 * kernel will (most likely) hang the system, so it's better
	 * to find out now.
	 */
	KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf));
}

/*
 * Function for entering user mode.
 *
 * This should not be used by threads returning from traps - they
 * should just return from mips_trap(). It should be used by threads
 * entering user mode for the first time - whether the child thread in
 * a fork(), or into a brand-new address space after exec(), or when
 * starting the first userlevel program.
 *
 * It works by jumping into the exception return code.
 *
 * mips_usermode is common code for this. It cannot usefully be called
 * outside the mips port, but should be called from one of the
 * following places:
 *    - enter_new_process, for use by exec and equivalent.
 *    - enter_forked_process, in syscall.c, for use by fork.
 */
void
mips_usermode(struct trapframe *tf)
{

	/*
	 * Interrupts should be off within the kernel while entering
	 * user mode. However, while in user mode, interrupts should
	 * be on. To interact properly with the spl-handling logic
	 * above, we explicitly call spl0() and then call cpu_irqoff().
	 */
	spl0();
	cpu_irqoff();

	cputhreads[curcpu->c_number] = (vaddr_t)curthread;
	cpustacks[curcpu->c_number] = (vaddr_t)curthread->t_stack + STACK_SIZE;

	/*
	 * This assertion will fail if either
	 * (1) cpustacks[] is corrupted, or
	 * (2) the trap frame is not on our own kernel stack, or
	 * (3) the boot thread tries to enter user mode.
	 *
	 * If cpustacks[] is corrupted, the next trap back to the
	 * kernel will (most likely) hang the system, so it's better
	 * to find out now.
	 *
	 * It's necessary for the trap frame used here to be on the
	 * current thread's own stack. It cannot correctly be on
	 * either another thread's stack or in the kernel heap.
	 * (Exercise: why?)
	 */
	KASSERT(SAME_STACK(cpustacks[curcpu->c_number]-1, (vaddr_t)tf));

	/*
	 * This actually does it. See exception.S.
	 */
	asm_usermode(tf);
}

/*
 * enter_new_process: go to user mode after loading an executable.
 *
 * Performs the necessary initialization so that the user program will
 * get the arguments supplied in argc/argv (note that argv must be a
 * user-level address), and begin executing at the specified entry
 * point. The stack pointer is initialized from the stackptr
 * argument. Note that passing argc/argv may use additional stack
 * space on some other platforms (but not on mips).
 *
 * Works by creating an ersatz trapframe.
 */
void
enter_new_process(int argc, userptr_t argv, vaddr_t stack, vaddr_t entry)
{
	struct trapframe tf;

	bzero(&tf, sizeof(tf));
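
	/*
	 * Unmask all interrupt lines and set the "previous" user-mode
	 * and interrupt-enable bits in the status register. When the
	 * exception-return path in exception.S executes rfe, those
	 * "previous" bits become current, so the new process starts in
	 * user mode with interrupts enabled.
	 */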
	tf.tf_status = CST_IRQMASK | CST_IEp | CST_KUp;
	tf.tf_epc = entry;
	tf.tf_a0 = argc;
	tf.tf_a1 = (vaddr_t)argv;
	tf.tf_sp = stack;

	mips_usermode(&tf);
}