Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
script3r
GitHub Repository: script3r/os161
Path: blob/master/kern/arch/mips/vm/dumbvm.c
2101 views
1
/*
2
* Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
3
* The President and Fellows of Harvard College.
4
*
5
* Redistribution and use in source and binary forms, with or without
6
* modification, are permitted provided that the following conditions
7
* are met:
8
* 1. Redistributions of source code must retain the above copyright
9
* notice, this list of conditions and the following disclaimer.
10
* 2. Redistributions in binary form must reproduce the above copyright
11
* notice, this list of conditions and the following disclaimer in the
12
* documentation and/or other materials provided with the distribution.
13
* 3. Neither the name of the University nor the names of its contributors
14
* may be used to endorse or promote products derived from this software
15
* without specific prior written permission.
16
*
17
* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
18
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
* ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
21
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
* SUCH DAMAGE.
28
*/
29
30
#include <types.h>
31
#include <kern/errno.h>
32
#include <lib.h>
33
#include <spl.h>
34
#include <spinlock.h>
35
#include <thread.h>
36
#include <current.h>
37
#include <mips/tlb.h>
38
#include <addrspace.h>
39
#include <vm.h>
40
41
/*
42
* Dumb MIPS-only "VM system" that is intended to only be just barely
43
* enough to struggle off the ground. You should replace all of this
44
* code while doing the VM assignment. In fact, starting in that
45
* assignment, this file is not included in your kernel!
46
*/
47
48
/* under dumbvm, always have 48k of user stack */
49
#define DUMBVM_STACKPAGES 12
50
51
/*
52
* Wrap rma_stealmem in a spinlock.
53
*/
54
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
55
56
/*
 * vm_bootstrap: VM initialization hook, called once at boot.
 *
 * dumbvm needs no setup: physical pages are handed out on demand by
 * getppages/ram_stealmem, so there is nothing to initialize here.
 */
void
vm_bootstrap(void)
{
	/* Do nothing. */
}
61
62
static
63
paddr_t
64
getppages(unsigned long npages)
65
{
66
paddr_t addr;
67
68
spinlock_acquire(&stealmem_lock);
69
70
addr = ram_stealmem(npages);
71
72
spinlock_release(&stealmem_lock);
73
return addr;
74
}
75
76
/* Allocate/free some kernel-space virtual pages */
77
vaddr_t
78
alloc_kpages(int npages)
79
{
80
paddr_t pa;
81
pa = getppages(npages);
82
if (pa==0) {
83
return 0;
84
}
85
return PADDR_TO_KVADDR(pa);
86
}
87
88
/*
 * free_kpages: release pages obtained from alloc_kpages.
 *
 * dumbvm has no way to return memory stolen via ram_stealmem, so this
 * intentionally does nothing and the pages are leaked.
 */
void
free_kpages(vaddr_t addr)
{
	/* nothing - leak the memory. */

	(void)addr;
}
95
96
/*
 * vm_tlbshootdown_all: invalidate all TLB entries on request from
 * another CPU. dumbvm does not support TLB shootdown, so reaching
 * this is a kernel bug and we panic.
 */
void
vm_tlbshootdown_all(void)
{
	panic("dumbvm tried to do tlb shootdown?!\n");
}
101
102
/*
 * vm_tlbshootdown: invalidate one TLB mapping on request from another
 * CPU. dumbvm does not support TLB shootdown, so reaching this is a
 * kernel bug and we panic.
 */
void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("dumbvm tried to do tlb shootdown?!\n");
}
108
109
/*
 * vm_fault: handle a TLB fault at faultaddress.
 *
 * Under dumbvm each segment (code, data, stack) is physically
 * contiguous, so translation is just base-physical plus the offset of
 * the faulting page within the segment. The resulting mapping is
 * installed in the first invalid TLB slot.
 *
 * Returns 0 on success, EINVAL for an unrecognized fault type, and
 * EFAULT when there is no address space, the address lies outside
 * every segment, or the TLB has no free slot.
 */
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;

	/* Work with the base address of the faulting page. */
	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		/* We always create pages read-write, so we can't get this */
		panic("dumbvm: got VM_FAULT_READONLY\n");
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	as = curthread->t_addrspace;
	if (as == NULL) {
		/*
		 * No address space set up. This is probably a kernel
		 * fault early in boot. Return EFAULT so as to panic
		 * instead of getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	/* Compute the virtual extent of each segment. */
	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	/* Translate by offset into whichever segment contains the address. */
	if (faultaddress >= vbase1 && faultaddress < vtop1) {
		paddr = (faultaddress - vbase1) + as->as_pbase1;
	}
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else {
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	/* Install the mapping in the first invalid TLB slot. */
	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		/* DIRTY makes the entry writable: dumbvm pages are always R/W. */
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	/* All slots valid: dumbvm never evicts, so we cannot make progress. */
	kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
	splx(spl);
	return EFAULT;
}
201
202
struct addrspace *
203
as_create(void)
204
{
205
struct addrspace *as = kmalloc(sizeof(struct addrspace));
206
if (as==NULL) {
207
return NULL;
208
}
209
210
as->as_vbase1 = 0;
211
as->as_pbase1 = 0;
212
as->as_npages1 = 0;
213
as->as_vbase2 = 0;
214
as->as_pbase2 = 0;
215
as->as_npages2 = 0;
216
as->as_stackpbase = 0;
217
218
return as;
219
}
220
221
/*
 * as_destroy: dispose of an address space.
 *
 * Only the structure itself is freed; physical pages obtained via
 * getppages cannot be returned under dumbvm (see free_kpages), so
 * they are leaked.
 */
void
as_destroy(struct addrspace *as)
{
	kfree(as);
}
226
227
void
228
as_activate(struct addrspace *as)
229
{
230
int i, spl;
231
232
(void)as;
233
234
/* Disable interrupts on this CPU while frobbing the TLB. */
235
spl = splhigh();
236
237
for (i=0; i<NUM_TLB; i++) {
238
tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
239
}
240
241
splx(spl);
242
}
243
244
int
245
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
246
int readable, int writeable, int executable)
247
{
248
size_t npages;
249
250
/* Align the region. First, the base... */
251
sz += vaddr & ~(vaddr_t)PAGE_FRAME;
252
vaddr &= PAGE_FRAME;
253
254
/* ...and now the length. */
255
sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
256
257
npages = sz / PAGE_SIZE;
258
259
/* We don't use these - all pages are read-write */
260
(void)readable;
261
(void)writeable;
262
(void)executable;
263
264
if (as->as_vbase1 == 0) {
265
as->as_vbase1 = vaddr;
266
as->as_npages1 = npages;
267
return 0;
268
}
269
270
if (as->as_vbase2 == 0) {
271
as->as_vbase2 = vaddr;
272
as->as_npages2 = npages;
273
return 0;
274
}
275
276
/*
277
* Support for more than two regions is not available.
278
*/
279
kprintf("dumbvm: Warning: too many regions\n");
280
return EUNIMP;
281
}
282
283
/*
 * as_zero_region: zero-fill npages of physical memory starting at
 * paddr, accessed through the direct-mapped kernel segment.
 */
static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
	bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
289
290
/*
 * as_prepare_load: allocate and zero physical memory for both defined
 * regions and the user stack, prior to loading the executable.
 *
 * Returns 0 on success or ENOMEM if any allocation fails.
 * NOTE(review): pages grabbed before a failure are not released —
 * stolen RAM cannot be returned under dumbvm (see free_kpages).
 */
int
as_prepare_load(struct addrspace *as)
{
	/* Must not already have physical backing. */
	KASSERT(as->as_pbase1 == 0);
	KASSERT(as->as_pbase2 == 0);
	KASSERT(as->as_stackpbase == 0);

	as->as_pbase1 = getppages(as->as_npages1);
	if (as->as_pbase1 == 0) {
		return ENOMEM;
	}

	as->as_pbase2 = getppages(as->as_npages2);
	if (as->as_pbase2 == 0) {
		return ENOMEM;
	}

	as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
	if (as->as_stackpbase == 0) {
		return ENOMEM;
	}

	/* Zero everything so the process never sees stale memory. */
	as_zero_region(as->as_pbase1, as->as_npages1);
	as_zero_region(as->as_pbase2, as->as_npages2);
	as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

	return 0;
}
318
319
/*
 * as_complete_load: called after the executable has been loaded.
 * Nothing to undo here: dumbvm maps all pages read-write from the
 * start, so no protections were relaxed in as_prepare_load.
 */
int
as_complete_load(struct addrspace *as)
{
	(void)as;
	return 0;
}
325
326
int
327
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
328
{
329
KASSERT(as->as_stackpbase != 0);
330
331
*stackptr = USERSTACK;
332
return 0;
333
}
334
335
int
336
as_copy(struct addrspace *old, struct addrspace **ret)
337
{
338
struct addrspace *new;
339
340
new = as_create();
341
if (new==NULL) {
342
return ENOMEM;
343
}
344
345
new->as_vbase1 = old->as_vbase1;
346
new->as_npages1 = old->as_npages1;
347
new->as_vbase2 = old->as_vbase2;
348
new->as_npages2 = old->as_npages2;
349
350
/* (Mis)use as_prepare_load to allocate some physical memory. */
351
if (as_prepare_load(new)) {
352
as_destroy(new);
353
return ENOMEM;
354
}
355
356
KASSERT(new->as_pbase1 != 0);
357
KASSERT(new->as_pbase2 != 0);
358
KASSERT(new->as_stackpbase != 0);
359
360
memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
361
(const void *)PADDR_TO_KVADDR(old->as_pbase1),
362
old->as_npages1*PAGE_SIZE);
363
364
memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
365
(const void *)PADDR_TO_KVADDR(old->as_pbase2),
366
old->as_npages2*PAGE_SIZE);
367
368
memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
369
(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
370
DUMBVM_STACKPAGES*PAGE_SIZE);
371
372
*ret = new;
373
return 0;
374
}
375
376