Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
script3r
GitHub Repository: script3r/os161
Path: blob/master/kern/vm/vmpage.c
2092 views
1
#include <types.h>
2
#include <kern/errno.h>
3
#include <lib.h>
4
#include <spinlock.h>
5
#include <synch.h>
6
#include <thread.h>
7
#include <addrspace.h>
8
#include <synch.h>
9
#include <wchan.h>
10
#include <vm.h>
11
#include <vm/page.h>
12
#include <vm/region.h>
13
#include <vm/swap.h>
14
#include <current.h>
15
#include <machine/coremap.h>
16
17
struct wchan *wc_transit;
18
static
19
int
20
vm_page_new( struct vm_page **vmp_ret, paddr_t *paddr_ret ) {
21
struct vm_page *vmp;
22
paddr_t paddr;
23
24
vmp = vm_page_create();
25
if( vmp == NULL )
26
return ENOMEM;
27
28
//attempt to allocate swap space.
29
vmp->vmp_swapaddr = swap_alloc();
30
if( vmp->vmp_swapaddr == INVALID_SWAPADDR ) {
31
vm_page_destroy( vmp );
32
return ENOSPC;
33
}
34
35
//allocate a single coremap_entry
36
paddr = coremap_alloc( vmp, true );
37
if( paddr == INVALID_PADDR ) {
38
vm_page_destroy( vmp );
39
return ENOSPC;
40
}
41
42
//page is already wired, now just lock it.
43
vm_page_lock( vmp );
44
45
KASSERT( coremap_is_wired( paddr ) );
46
47
//adjust the physical address, and mark the page dirty.
48
vmp->vmp_paddr = paddr;
49
50
*vmp_ret = vmp;
51
*paddr_ret = paddr;
52
53
return 0;
54
}
55
56
/**
 * Lock vmp and wire its resident physical frame, coping with the race
 * where the frame can be reclaimed between observing vmp_paddr and
 * wiring it in the coremap.  On return the page spinlock is held; if
 * the page is resident its frame is wired, otherwise vmp_paddr is
 * INVALID_PADDR (the page was swapped out).
 */
static
void
vm_page_acquire( struct vm_page *vmp ) {
	paddr_t paddr;
	paddr_t wired;

	wired = INVALID_PADDR;

	//lock the page.
	vm_page_lock( vmp );

	for( ;; ) {
		//get the physical address; it may change each time the
		//page lock is dropped below.
		paddr = vmp->vmp_paddr & PAGE_FRAME;

		//if the physical address matches the frame we wired on a
		//previous iteration, the wire is stable and we are done.
		if( paddr == wired )
			break;

		//must drop the page spinlock before coremap operations.
		vm_page_unlock( vmp );

		//that means we pinned this physical addr before, but the
		//page has since moved off it: release the stale wire.
		if( wired != INVALID_PADDR )
			coremap_unwire( wired );


		//check if the page has been paged out entirely.
		if( paddr == INVALID_PADDR ) {
			vm_page_lock( vmp );
			//NOTE(review): asserts the page was not paged back in
			//between the unlock and this relock — presumably the
			//callers' locking protocol guarantees this; confirm.
			KASSERT( (vmp->vmp_paddr & PAGE_FRAME) == INVALID_PADDR );
			break;
		}

		//wire the frame we observed, then loop to re-check that it
		//is still this page's frame under the lock.
		coremap_wire( paddr );
		wired = paddr;
		vm_page_lock( vmp );
	}

	if( paddr != INVALID_PADDR )
		KASSERT( coremap_is_wired( paddr ) );

	KASSERT( spinlock_do_i_hold( &vmp->vmp_lk ) );
}
101
102
void
103
vm_page_destroy( struct vm_page *vmp ) {
104
paddr_t paddr;
105
106
//lock and wire the page.
107
vm_page_acquire( vmp );
108
109
paddr = vmp->vmp_paddr & PAGE_FRAME;
110
//if the page is in core.
111
if( paddr != INVALID_PADDR ) {
112
//invalidate it
113
vmp->vmp_paddr = INVALID_PADDR;
114
115
KASSERT( coremap_is_wired( paddr ) );
116
117
//unlock and free the coremap entry associated
118
vm_page_unlock( vmp );
119
coremap_free( paddr, false );
120
}
121
else {
122
//the physical address is already invalid ...
123
//just unlock, so we can free the page.
124
vm_page_unlock( vmp );
125
}
126
127
//release the swap space if it exists.
128
if( vmp->vmp_swapaddr != INVALID_SWAPADDR )
129
swap_dealloc( vmp->vmp_swapaddr );
130
131
spinlock_cleanup( &vmp->vmp_lk );
132
kfree( vmp );
133
}
134
135
void
136
vm_page_lock( struct vm_page *vmp ) {
137
KASSERT( !spinlock_do_i_hold( &vmp->vmp_lk ) );
138
KASSERT( curthread->t_vmp_count == 0 || curthread->t_clone );
139
140
spinlock_acquire( &vmp->vmp_lk );
141
++curthread->t_vmp_count;
142
}
143
144
void
145
vm_page_unlock( struct vm_page *vmp ) {
146
KASSERT( spinlock_do_i_hold( &vmp->vmp_lk ) );
147
KASSERT( curthread->t_vmp_count == 1 || curthread->t_clone );
148
149
spinlock_release( &vmp->vmp_lk );
150
--curthread->t_vmp_count;
151
}
152
153
/**
 * Duplicate 'source' into a newly allocated page, returning it through
 * *target.  Handles a source that is currently swapped out by paging it
 * in first.  Sets curthread->t_clone so both pages may be locked at
 * once (see vm_page_lock).  Returns 0, or ENOMEM/ENOSPC on failure.
 */
int
vm_page_clone( struct vm_page *source, struct vm_page **target ) {
	struct vm_page *vmp;
	int res;
	paddr_t paddr;
	paddr_t source_paddr;
	off_t swap_addr;

	//we are in clone: permit holding two page locks simultaneously.
	curthread->t_clone = 1;

	//create a new vm_page; it comes back locked with its frame wired.
	res = vm_page_new( &vmp, &paddr );
	if( res ) {
		curthread->t_clone = 0;
		return res;
	}

	KASSERT( coremap_is_wired( paddr ) );
	KASSERT( spinlock_do_i_hold( &vmp->vmp_lk ) );
	KASSERT( curthread->t_vmp_count == 1 );

	//acquire the source page (locks it, wires its frame if resident).
	vm_page_acquire( source );

	source_paddr = source->vmp_paddr & PAGE_FRAME;
	//if the source page is not in core, swap it in.
	if( source_paddr == INVALID_PADDR ) {
		//get the swap offset of the source page.
		swap_addr = source->vmp_swapaddr;

		//unlock the source page; coremap_alloc may sleep/evict.
		vm_page_unlock( source );

		//allocate memory for the source page (returned wired).
		source_paddr = coremap_alloc( source, true );
		if( source_paddr == INVALID_PADDR ) {
			//unwire the page, since it was wired by vm_page_new.
			coremap_unwire( paddr );

			//unlock and destroy the new page.
			vm_page_unlock( vmp );
			vm_page_destroy( vmp );

			//not in clone anymore
			curthread->t_clone = 0;
			return ENOMEM;
		}
		LOCK_PAGING_GIANT();
		//swap in the contents located in swap_addr into source_paddr.
		swap_in( source_paddr, swap_addr );

		//lock the source again to publish the new frame.
		vm_page_lock( source );

		UNLOCK_PAGING_GIANT();

		//make sure nobody paged-in this page while we were swapping.
		KASSERT( (source->vmp_paddr & PAGE_FRAME) == INVALID_PADDR );

		//adjust the physical address to reflect the
		//address that currently stores the swapped in content.
		source->vmp_paddr = source_paddr;
	}

	//both frames must be wired across the copy.
	KASSERT( coremap_is_wired( source_paddr ) );
	KASSERT( coremap_is_wired( paddr ) );

	//clone from source frame into the new frame.
	coremap_clone( source_paddr, paddr );

	//unlock both source and target.
	vm_page_unlock( source );
	vm_page_unlock( vmp );

	//unwire both pages.
	coremap_unwire( source_paddr );
	coremap_unwire( paddr );


	*target = vmp;

	//not in clone anymore
	curthread->t_clone = 0;
	return 0;
}
239
240
struct vm_page *
241
vm_page_create( ) {
242
struct vm_page *vmp;
243
244
vmp = kmalloc( sizeof( struct vm_page ) );
245
if( vmp == NULL )
246
return NULL;
247
248
spinlock_init( &vmp->vmp_lk );
249
250
//initialize both the physical address
251
//and swap address to be invalid.
252
vmp->vmp_paddr = INVALID_PADDR;
253
vmp->vmp_swapaddr = INVALID_SWAPADDR;
254
vmp->vmp_in_transit = false;
255
256
return vmp;
257
}
258
259
int
260
vm_page_new_blank( struct vm_page **ret ) {
261
struct vm_page *vmp;
262
paddr_t paddr;
263
int res;
264
265
res = vm_page_new( &vmp, &paddr );
266
if( res )
267
return res;
268
269
//make sure the page is locked.
270
KASSERT( coremap_is_wired( paddr ) );
271
272
//unlock the page.
273
vm_page_unlock( vmp );
274
275
//zero the paddr and unwire it
276
coremap_zero( paddr );
277
coremap_unwire( paddr );
278
279
*ret = vmp;
280
return 0;
281
}
282
283
/**
 * Sleep until a page currently being evicted (in transit) settles.
 * Called with the page locked; returns with it locked again.  The
 * wchan is locked before the page lock is dropped, so the wakeall in
 * vm_page_evict cannot slip in between and be missed.
 */
static
void
vm_page_wait_for_transit( struct vm_page *vmp ) {
	wchan_lock( wc_transit );
	vm_page_unlock( vmp );
	KASSERT( curthread->t_vmp_count == 0 );
	wchan_sleep( wc_transit );
	vm_page_lock( vmp );
}
292
293
/**
 * Handle a fault on vmp at fault_vaddr: wait out any in-transit
 * eviction, swap the page in if it is not resident, then install a
 * TLB/page-table mapping via vm_map.  Returns 0, EINVAL for an unknown
 * fault type, or ENOMEM if a frame cannot be allocated for swap-in.
 * 'as' is currently unused.
 */
int
vm_page_fault( struct vm_page *vmp, struct addrspace *as, int fault_type, vaddr_t fault_vaddr ) {
	paddr_t paddr;
	int writeable;
	off_t swap_addr;
	bool success;
	(void) as;

	//map the fault type onto the protection we must grant.
	switch( fault_type ) {
		case VM_FAULT_READ:
			writeable = 0;
			break;
		case VM_FAULT_WRITE:
		case VM_FAULT_READONLY:
			writeable = 1;
			break;
		default:
			return EINVAL;

	}

	//acquire the page, retrying if it (re-)enters transit between the
	//transit-wait and the acquire (the lock is dropped in between).
	do {
		success = true;
		vm_page_lock( vmp );
		while( vmp->vmp_in_transit == true )
			vm_page_wait_for_transit( vmp );

		vm_page_unlock( vmp );
		vm_page_acquire( vmp );
		if( vmp->vmp_in_transit ) {
			//eviction started again: drop the wire and lock, retry.
			success = false;
			coremap_unwire( (vmp->vmp_paddr & PAGE_FRAME ) );
			vm_page_unlock( vmp );
		}
	} while( !success );

	//get the physical address.
	paddr = vmp->vmp_paddr & PAGE_FRAME;

	//if the page is out of core, bring it back in from swap.
	if( paddr == INVALID_PADDR ) {
		swap_addr = vmp->vmp_swapaddr;
		KASSERT( vmp->vmp_swapaddr != INVALID_SWAPADDR );

		//unlock the page while allocating (coremap_alloc may sleep).
		vm_page_unlock( vmp );

		//allocate memory; the frame comes back wired.
		paddr = coremap_alloc( vmp, true );
		if( paddr == INVALID_PADDR )
			return ENOMEM;

		KASSERT( coremap_is_wired( paddr ) );

		LOCK_PAGING_GIANT();
		//swap the page in.
		swap_in( paddr, swap_addr );
		vm_page_lock( vmp );
		UNLOCK_PAGING_GIANT();

		//make sure the page address is still invalid — nobody else
		//paged it in while we were doing I/O.
		KASSERT( vmp->vmp_paddr == INVALID_PADDR );
		KASSERT( vmp->vmp_swapaddr == swap_addr );
		KASSERT( coremap_is_wired( paddr ) );

		//update the physical address.
		vmp->vmp_paddr = paddr;
	}

	//map fault_vaddr onto paddr with the computed writeable flag.
	vm_map( fault_vaddr, paddr, writeable );

	//unwire the coremap entry (wired by vm_page_acquire/coremap_alloc).
	coremap_unwire( paddr );

	//unlock the page.
	vm_page_unlock( vmp );
	return 0;
}
373
374
/**
 * Evict a resident page to its swap slot.  The caller must hold the
 * paging giant lock and have the victim's frame wired.  The page is
 * flagged in-transit for the duration of the swap-out so that faulting
 * threads sleep on wc_transit instead of touching a half-written frame.
 */
void
vm_page_evict( struct vm_page *victim ) {
	paddr_t paddr;
	off_t swap_addr;

	KASSERT( lock_do_i_hold( giant_paging_lock ) );

	//lock the page while evicting.
	vm_page_lock( victim );

	paddr = victim->vmp_paddr & PAGE_FRAME;
	swap_addr = victim->vmp_swapaddr;

	//must be resident, have a swap slot, and be wired by the caller.
	KASSERT( paddr != INVALID_PADDR );
	KASSERT( swap_addr != INVALID_SWAPADDR );
	KASSERT( coremap_is_wired( paddr ) );

	//mark it as being in transit.
	KASSERT( victim->vmp_in_transit == false );
	victim->vmp_in_transit = true;

	//drop the spinlock across the swap I/O (swap_out may sleep).
	vm_page_unlock( victim );

	//write the frame contents out to the swap slot.
	swap_out( paddr, swap_addr );

	//lock the victim again to publish the new state.
	vm_page_lock( victim );

	//the in-transit flag must have kept everyone else away.
	KASSERT( victim->vmp_in_transit == true );
	KASSERT( (victim->vmp_paddr & PAGE_FRAME) == paddr );
	KASSERT( coremap_is_wired( paddr ) );

	victim->vmp_in_transit = false;
	victim->vmp_paddr = INVALID_PADDR;

	//wake any threads waiting in vm_page_wait_for_transit.
	wchan_wakeall( wc_transit );
	vm_page_unlock( victim );

}
419
420
421