/*
 * GitHub Repository: script3r/os161
 * Path: blob/master/kern/arch/mips/vm/coremap.c
 */
#include <types.h>
#include <lib.h>
#include <synch.h>
#include <wchan.h>
#include <thread.h>
#include <cpu.h>
#include <vm.h>
#include <vm/page.h>
#include <vm/swap.h>
#include <current.h>
#include <machine/coremap.h>
#include <machine/tlb.h>

struct coremap_stats cm_stats;
struct coremap_entry *coremap;
struct wchan *wc_wire;
struct wchan *wc_shootdown;
struct spinlock slk_coremap = SPINLOCK_INITIALIZER;
bool coremap_initialized = false;

extern struct spinlock slk_steal;
extern paddr_t firstpaddr;
extern paddr_t lastpaddr;
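
/*
 * LOCK_COREMAP(), UNLOCK_COREMAP() and COREMAP_IS_LOCKED() come from
 * machine/coremap.h and are not shown in this file. A minimal sketch of
 * how they are presumably built on slk_coremap (an assumption, not the
 * actual header):
 */
#if 0
#define LOCK_COREMAP()      spinlock_acquire( &slk_coremap )
#define UNLOCK_COREMAP()    spinlock_release( &slk_coremap )
#define COREMAP_IS_LOCKED() KASSERT( spinlock_do_i_hold( &slk_coremap ) )
#endif
/*
 * wc_transit and giant_paging_lock are used below but declared elsewhere
 * in the vm subsystem.
 */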

/**
 * initialize the statistics given the first and last physical addresses
 * that we are responsible for.
 */
static
void
coremap_init_stats( paddr_t first, paddr_t last ) {
    cm_stats.cms_base = first / PAGE_SIZE;
    cm_stats.cms_total_frames = last / PAGE_SIZE - cm_stats.cms_base;
    cm_stats.cms_kpages = 0;
    cm_stats.cms_upages = 0;
    cm_stats.cms_free = cm_stats.cms_total_frames;
    cm_stats.cms_wired = 0;
}

/**
 * initializes the coremap entry residing at index "ix".
 */
static
void
coremap_init_entry( unsigned int ix ) {
    KASSERT( ix < cm_stats.cms_total_frames );

    coremap[ix].cme_kernel = 0;
    coremap[ix].cme_last = 0;
    coremap[ix].cme_alloc = 0;
    coremap[ix].cme_wired = 0;
    coremap[ix].cme_tlb_ix = -1;
    coremap[ix].cme_cpu = 0;
}

/**
 * initialize our coremap data structure.
 * we figure out how much physical core memory we have to manage,
 * then allocate memory for our coremap by stealing it from the ram,
 * and finally we initialize each of our coremap entries.
 */
void
coremap_bootstrap( void ) {
    paddr_t first;     //first physical address
    paddr_t last;      //last physical address
    uint32_t nframes;  //total number of frames
    size_t nsize;      //size of coremap
    uint32_t i;

    first = firstpaddr;
    last = lastpaddr;

    //the number of frames we have to manage.
    nframes = (last - first) / PAGE_SIZE;

    //calculate the necessary size and round it up
    //to the nearest page size.
    nsize = nframes * sizeof( struct coremap_entry );
    nsize = ROUNDUP( nsize, PAGE_SIZE );

    //now, actually steal the memory.
    //the kernel is directly mapped, so we simply convert the physical
    //address using the PADDR_TO_KVADDR macro.
    coremap = (struct coremap_entry *) PADDR_TO_KVADDR( first );

    //advance the first address, since we just stole memory.
    first += nsize;

    //initialize our stats.
    coremap_init_stats( first, last );

    //initialize each coremap entry.
    for( i = 0; i < cm_stats.cms_total_frames; ++i )
        coremap_init_entry( i );

    //create the waiting channel for those
    //that are waiting to wire a certain frame.
    wc_wire = wchan_create( "wc_wire" );
    if( wc_wire == NULL )
        panic( "coremap_bootstrap: could not create wc_wire" );

    //create the waiting channel for those
    //who are waiting for a shootdown to complete.
    wc_shootdown = wchan_create( "wc_shootdown" );
    if( wc_shootdown == NULL )
        panic( "coremap_bootstrap: could not create wc_shootdown" );

    //create the waiting channel for pages in transit.
    wc_transit = wchan_create( "wc_transit" );
    if( wc_transit == NULL )
        panic( "coremap_bootstrap: could not create wc_transit" );

    //create the giant paging lock.
    giant_paging_lock = lock_create( "giant_paging_lock" );
    if( giant_paging_lock == NULL )
        panic( "coremap_bootstrap: could not create giant_paging_lock" );

    coremap_initialized = true;
}
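
/*
 * COREMAP_TO_PADDR() and PADDR_TO_COREMAP() also live in
 * machine/coremap.h. A sketch of how they are presumably defined,
 * consistent with coremap_init_stats() above (an assumption, not the
 * actual header):
 */
#if 0
#define COREMAP_TO_PADDR( ix ) \
    ( (paddr_t)( cm_stats.cms_base + (ix) ) * PAGE_SIZE )
#define PADDR_TO_COREMAP( paddr ) \
    ( (paddr) / PAGE_SIZE - cm_stats.cms_base )
#endif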

/**
 * this function decides whether the coremap entry at index "ix"
 * is free or allocated.
 */
static
bool
coremap_is_free( int ix ) {
    COREMAP_IS_LOCKED();
    return coremap[ix].cme_alloc == 0;
}

/**
 * decides whether the frame at index "ix" may be paged out.
 */
static
bool
coremap_is_pageable( int ix ) {
    COREMAP_IS_LOCKED();
    return
        coremap[ix].cme_wired == 0 &&  //must not be wired
        coremap[ix].cme_kernel == 0;   //must not be a kernel page
}

/**
 * ranks the region [ix, ix + size) for paging: returns the number of
 * already-free frames in the region, or -1 if any frame is not pageable.
 * higher scores mean fewer evictions are needed.
 */
static
int
rank_region_for_paging( int ix, int size ) {
    int score;
    int i;

    score = 0;
    for( i = ix; i < ix + size; ++i ) {
        if( !coremap_is_pageable( i ) )
            return -1;

        if( coremap_is_free( i ) )
            ++score;
    }

    return score;
}
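
/*
 * Example: a 3-frame window over { free, user page, free } scores 2,
 * so only one eviction is needed; any window containing a wired or
 * kernel frame ranks -1 and is skipped by find_optimal_range() below.
 */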

/**
 * sanity check: every frame must be accounted for as kernel, user, or free.
 */
static
void
coremap_ensure_integrity( void ) {
    COREMAP_IS_LOCKED();
    KASSERT( cm_stats.cms_total_frames ==
        cm_stats.cms_upages + cm_stats.cms_kpages + cm_stats.cms_free );
}

/**
 * finds an optimal range inside the coremap in which to allocate npages.
 * the optimal range is the one that requires the fewest evictions.
 */
static
int
find_optimal_range( int npages ) {
    int best_base;
    int best_count;
    int curr_count;
    uint32_t i;

    COREMAP_IS_LOCKED();
    best_count = -1;
    best_base = -1;

    //note: written as "i + npages <=" to avoid unsigned underflow when
    //npages exceeds the total frame count, and to include the last window.
    for( i = 0; i + npages <= cm_stats.cms_total_frames; ++i ) {
        curr_count = rank_region_for_paging( i, npages );
        if( curr_count > best_count ) {
            best_base = i;
            best_count = curr_count;
        }
    }

    return best_base;
}

/**
 * finds a pageable frame that has no live tlb mapping, if one exists.
 */
static
int
find_pageable_without_mapping( void ) {
    unsigned i;

    COREMAP_IS_LOCKED();
    for( i = 0; i < cm_stats.cms_total_frames; ++i )
        if( coremap_is_pageable( i ) && coremap[i].cme_tlb_ix == -1 )
            return i;

    return -1;
}

/**
 * finds a page that could be paged out.
 * prefers frames without a live tlb mapping; otherwise scans from a
 * random starting point, wrapping around once.
 */
static
int
find_pageable_page( void ) {
    uint32_t i;
    uint32_t start;
    int res;

    COREMAP_IS_LOCKED();

    res = find_pageable_without_mapping();
    if( res >= 0 )
        return res;

    start = random() % cm_stats.cms_total_frames;
    for( i = start; i < cm_stats.cms_total_frames; ++i )
        if( coremap_is_pageable( i ) )
            return i;

    for( i = 0; i < start; ++i )
        if( coremap_is_pageable( i ) )
            return i;

    return -1;
}

/**
 * evicts the victim page occupying coremap entry "ix_cme".
 * the frame stays wired for the duration of the eviction, any live tlb
 * mapping is shot down first, and the coremap lock is dropped around
 * the actual page-out.
 */
static
void
coremap_evict( int ix_cme ) {
    struct vm_page *victim;
    struct tlbshootdown tlb_shootdown;

    COREMAP_IS_LOCKED();

    //the coremap entry must have a virtual page associated with it.
    KASSERT( coremap[ix_cme].cme_page != NULL );
    KASSERT( coremap[ix_cme].cme_alloc == 1 );
    KASSERT( coremap_is_pageable( ix_cme ) );
    KASSERT( lock_do_i_hold( giant_paging_lock ) );

    //get the victim.
    victim = coremap[ix_cme].cme_page;
    KASSERT( (victim->vmp_paddr & PAGE_FRAME) == COREMAP_TO_PADDR( ix_cme ) );

    //wire the frame.
    coremap[ix_cme].cme_wired = 1;

    //if there's a live tlb mapping ...
    if( coremap[ix_cme].cme_tlb_ix != -1 ) {
        //if it is outside of our jurisdiction ...
        if( coremap[ix_cme].cme_cpu != curcpu->c_number ) {
            //request a shootdown from the appropriate cpu.
            tlb_shootdown.ts_tlb_ix = coremap[ix_cme].cme_tlb_ix;
            tlb_shootdown.ts_cme_ix = ix_cme;

            //send the shootdown.
            ipi_tlbshootdown_by_num( coremap[ix_cme].cme_cpu, &tlb_shootdown );

            //wait until the shootdown is complete.
            while( coremap[ix_cme].cme_tlb_ix != -1 )
                tlb_shootdown_wait();
        }
        else {
            //we can just handle the request ourselves.
            tlb_invalidate( coremap[ix_cme].cme_tlb_ix );
        }
    }

    KASSERT( coremap[ix_cme].cme_wired == 1 );
    KASSERT( coremap[ix_cme].cme_tlb_ix == -1 );
    KASSERT( coremap[ix_cme].cme_cpu == 0 );

    //unlock the coremap.
    UNLOCK_COREMAP();

    //evict the page from memory.
    vm_page_evict( victim );

    //lock it again.
    LOCK_COREMAP();

    KASSERT( coremap[ix_cme].cme_wired == 1 );
    KASSERT( coremap[ix_cme].cme_page == victim );
    KASSERT( coremap[ix_cme].cme_alloc == 1 );

    //make the coremap entry available.
    coremap[ix_cme].cme_wired = 0;
    coremap[ix_cme].cme_page = NULL;
    coremap[ix_cme].cme_alloc = 0;

    //wake anyone waiting to wire this frame.
    wchan_wakeall( wc_wire );

    //update the stats.
    --cm_stats.cms_upages;
    ++cm_stats.cms_free;

    //ensure coremap integrity.
    coremap_ensure_integrity();
}
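
/*
 * tlb_shootdown_wait() (used above) is defined elsewhere; presumably it
 * sleeps on wc_shootdown while the coremap lock is dropped, mirroring
 * coremap_wire_wait() below. A sketch under that assumption, not the
 * actual implementation:
 */
#if 0
static
void
tlb_shootdown_wait( void ) {
    wchan_lock( wc_shootdown );
    UNLOCK_COREMAP();
    wchan_sleep( wc_shootdown );
    LOCK_COREMAP();
}
#endif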

/**
 * replaces a page: picks a pageable victim and evicts it.
 * returns the freed coremap index, or a negative value on failure.
 */
static
int
coremap_page_replace( void ) {
    int ix;

    KASSERT( lock_do_i_hold( giant_paging_lock ) );

    COREMAP_IS_LOCKED();
    KASSERT( cm_stats.cms_free == 0 );

    //find a page that we could evict.
    ix = find_pageable_page();
    if( ix < 0 )
        return ix;

    KASSERT( coremap_is_pageable( ix ) );
    KASSERT( coremap[ix].cme_alloc == 1 );
    KASSERT( coremap[ix].cme_page != NULL );

    coremap_evict( ix );

    return ix;
}

/**
 * sleeps on wc_wire until a wired frame is released.
 * the coremap lock is dropped while sleeping.
 */
static
void
coremap_wire_wait( void ) {
    KASSERT( curthread->t_vmp_count == 0 );
    wchan_lock( wc_wire );
    UNLOCK_COREMAP();
    wchan_sleep( wc_wire );
    LOCK_COREMAP();
}
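
/*
 * LOCK_PAGING_IF_POSSIBLE(), UNLOCK_PAGING_IF_POSSIBLE() and
 * UNLOCK_PAGING_GIANT() are not defined in this file. A plausible sketch
 * (an assumption, not the actual macros): take giant_paging_lock only
 * from a thread context that is allowed to sleep.
 */
#if 0
#define LOCK_PAGING_IF_POSSIBLE() \
    do { \
        if( curthread != NULL && !curthread->t_in_interrupt ) \
            lock_acquire( giant_paging_lock ); \
    } while( 0 )
#define UNLOCK_PAGING_IF_POSSIBLE() \
    do { \
        if( lock_do_i_hold( giant_paging_lock ) ) \
            lock_release( giant_paging_lock ); \
    } while( 0 )
#define UNLOCK_PAGING_GIANT() lock_release( giant_paging_lock )
#endif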

/**
 * allocates a single physical page. if no frame is free and we are not
 * in an interrupt, tries to evict one. returns INVALID_PADDR on failure.
 */
static
paddr_t
coremap_alloc_single( struct vm_page *vmp, bool wired ) {
    int ix;
    int i;

    LOCK_PAGING_IF_POSSIBLE();

    //lock the coremap for atomicity.
    LOCK_COREMAP();

    //so far we don't know any index.
    ix = -1;

    //check to see if we have a free page.
    if( cm_stats.cms_free > 0 ) {
        //we do, so simply find it.
        for( i = cm_stats.cms_total_frames-1; i >= 0; --i ) {
            if( coremap_is_free( i ) ) {
                ix = i;
                break;
            }
        }

        KASSERT( ix >= 0 );
    }

    //at this point either ix is still -1, meaning we could not find a
    //single free page, or it holds a valid index.

    //if we are not in an interrupt, we simply try to evict a page.
    if( ix < 0 && curthread != NULL && !curthread->t_in_interrupt )
        ix = coremap_page_replace();

    //if the index is still negative, there's nothing more we can do:
    //we cannot grab a page.
    if( ix < 0 ) {
        UNLOCK_COREMAP();
        if( lock_do_i_hold( giant_paging_lock ) )
            UNLOCK_PAGING_GIANT();
        return INVALID_PADDR;
    }

    //mark the page we just got as allocated, and if we had a virtual
    //page associated, store it inside the coremap.
    mark_pages_as_allocated( ix, 1, wired, ( vmp == NULL ) );
    KASSERT( coremap[ix].cme_page == NULL );
    coremap[ix].cme_page = vmp;

    //unlock and return.
    UNLOCK_COREMAP();

    UNLOCK_PAGING_IF_POSSIBLE();

    return COREMAP_TO_PADDR( ix );
}

/**
 * allocates a physical page for the given virtual page.
 */
paddr_t
coremap_alloc( struct vm_page *vmp, bool wired ) {
    return coremap_alloc_single( vmp, wired );
}

/**
 * copies one physical page to another.
 * both frames must be page-aligned, valid, and wired by the caller.
 */
void
coremap_clone( paddr_t source, paddr_t target ) {
    vaddr_t vsource;
    vaddr_t vtarget;

    KASSERT( (source & PAGE_FRAME) == source );
    KASSERT( (target & PAGE_FRAME) == target );

    KASSERT( source != INVALID_PADDR );
    KASSERT( target != INVALID_PADDR );

    KASSERT( coremap_is_wired( source ) );
    KASSERT( coremap_is_wired( target ) );

    //the kernel is directly mapped, so copy via kernel virtual addresses.
    vsource = PADDR_TO_KVADDR( source );
    vtarget = PADDR_TO_KVADDR( target );

    memmove( (void*)vtarget, (const void*)vsource, PAGE_SIZE );
}
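
/*
 * A hypothetical caller pattern for coremap_clone(), e.g. when copying
 * a user page: both frames are wired first so neither can be evicted
 * mid-copy. A sketch, not code from this repository:
 */
#if 0
coremap_wire( src );
coremap_wire( dst );
coremap_clone( src, dst );
coremap_unwire( dst );
coremap_unwire( src );
#endif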

/**
 * marks pages [start, start + num) as allocated.
 * the coremap must already be locked.
 */
void
mark_pages_as_allocated( int start, int num, bool wired, bool is_kernel ) {
    int i;

    COREMAP_IS_LOCKED();

    //go over each page in the range
    //and mark it as allocated.
    for( i = start; i < start + num; ++i ) {
        KASSERT( coremap[i].cme_alloc == 0 );
        KASSERT( coremap[i].cme_wired == 0 );

        coremap[i].cme_alloc = 1;
        coremap[i].cme_wired = ( wired ) ? 1 : 0;
        coremap[i].cme_kernel = ( is_kernel ) ? 1 : 0;
    }

    //mark the last page of this allocation as the last.
    coremap[i-1].cme_last = 1;

    //update statistics.
    if( is_kernel )
        cm_stats.cms_kpages += num;
    else
        cm_stats.cms_upages += num;

    //we have fewer free pages now.
    cm_stats.cms_free -= num;

    //paranoia.
    coremap_ensure_integrity();
}
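
/*
 * note: only the final frame of an allocation gets cme_last set;
 * coremap_free() below walks forward from the base frame until it sees
 * cme_last, which is how multi-page kernel allocations are torn down.
 */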

/**
 * allocates npages physically contiguous pages for the kernel,
 * evicting user pages from the chosen range if necessary.
 */
static
paddr_t
coremap_alloc_multipages( int npages ) {
    int ix;
    int i;

    //take the paging lock if we are allowed to.
    LOCK_PAGING_IF_POSSIBLE();

    //lock the coremap.
    LOCK_COREMAP();

    //find the optimal range to store npages.
    //the optimal range is simply the range that requires the least
    //number of evictions.
    ix = find_optimal_range( npages );

    //if we couldn't find a range ... too bad.
    if( ix < 0 ) {
        UNLOCK_COREMAP();
        UNLOCK_PAGING_IF_POSSIBLE();
        return INVALID_PADDR;
    }

    //now, evict those pages that need to be evicted.
    for( i = ix; i < ix + npages; ++i ) {
        if( coremap[i].cme_alloc ) {
            //if we can evict, then just do it.
            if( curthread != NULL && !curthread->t_in_interrupt )
                coremap_evict( i );
            else {
                UNLOCK_COREMAP();
                UNLOCK_PAGING_IF_POSSIBLE();
                return INVALID_PADDR;
            }
        }
    }

    //at this point, the entire range we chose is ours,
    //so we simply mark it as allocated.
    mark_pages_as_allocated( ix, npages, false, true );

    //unlock the coremap and proceed with life.
    UNLOCK_COREMAP();
    UNLOCK_PAGING_IF_POSSIBLE();
    return COREMAP_TO_PADDR( ix );
}

/**
 * allocates kernel pages by stealing memory from the ram,
 * for use before the coremap is initialized.
 */
static
paddr_t
get_kpages_by_stealing( int npages ) {
    paddr_t paddr;

    KASSERT( !coremap_initialized );

    spinlock_acquire( &slk_steal );
    paddr = ram_stealmem( npages );
    spinlock_release( &slk_steal );

    return paddr;
}

/**
 * allocate kernel pages.
 */
vaddr_t
alloc_kpages( int npages ) {
    paddr_t paddr;  //the physical addr of the allocated page
    vaddr_t vaddr;

    //before the coremap exists, steal memory directly.
    if( !coremap_initialized ) {
        paddr = get_kpages_by_stealing( npages );
        vaddr = PADDR_TO_KVADDR( paddr );
        return vaddr;
    }

    //if we have a multi-page allocation request, call the multi-version;
    //otherwise, simply allocate a single page.
    paddr = ( npages > 1 ) ?
        coremap_alloc_multipages( npages ) :
        coremap_alloc_single( NULL, false );

    //if we got an invalid physical address, return 0 as the virtual
    //address, which is never a valid kernel virtual address.
    if( paddr == INVALID_PADDR )
        return 0;

    vaddr = PADDR_TO_KVADDR( paddr );
    return vaddr;
}
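
/*
 * a minimal usage sketch for the kernel-page interface (alloc_kpages()
 * is what kmalloc() ultimately builds on in OS/161):
 */
#if 0
vaddr_t buf = alloc_kpages( 2 );  /* two contiguous kernel pages */
if( buf == 0 )
    panic( "out of kernel memory" );
/* ... use the pages ... */
free_kpages( buf );
#endif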

/**
 * free a series of kernel pages.
 */
void
free_kpages( vaddr_t vaddr ) {
    coremap_free( KVADDR_TO_PADDR( vaddr ), true );
}

/**
 * free a coremap allocation, walking the range until the frame
 * marked as last.
 */
void
coremap_free( paddr_t paddr, bool is_kernel ) {
    uint32_t i;
    uint32_t ix;

    KASSERT( (paddr & PAGE_FRAME) == paddr );

    //convert the given physical address into the appropriate
    //physical frame.
    ix = PADDR_TO_COREMAP( paddr );

    //lock the coremap for atomicity.
    LOCK_COREMAP();

    //we loop starting from ix, until possibly the end.
    for( i = ix; i < cm_stats.cms_total_frames; ++i ) {
        //make sure the page is actually allocated.
        //further, make sure it is wired or is a kernel page.
        KASSERT( coremap[i].cme_alloc == 1 );
        KASSERT( coremap[i].cme_wired || is_kernel );

        //invalidate any live tlb mapping.
        if( coremap[i].cme_tlb_ix >= 0 )
            tlb_invalidate( coremap[i].cme_tlb_ix );

        //mark it as deallocated and update stats.
        coremap[i].cme_alloc = 0;
        coremap[i].cme_kernel ? --cm_stats.cms_kpages : --cm_stats.cms_upages;
        coremap[i].cme_page = NULL;
        coremap[i].cme_wired = 0;

        //we just released a wire.
        wchan_wakeall( wc_wire );

        //one extra free page.
        ++cm_stats.cms_free;

        //paranoia.
        coremap_ensure_integrity();

        //if we are the last in a series of allocations, bail.
        if( coremap[i].cme_last ) {
            coremap[i].cme_last = 0;
            break;
        }
    }
    UNLOCK_COREMAP();
}

/**
 * handles a tlb shootdown request from another cpu.
 */
void
vm_tlbshootdown( const struct tlbshootdown *ts ) {
    int cme_ix;
    int tlb_ix;

    LOCK_COREMAP();

    cme_ix = ts->ts_cme_ix;
    tlb_ix = ts->ts_tlb_ix;

    //only invalidate if the mapping still belongs to us and is still live.
    if( coremap[cme_ix].cme_cpu == curcpu->c_number && coremap[cme_ix].cme_tlb_ix == tlb_ix )
        tlb_invalidate( tlb_ix );

    wchan_wakeall( wc_shootdown );
    UNLOCK_COREMAP();
}

/**
 * invalidates the entire tlb.
 */
void
vm_tlbshootdown_all( void ) {
    LOCK_COREMAP();
    tlb_clear();
    wchan_wakeall( wc_shootdown );
    UNLOCK_COREMAP();
}

/**
 * wires the frame containing paddr, waiting if it is already wired.
 */
void
coremap_wire( paddr_t paddr ) {
    unsigned cix;

    cix = PADDR_TO_COREMAP( paddr );

    //lock the coremap.
    LOCK_COREMAP();

    //while the page is already wired, wait.
    while( coremap[cix].cme_wired != 0 )
        coremap_wire_wait();

    KASSERT( coremap[cix].cme_wired == 0 );
    coremap[cix].cme_wired = 1;

    UNLOCK_COREMAP();
}

/**
 * unwires the frame containing paddr and wakes any waiters.
 */
void
coremap_unwire( paddr_t paddr ) {
    unsigned cix;

    cix = PADDR_TO_COREMAP( paddr );

    LOCK_COREMAP();

    KASSERT( coremap[cix].cme_wired == 1 );
    coremap[cix].cme_wired = 0;
    wchan_wakeall( wc_wire );

    UNLOCK_COREMAP();
}

/**
 * zeroes the (wired) frame containing paddr.
 */
void
coremap_zero( paddr_t paddr ) {
    vaddr_t vaddr;

    KASSERT( (paddr & PAGE_FRAME) == paddr );
    KASSERT( paddr != INVALID_PADDR );
    KASSERT( coremap_is_wired( paddr ) );

    vaddr = PADDR_TO_KVADDR( paddr );
    bzero( (char*)vaddr, PAGE_SIZE );
}

/**
 * checks whether the frame containing paddr is wired.
 */
bool
coremap_is_wired( paddr_t paddr ) {
    unsigned ix;

    KASSERT( ( paddr & PAGE_FRAME ) == paddr );

    ix = PADDR_TO_COREMAP( paddr );
    return coremap[ix].cme_wired != 0;
}