#include <types.h>
#include <lib.h>
#include <synch.h>
#include <wchan.h>
#include <thread.h>
#include <cpu.h>
#include <vm.h>
#include <vm/page.h>
#include <vm/swap.h>
#include <current.h>
#include <machine/coremap.h>
#include <machine/tlb.h>
/* Global coremap state.  All fields below are guarded by slk_coremap
   unless noted otherwise. */
struct coremap_stats cm_stats;		/* frame accounting (free/kernel/user/wired) */
struct coremap_entry *coremap;		/* one entry per managed physical frame */
struct wchan *wc_wire;			/* waiters for a frame to become unwired */
struct wchan *wc_shootdown;		/* waiters for remote TLB shootdown completion */
struct spinlock slk_coremap = SPINLOCK_INITIALIZER;	/* protects coremap + cm_stats */
bool coremap_initialized = false;	/* false until coremap_bootstrap() completes */
/* defined elsewhere: early-boot page stealing and the RAM extent */
extern struct spinlock slk_steal;
extern paddr_t firstpaddr;
extern paddr_t lastpaddr;
/*
 * Initialize the global frame-accounting counters for the physical
 * range [first, last).  Called once from coremap_bootstrap().
 */
static
void
coremap_init_stats( paddr_t first, paddr_t last ) {
	/* Record the first managed frame number and how many frames follow. */
	cm_stats.cms_base = first / PAGE_SIZE;
	cm_stats.cms_total_frames = ( last / PAGE_SIZE ) - cm_stats.cms_base;

	/* Nothing has been allocated yet: every frame starts out free. */
	cm_stats.cms_kpages = 0;
	cm_stats.cms_upages = 0;
	cm_stats.cms_wired = 0;
	cm_stats.cms_free = cm_stats.cms_total_frames;
}
static
void
coremap_init_entry( unsigned int ix ) {
KASSERT( ix < cm_stats.cms_total_frames );
coremap[ix].cme_kernel = 0;
coremap[ix].cme_last = 0;
coremap[ix].cme_alloc = 0;
coremap[ix].cme_wired = 0;
coremap[ix].cme_tlb_ix = -1;
coremap[ix].cme_cpu = 0;
}
/*
 * Bring up the coremap: carve the entry array out of the bottom of
 * managed RAM, mark every remaining frame free, and create the
 * synchronization objects used by the paging machinery.
 * After this returns, alloc_kpages() goes through the coremap instead
 * of ram_stealmem().
 */
void
coremap_bootstrap( void ) {
	paddr_t first;
	paddr_t last;
	uint32_t nframes;
	size_t nsize;
	uint32_t i;

	first = firstpaddr;
	last = lastpaddr;

	/* Size the coremap array: one entry per physical frame, rounded
	   up to whole pages. */
	nframes = (last - first) / PAGE_SIZE;
	nsize = nframes * sizeof( struct coremap_entry );
	nsize = ROUNDUP( nsize, PAGE_SIZE );

	/* Place the coremap itself at the bottom of managed memory, then
	   advance 'first' past it so its own frames are never handed out. */
	coremap = (struct coremap_entry *) PADDR_TO_KVADDR( first );
	first += nsize;

	coremap_init_stats( first, last );
	for( i = 0; i < cm_stats.cms_total_frames; ++i )
		coremap_init_entry( i );

	wc_wire = wchan_create( "wc_wire" );
	if( wc_wire == NULL )
		panic( "coremap_bootstrap: could not create wc_wire" );

	wc_shootdown = wchan_create( "wc_shootdown" );
	if( wc_shootdown == NULL )
		panic( "coremap_bootstrap: could not create wc_shootdown" );

	wc_transit = wchan_create( "wc_transit" );
	if( wc_transit == NULL )
		panic( "coremap_bootstrap: wc_transit." );

	giant_paging_lock = lock_create( "giant_paging_lock" );
	if( giant_paging_lock == NULL )
		/* BUGFIX: the old message blamed "vm_bootstrap", but this
		   panic fires in coremap_bootstrap. */
		panic( "coremap_bootstrap: could not create giant_paging_lock." );

	coremap_initialized = true;
}
/* True iff frame 'ix' is unallocated.  Caller holds slk_coremap. */
static
bool
coremap_is_free( int ix ) {
	COREMAP_IS_LOCKED();
	return !coremap[ix].cme_alloc;
}
static
bool
coremap_is_pageable( int ix ) {
COREMAP_IS_LOCKED();
return
coremap[ix].cme_wired == 0 &&
coremap[ix].cme_kernel == 0;
}
/*
 * Score the frame range [ix, ix+size) as a multipage-allocation target.
 * Returns -1 if any frame in the range cannot be paged out; otherwise
 * returns the number of already-free frames (higher = fewer evictions).
 */
static
int
rank_region_for_paging( int ix, int size ) {
	int frame;
	int nfree;

	nfree = 0;
	for( frame = ix; frame < ix + size; ++frame ) {
		if( !coremap_is_pageable( frame ) )
			return -1;
		if( coremap_is_free( frame ) )
			++nfree;
	}
	return nfree;
}
/*
 * Sanity check: every managed frame is accounted for as exactly one of
 * user-owned, kernel-owned, or free.  Caller holds slk_coremap.
 */
static
void
coremap_ensure_integrity( void ) {	/* FIX: '(void)' — '()' declared no prototype */
	COREMAP_IS_LOCKED();
	KASSERT( cm_stats.cms_total_frames ==
		cm_stats.cms_upages + cm_stats.cms_kpages + cm_stats.cms_free );
}
static
int
find_optimal_range( int npages ) {
int best_base;
int best_count;
int curr_count;
uint32_t i;
COREMAP_IS_LOCKED();
best_count = -1;
best_base = -1;
for( i = 0; i < cm_stats.cms_total_frames - npages; ++i ) {
curr_count = rank_region_for_paging( i, npages );
if( curr_count > best_count ) {
best_base = i;
best_count = curr_count;
}
}
return best_base;
}
/*
 * Look for a pageable frame that has no live TLB entry — evicting one
 * of these needs no shootdown.  Returns its index, or -1 if none.
 * Caller holds slk_coremap.
 */
static
int
find_pageable_without_mapping( void ) {
	unsigned ix;

	COREMAP_IS_LOCKED();
	for( ix = 0; ix < cm_stats.cms_total_frames; ++ix ) {
		if( !coremap_is_pageable( ix ) )
			continue;
		if( coremap[ix].cme_tlb_ix == -1 )
			return ix;
	}
	return -1;
}
/*
 * Choose an eviction victim.  Prefer a frame with no TLB mapping;
 * otherwise scan the whole coremap once, starting at a random offset
 * and wrapping, for any pageable frame.  Returns -1 if none exists.
 * Caller holds slk_coremap.
 */
static
int
find_pageable_page( void ) {
	uint32_t ix;
	uint32_t origin;
	int unmapped;

	COREMAP_IS_LOCKED();

	unmapped = find_pageable_without_mapping();
	if( unmapped >= 0 )
		return unmapped;

	/* Random start avoids repeatedly victimizing the same frames. */
	origin = random() % cm_stats.cms_total_frames;
	for( ix = origin; ix < cm_stats.cms_total_frames; ++ix ) {
		if( coremap_is_pageable( ix ) )
			return ix;
	}
	for( ix = 0; ix < origin; ++ix ) {
		if( coremap_is_pageable( ix ) )
			return ix;
	}

	return -1;
}
/*
 * Evict the page occupying coremap slot 'ix_cme': invalidate its TLB
 * entry (locally or via IPI shootdown), write it out through
 * vm_page_evict(), and mark the frame free.
 *
 * Preconditions: slk_coremap held, giant_paging_lock held, frame is
 * allocated, pageable, and backed by a vm_page.
 * Note: the coremap spinlock is RELEASED around vm_page_evict() and
 * reacquired afterwards; the frame is wired for the duration so nobody
 * else can touch it.
 */
static
void
coremap_evict( int ix_cme ) {
struct vm_page *victim;
struct tlbshootdown tlb_shootdown;
COREMAP_IS_LOCKED();
KASSERT( coremap[ix_cme].cme_page != NULL );
KASSERT( coremap[ix_cme].cme_alloc == 1 );
KASSERT( coremap_is_pageable( ix_cme ) );
KASSERT( lock_do_i_hold( giant_paging_lock ) );
victim = coremap[ix_cme].cme_page;
/* the vm_page's recorded physical address must match this frame */
KASSERT( (victim->vmp_paddr & PAGE_FRAME ) == COREMAP_TO_PADDR( ix_cme ) );
/* pin the frame so no one else wires/evicts it while we work */
coremap[ix_cme].cme_wired = 1;
if( coremap[ix_cme].cme_tlb_ix != -1 ) {
if( coremap[ix_cme].cme_cpu != curcpu->c_number ) {
/* mapping lives in another cpu's TLB: request a shootdown and
   wait until that cpu clears cme_tlb_ix (see vm_tlbshootdown) */
tlb_shootdown.ts_tlb_ix = coremap[ix_cme].cme_tlb_ix;
tlb_shootdown.ts_cme_ix = ix_cme;
ipi_tlbshootdown_by_num( coremap[ix_cme].cme_cpu, &tlb_shootdown );
while( coremap[ix_cme].cme_tlb_ix != -1 )
tlb_shootdown_wait();
}
else {
/* mapping is in our own TLB: invalidate it directly */
tlb_invalidate( coremap[ix_cme].cme_tlb_ix );
}
}
KASSERT( coremap[ix_cme].cme_wired == 1 );
/* NOTE(review): these asserts imply tlb_invalidate() clears the
   coremap's cme_tlb_ix/cme_cpu fields — confirm in machine/tlb. */
KASSERT( coremap[ix_cme].cme_tlb_ix == -1 );
KASSERT( coremap[ix_cme].cme_cpu == 0 );
/* drop the spinlock: vm_page_evict may sleep on swap I/O */
UNLOCK_COREMAP();
vm_page_evict( victim );
LOCK_COREMAP();
/* the wire bit kept the frame ours across the unlocked window */
KASSERT( coremap[ix_cme].cme_wired == 1 );
KASSERT( coremap[ix_cme].cme_page == victim );
KASSERT( coremap[ix_cme].cme_alloc == 1 );
coremap[ix_cme].cme_wired = 0;
coremap[ix_cme].cme_page = NULL;
coremap[ix_cme].cme_alloc = 0;
/* wake anyone waiting in coremap_wire() on this frame */
wchan_wakeall( wc_wire );
--cm_stats.cms_upages;
++cm_stats.cms_free;
coremap_ensure_integrity();
}
/*
 * Free up one frame by evicting a victim page.  Only called when no
 * free frames remain.  Returns the index of the now-free frame, or a
 * negative value if nothing was pageable.
 * Caller holds slk_coremap and giant_paging_lock.
 */
static
int
coremap_page_replace( void ) {
	int victim;

	KASSERT( lock_do_i_hold( giant_paging_lock ) );
	COREMAP_IS_LOCKED();
	KASSERT( cm_stats.cms_free == 0 );

	victim = find_pageable_page();
	if( victim >= 0 ) {
		KASSERT( coremap_is_pageable( victim ) );
		KASSERT( coremap[victim].cme_alloc == 1 );
		KASSERT( coremap[victim].cme_page != NULL );
		coremap_evict( victim );
	}
	return victim;
}
/*
 * Sleep until a frame is unwired (wc_wire is broadcast on every unwire,
 * free, and eviction).  The coremap spinlock is released across the
 * sleep and reacquired before returning, so the caller must re-check
 * its wired condition in a loop.
 */
static
void
coremap_wire_wait( void ) {	/* FIX: '(void)' — '( )' declared no prototype */
	/* must not sleep while holding vm_pages in transit */
	KASSERT( curthread->t_vmp_count == 0 );

	wchan_lock( wc_wire );
	UNLOCK_COREMAP();
	wchan_sleep( wc_wire );
	LOCK_COREMAP();
}
/*
 * Allocate one physical frame, optionally wired, and attach it to the
 * vm_page 'vmp' (NULL means a kernel page).  Prefers a free frame
 * (scanned top-down); if none exists and we may sleep, evicts a victim.
 * Returns the frame's physical address, or INVALID_PADDR on failure.
 */
static
paddr_t
coremap_alloc_single( struct vm_page *vmp, bool wired ) {
int ix;
int i;
/* take giant_paging_lock only if the current context allows sleeping */
LOCK_PAGING_IF_POSSIBLE();
LOCK_COREMAP();
ix = -1;
if( cm_stats.cms_free > 0 ) {
/* top-down scan keeps low frames available for multipage runs */
for( i = cm_stats.cms_total_frames-1; i >= 0; --i ) {
if( coremap_is_free( i ) ) {
ix = i;
break;
}
}
/* cms_free > 0 guarantees the scan found something */
KASSERT( ix >= 0 );
}
/* no free frame: evict, but only from a sleepable thread context */
if( ix < 0 && curthread != NULL && !curthread->t_in_interrupt )
ix = coremap_page_replace();
if( ix < 0 ) {
UNLOCK_COREMAP();
if( lock_do_i_hold( giant_paging_lock ) )
UNLOCK_PAGING_GIANT();
return INVALID_PADDR;
}
mark_pages_as_allocated( ix, 1, wired, ( vmp == NULL ) );
/* relies on cme_page being NULL on free frames */
KASSERT( coremap[ix].cme_page == NULL );
coremap[ix].cme_page = vmp;
UNLOCK_COREMAP();
UNLOCK_PAGING_IF_POSSIBLE();
return COREMAP_TO_PADDR( ix );
}
/*
 * Public single-frame allocator: wraps coremap_alloc_single().
 * Returns the frame's physical address, or INVALID_PADDR on failure.
 */
paddr_t
coremap_alloc( struct vm_page *vmp, bool wired ) {
	paddr_t frame;

	frame = coremap_alloc_single( vmp, wired );
	return frame;
}
/*
 * Copy the contents of physical frame 'source' into frame 'target'.
 * Both frames must be page-aligned, valid, and wired (so neither can be
 * evicted mid-copy).  The copy goes through the kernel's direct-mapped
 * window.
 */
void
coremap_clone( paddr_t source, paddr_t target ) {
	KASSERT( source != INVALID_PADDR );
	KASSERT( target != INVALID_PADDR );
	KASSERT( (source & PAGE_FRAME) == source );
	KASSERT( (target & PAGE_FRAME) == target );
	KASSERT( coremap_is_wired( source ) );
	KASSERT( coremap_is_wired( target ) );

	memmove( (void *)PADDR_TO_KVADDR( target ),
		(const void *)PADDR_TO_KVADDR( source ),
		PAGE_SIZE );
}
/*
 * Mark the frame run [start, start+num) allocated with the given
 * wired/kernel flags, tag the last frame of the run (so coremap_free()
 * knows where it ends), and update the global counters.
 * Caller holds slk_coremap; every frame in the run must be free.
 */
void
mark_pages_as_allocated( int start, int num, bool wired, bool is_kernel ) {
	int end;
	int ix;

	COREMAP_IS_LOCKED();

	end = start + num;
	for( ix = start; ix < end; ++ix ) {
		KASSERT( coremap[ix].cme_alloc == 0 );
		KASSERT( coremap[ix].cme_wired == 0 );
		coremap[ix].cme_alloc = 1;
		coremap[ix].cme_wired = wired ? 1 : 0;
		coremap[ix].cme_kernel = is_kernel ? 1 : 0;
	}
	/* cme_last marks the final frame of this allocation run */
	coremap[end - 1].cme_last = 1;

	if( is_kernel )
		cm_stats.cms_kpages += num;
	else
		cm_stats.cms_upages += num;
	cm_stats.cms_free -= num;

	coremap_ensure_integrity();
}
/*
 * Allocate 'npages' physically contiguous frames for the kernel
 * (unwired).  Picks the candidate range with the most free frames,
 * evicting any still-allocated frames inside it.  Returns the base
 * physical address, or INVALID_PADDR on failure.
 *
 * NOTE(review): coremap_evict() drops the coremap spinlock around swap
 * I/O, so another thread could grab frames inside this range while we
 * evict — the KASSERTs in mark_pages_as_allocated() would then fire.
 * Presumably giant_paging_lock serializes the racing paths; confirm.
 */
static
paddr_t
coremap_alloc_multipages( int npages ) {
int ix;
int i;
LOCK_PAGING_IF_POSSIBLE();
LOCK_COREMAP();
/* choose the range needing the fewest evictions */
ix = find_optimal_range( npages );
if( ix < 0 ) {
UNLOCK_COREMAP();
UNLOCK_PAGING_IF_POSSIBLE();
return INVALID_PADDR;
}
for( i = ix; i < ix + npages; ++i ) {
if( coremap[i].cme_alloc ) {
/* evictions sleep, so they are only legal in thread context */
if( curthread != NULL && !curthread->t_in_interrupt )
coremap_evict( i );
else {
UNLOCK_COREMAP();
UNLOCK_PAGING_IF_POSSIBLE();
return INVALID_PADDR;
}
}
}
/* wired=false, is_kernel=true: kernel multipage runs are not wired */
mark_pages_as_allocated( ix, npages, false, true );
UNLOCK_COREMAP();
UNLOCK_PAGING_IF_POSSIBLE();
return COREMAP_TO_PADDR( ix );
}
/*
 * Early-boot allocation path: steal 'npages' contiguous frames straight
 * from RAM.  Only legal before the coremap takes over physical memory.
 */
static
paddr_t
get_kpages_by_stealing( int npages ) {
	paddr_t stolen;

	KASSERT( !coremap_initialized );

	spinlock_acquire( &slk_steal );
	stolen = ram_stealmem( npages );
	spinlock_release( &slk_steal );

	return stolen;
}
/*
 * Allocate 'npages' kernel pages and return their kernel virtual
 * address, or 0 on failure.  Before the coremap is up this steals RAM
 * directly; afterwards it goes through the coremap (contiguous for
 * npages > 1, single-frame otherwise).
 */
vaddr_t
alloc_kpages( int npages ) {
	paddr_t paddr;

	if( !coremap_initialized ) {
		paddr = get_kpages_by_stealing( npages );
		/* BUGFIX: the steal result was never checked; ram_stealmem()
		   returns 0 when RAM is exhausted, and translating that
		   through PADDR_TO_KVADDR handed out a bogus address.
		   NOTE(review): assumes 0 is ram_stealmem's failure
		   sentinel — confirm against ram.c. */
		if( paddr == 0 )
			return 0;
		return PADDR_TO_KVADDR( paddr );
	}

	paddr = ( npages > 1 ) ?
		coremap_alloc_multipages( npages ) :
		coremap_alloc_single( NULL, 0 );

	/* no free frame and nothing evictable */
	if( paddr == INVALID_PADDR )
		return 0;

	return PADDR_TO_KVADDR( paddr );
}
/*
 * Release kernel pages previously returned by alloc_kpages().
 * Kernel pages are direct-mapped, so translate back to a physical
 * address and hand the whole run to coremap_free().
 */
void
free_kpages( vaddr_t vaddr ) {
	paddr_t paddr;

	paddr = KVADDR_TO_PADDR( vaddr );
	coremap_free( paddr, true );
}
/*
 * Free the allocation run beginning at physical address 'paddr':
 * walk forward until the frame tagged cme_last, invalidating any live
 * TLB entries, clearing per-frame state, and updating the counters.
 * 'is_kernel' is true when freeing kernel pages (user frees must be
 * wired).
 */
void
coremap_free( paddr_t paddr, bool is_kernel ) {
	uint32_t i;
	uint32_t ix;

	KASSERT( (paddr & PAGE_FRAME) == paddr );
	ix = PADDR_TO_COREMAP( paddr );

	LOCK_COREMAP();
	for( i = ix; i < cm_stats.cms_total_frames; ++i ) {
		KASSERT( coremap[i].cme_alloc == 1 );
		KASSERT( coremap[i].cme_wired || is_kernel );

		/* drop any live translation for this frame */
		if( coremap[i].cme_tlb_ix >= 0 )
			tlb_invalidate( coremap[i].cme_tlb_ix );

		/* adjust stats before clearing the ownership bit we key off */
		if( coremap[i].cme_kernel )
			--cm_stats.cms_kpages;
		else
			--cm_stats.cms_upages;

		coremap[i].cme_alloc = 0;
		/* BUGFIX: clear the stale kernel bit.  It was left set on
		   freed kernel frames, so coremap_is_pageable() rejected
		   them forever, crippling find_optimal_range() and
		   find_pageable_page(). */
		coremap[i].cme_kernel = 0;
		coremap[i].cme_page = NULL;
		coremap[i].cme_wired = 0;

		/* wake threads waiting in coremap_wire() */
		wchan_wakeall( wc_wire );
		++cm_stats.cms_free;
		coremap_ensure_integrity();

		/* cme_last marks the final frame of the original run */
		if( coremap[i].cme_last ) {
			coremap[i].cme_last = 0;
			break;
		}
	}
	UNLOCK_COREMAP();
}
/*
 * IPI handler: invalidate the TLB entry named in 'ts', but only if the
 * coremap still shows that mapping on this cpu in that slot (it may
 * have been replaced since the IPI was sent).  Always wakes the
 * requester so it can re-check the entry.
 */
void
vm_tlbshootdown( const struct tlbshootdown *ts ) {
	int cme_ix;
	int tlb_ix;

	cme_ix = ts->ts_cme_ix;
	tlb_ix = ts->ts_tlb_ix;

	LOCK_COREMAP();
	if( coremap[cme_ix].cme_cpu == curcpu->c_number &&
	    coremap[cme_ix].cme_tlb_ix == tlb_ix ) {
		tlb_invalidate( tlb_ix );
	}
	wchan_wakeall( wc_shootdown );
	UNLOCK_COREMAP();
}
/*
 * Invalidate every TLB entry on this cpu, then wake any threads
 * waiting on shootdown completion.
 */
void
vm_tlbshootdown_all( void ) {
	LOCK_COREMAP();
	tlb_clear();
	wchan_wakeall( wc_shootdown );
	UNLOCK_COREMAP();
}
/*
 * Wire (pin) the frame backing 'paddr' so it cannot be evicted.
 * Sleeps until any existing wire on the frame is released.
 */
void
coremap_wire( paddr_t paddr ) {
	unsigned frame;

	frame = PADDR_TO_COREMAP( paddr );

	LOCK_COREMAP();
	/* wait out any current holder; wire_wait re-checks under the lock */
	while( coremap[frame].cme_wired != 0 )
		coremap_wire_wait();
	KASSERT( coremap[frame].cme_wired == 0 );
	coremap[frame].cme_wired = 1;
	UNLOCK_COREMAP();
}
/*
 * Release the wire on the frame backing 'paddr' and wake everyone
 * blocked in coremap_wire().
 */
void
coremap_unwire( paddr_t paddr ) {
	unsigned frame;

	frame = PADDR_TO_COREMAP( paddr );

	LOCK_COREMAP();
	KASSERT( coremap[frame].cme_wired == 1 );
	coremap[frame].cme_wired = 0;
	wchan_wakeall( wc_wire );
	UNLOCK_COREMAP();
}
/*
 * Zero-fill the physical frame at 'paddr' through the kernel's
 * direct-mapped window.  The frame must be page-aligned, valid, and
 * wired (so it cannot be evicted mid-scrub).
 */
void
coremap_zero( paddr_t paddr ) {
	KASSERT( paddr != INVALID_PADDR );
	KASSERT( (paddr & PAGE_FRAME) == paddr );
	KASSERT( coremap_is_wired( paddr ) );

	bzero( (char *)PADDR_TO_KVADDR( paddr ), PAGE_SIZE );
}
/*
 * Report whether the frame backing 'paddr' is currently wired.
 * NOTE(review): unlike every other accessor here, this reads the
 * coremap WITHOUT holding slk_coremap, so the answer can be stale by
 * the time the caller acts on it.  Currently it is only used inside
 * KASSERTs on frames the caller has already wired — confirm that is
 * intentional before using it elsewhere.
 */
bool
coremap_is_wired( paddr_t paddr ) {
unsigned ix;
KASSERT( ( paddr & PAGE_FRAME ) == paddr );
ix = PADDR_TO_COREMAP( paddr );
return coremap[ix].cme_wired != 0;
}