#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/md_var.h>
#include <machine/platform.h>
#include <machine/vmparam.h>
#include <machine/trap.h>
#include "mmu_oea64.h"
uintptr_t moea64_get_unique_vsid(void);
void moea64_release_vsid(uint64_t vsid);
static void slb_zone_init(void *);
static uma_zone_t slbt_zone;
static uma_zone_t slb_cache_zone;
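/* Number of entries in the hardware SLB, and thus in the per-CPU SLB cache. */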
int n_slbs = 64;
SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL);
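/*
 * User SLB entries are stored per-pmap in a 16-way trie keyed by ESID.
 * Each level of the trie consumes four bits of the ESID; interior nodes
 * hold child pointers, leaves hold the SLB entries themselves, ua_alloc
 * is a bitmap of which slots in u are populated, and ua_base is the ESID
 * prefix shared by everything below the node.
 */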
struct slbtnode {
uint16_t ua_alloc;
uint8_t ua_level;
uint64_t ua_base;
union {
struct slbtnode *ua_child[16];
struct slb slb_entries[16];
} u;
};
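/*
 * Trie levels count 4-bit nibbles of the 36-bit ESID: the root (level 8)
 * spans the whole ESID space and leaves (level 0) contain SLB entries.
 */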
#define UAD_ROOT_LEVEL 8
#define UAD_LEAF_LEVEL 0
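/* Index into a node at the given level: each level selects one ESID nibble. */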
static inline int
esid2idx(uint64_t esid, int level)
{
int shift;
shift = level * 4;
return ((esid >> shift) & 0xF);
}
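/*
 * A node's ua_base must be aligned to its level: the low 4 * (level + 1)
 * bits of the base must be zero.
 */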
#define uad_baseok(ua) \
(esid2base(ua->ua_base, ua->ua_level) == ua->ua_base)
static inline uint64_t
esid2base(uint64_t esid, int level)
{
uint64_t mask;
int shift;
shift = (level + 1) * 4;
mask = ~((1ULL << shift) - 1);
return (esid & mask);
}
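/*
 * Allocate a leaf node holding a single SLB entry for the given ESID and
 * hook it into the parent.  The lwsync orders the leaf's initialization
 * before the child pointer that makes it reachable.
 */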
static struct slb *
make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent)
{
struct slbtnode *child;
struct slb *retval;
int idx;
idx = esid2idx(esid, parent->ua_level);
KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!"));
child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
KASSERT(child != NULL, ("unhandled NULL case"));
child->ua_level = UAD_LEAF_LEVEL;
child->ua_base = esid2base(esid, child->ua_level);
idx = esid2idx(esid, child->ua_level);
child->u.slb_entries[idx].slbv = slbv;
child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
setbit(&child->ua_alloc, idx);
retval = &child->u.slb_entries[idx];
powerpc_lwsync();
idx = esid2idx(esid, parent->ua_level);
parent->u.ua_child[idx] = child;
setbit(&parent->ua_alloc, idx);
return (retval);
}
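/*
 * The given ESID falls under an existing child of the parent but is not
 * covered by that child.  Find the lowest level at which the ESID and the
 * child share a common base, insert an intermediate node at that level
 * with the old child beneath it, and return it so the caller can attach a
 * new leaf for the ESID.
 */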
static struct slbtnode*
make_intermediate(uint64_t esid, struct slbtnode *parent)
{
struct slbtnode *child, *inter;
int idx, level;
idx = esid2idx(esid, parent->ua_level);
child = parent->u.ua_child[idx];
KASSERT(esid2base(esid, child->ua_level) != child->ua_base,
("No need for an intermediate node?"));
level = child->ua_level + 1;
while (esid2base(esid, level) !=
esid2base(child->ua_base, level))
level++;
KASSERT(level < parent->ua_level,
("Found splitting level %d for %09jx and %09jx, "
"but it's the same as %p's",
level, esid, child->ua_base, parent));
inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
KASSERT(inter != NULL, ("unhandled NULL case"));
inter->ua_level = level;
inter->ua_base = esid2base(esid, inter->ua_level);
idx = esid2idx(child->ua_base, inter->ua_level);
inter->u.ua_child[idx] = child;
setbit(&inter->ua_alloc, idx);
powerpc_lwsync();
idx = esid2idx(inter->ua_base, parent->ua_level);
parent->u.ua_child[idx] = inter;
setbit(&parent->ua_alloc, idx);
return (inter);
}
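/*
 * Construct the SLBV (VSID and flags) for a kernel virtual address.  Large
 * mappings are used for direct-map addresses that are backed by physical
 * memory and for the vm_page array when large pages are available.
 */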
uint64_t
kernel_va_to_slbv(vm_offset_t va)
{
uint64_t slbv;
slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT;
if (hw_direct_map && va > DMAP_BASE_ADDRESS && va < DMAP_MAX_ADDRESS) {
if (mem_valid(DMAP_TO_PHYS(va), 0) == 0)
slbv |= SLBV_L;
} else if (moea64_large_page_size != 0 &&
va >= (vm_offset_t)vm_page_array &&
va <= (uintptr_t)(&vm_page_array[vm_page_array_size]))
slbv |= SLBV_L;
return (slbv);
}
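/*
 * Walk the pmap's SLB trie and return the entry covering the given user
 * VA, or NULL if no valid entry exists for its segment.
 */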
struct slb *
user_va_to_slb_entry(pmap_t pm, vm_offset_t va)
{
uint64_t esid = va >> ADDR_SR_SHFT;
struct slbtnode *ua;
int idx;
ua = pm->pm_slb_tree_root;
for (;;) {
KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!",
ua->ua_base, ua->ua_level));
idx = esid2idx(esid, ua->ua_level);
if (ua->ua_level == UAD_LEAF_LEVEL)
return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ?
&ua->u.slb_entries[idx] : NULL);
ua = ua->u.ua_child[idx];
if (ua == NULL ||
esid2base(esid, ua->ua_level) != ua->ua_base)
return (NULL);
}
return (NULL);
}
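/*
 * Return the VSID for a virtual address.  Kernel VSIDs are computed
 * directly from the ESID; user VSIDs are looked up in the SLB trie and
 * allocated on first use.
 */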
uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
struct slb *entry;
if (pm == kernel_pmap)
return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT));
entry = user_va_to_slb_entry(pm, va);
if (entry == NULL)
return (allocate_user_vsid(pm,
(uintptr_t)va >> ADDR_SR_SHFT, 0));
return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT);
}
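/*
 * Allocate a fresh VSID for a user segment, record it in the pmap's SLB
 * trie, and add the new entry to the pmap's SLB cache.  The eieio ensures
 * an entry's slbv is visible before the slbe that marks it valid.
 */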
uint64_t
allocate_user_vsid(pmap_t pm, uint64_t esid, int large)
{
uint64_t vsid, slbv;
struct slbtnode *ua, *next, *inter;
struct slb *slb;
int idx;
KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID"));
PMAP_LOCK_ASSERT(pm, MA_OWNED);
vsid = moea64_get_unique_vsid();
slbv = vsid << SLBV_VSID_SHIFT;
if (large)
slbv |= SLBV_L;
ua = pm->pm_slb_tree_root;
for (;;) {
KASSERT(uad_baseok(ua),
("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
idx = esid2idx(esid, ua->ua_level);
if (ua->ua_level == UAD_LEAF_LEVEL) {
ua->u.slb_entries[idx].slbv = slbv;
eieio();
ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT)
| SLBE_VALID;
setbit(&ua->ua_alloc, idx);
slb = &ua->u.slb_entries[idx];
break;
}
next = ua->u.ua_child[idx];
if (next == NULL) {
slb = make_new_leaf(esid, slbv, ua);
break;
}
if (esid2base(esid, next->ua_level) != next->ua_base) {
inter = make_intermediate(esid, ua);
slb = make_new_leaf(esid, slbv, inter);
break;
}
ua = next;
}
eieio();
slb_insert_user(pm, slb);
return (vsid);
}
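/*
 * Invalidate the SLB trie entry for the given user segment.  The VSID
 * itself is released later, when the tree is freed.
 */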
void
free_vsid(pmap_t pm, uint64_t esid, int large)
{
struct slbtnode *ua;
int idx;
PMAP_LOCK_ASSERT(pm, MA_OWNED);
ua = pm->pm_slb_tree_root;
for (;;) {
KASSERT(uad_baseok(ua),
("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level));
idx = esid2idx(esid, ua->ua_level);
if (ua->ua_level == UAD_LEAF_LEVEL) {
ua->u.slb_entries[idx].slbv = 0;
eieio();
ua->u.slb_entries[idx].slbe = 0;
clrbit(&ua->ua_alloc, idx);
return;
}
ua = ua->u.ua_child[idx];
if (ua == NULL ||
esid2base(esid, ua->ua_level) != ua->ua_base) {
KASSERT(0,
("Asked to remove an entry that was never inserted!"));
return;
}
}
}
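/*
 * Recursively free a trie node, releasing the VSID of every valid leaf
 * entry underneath it.
 */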
static void
free_slb_tree_node(struct slbtnode *ua)
{
int idx;
for (idx = 0; idx < 16; idx++) {
if (ua->ua_level != UAD_LEAF_LEVEL) {
if (ua->u.ua_child[idx] != NULL)
free_slb_tree_node(ua->u.ua_child[idx]);
} else {
if (ua->u.slb_entries[idx].slbv != 0)
moea64_release_vsid(ua->u.slb_entries[idx].slbv
>> SLBV_VSID_SHIFT);
}
}
uma_zfree(slbt_zone, ua);
}
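/* Tear down a pmap's entire SLB trie. */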
void
slb_free_tree(pmap_t pm)
{
free_slb_tree_node(pm->pm_slb_tree_root);
}
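/* Allocate an empty root node for a new pmap's SLB trie. */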
struct slbtnode *
slb_alloc_tree(void)
{
struct slbtnode *root;
root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO);
KASSERT(root != NULL, ("unhandled NULL case"));
root->ua_level = UAD_ROOT_LEVEL;
return (root);
}
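/*
 * Enter a kernel mapping into the per-CPU SLB cache and, once the pmap
 * layer is bootstrapped, into the hardware SLB.  A free slot is used if
 * one exists (the user slot's slbe doubles as a cache-full flag);
 * otherwise a pseudo-random victim other than the user slot is evicted.
 */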
void
slb_insert_kernel(uint64_t slbe, uint64_t slbv)
{
struct slb *slbcache;
int i;
critical_enter();
slbcache = PCPU_GET(aim.slb);
if (slbcache[USER_SLB_SLOT].slbe == 0) {
for (i = 0; i < n_slbs; i++) {
if (i == USER_SLB_SLOT)
continue;
if (!(slbcache[i].slbe & SLBE_VALID))
goto fillkernslb;
}
if (i == n_slbs)
slbcache[USER_SLB_SLOT].slbe = 1;
}
i = mftb() % n_slbs;
if (i == USER_SLB_SLOT)
i = (i+1) % n_slbs;
fillkernslb:
KASSERT(i != USER_SLB_SLOT,
("Filling user SLB slot with a kernel mapping"));
slbcache[i].slbv = slbv;
slbcache[i].slbe = slbe | (uint64_t)i;
if (pmap_bootstrapped) {
__asm __volatile ("slbmte %0, %1" ::
"r"(slbcache[i].slbv), "r"(slbcache[i].slbe));
}
critical_exit();
}
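/*
 * Record a user SLB entry in the pmap's SLB cache, filling the next free
 * slot or evicting a pseudo-random one once the cache is full.
 */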
void
slb_insert_user(pmap_t pm, struct slb *slb)
{
int i;
PMAP_LOCK_ASSERT(pm, MA_OWNED);
if (pm->pm_slb_len < n_slbs) {
i = pm->pm_slb_len;
pm->pm_slb_len++;
} else {
i = mftb() % n_slbs;
}
pm->pm_slb[i] = slb;
}
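/*
 * Back-end page allocator for the SLB zones: allocations are constrained
 * to below the platform's maximum real address so that the trie and SLB
 * caches stay accessible with translation disabled.
 */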
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
u_int8_t *flags, int wait)
{
static vm_offset_t realmax = 0;
void *va;
vm_page_t m;
if (realmax == 0)
realmax = platform_real_maxaddr();
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_noobj_contig_domain(domain, malloc2vm_flags(wait) |
VM_ALLOC_WIRED, 1, 0, realmax, PAGE_SIZE, PAGE_SIZE,
VM_MEMATTR_DEFAULT);
if (m == NULL)
return (NULL);
if (hw_direct_map)
va = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
else {
va = (void *)(VM_PAGE_TO_PHYS(m) | DMAP_BASE_ADDRESS);
pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
}
return (va);
}
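/*
 * Create the UMA zones for trie nodes and per-pmap SLB caches, switching
 * both to the real-memory allocator above when the platform limits
 * real-mode addressing.
 */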
static void
slb_zone_init(void *dummy)
{
slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_CONTIG | UMA_ZONE_VM);
slb_cache_zone = uma_zcreate("SLB cache",
(n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_CONTIG | UMA_ZONE_VM);
if (platform_real_maxaddr() != VM_MAX_ADDRESS) {
uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc);
uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc);
}
}
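/* Allocate a per-pmap user SLB cache: an array of n_slbs + 1 entry pointers. */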
struct slb **
slb_alloc_user_cache(void)
{
return (uma_zalloc(slb_cache_zone, M_WAITOK | M_ZERO));
}
void
slb_free_user_cache(struct slb **slb)
{
uma_zfree(slb_cache_zone, slb);
}
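/*
 * Handle an SLB miss taken in kernel mode: if the faulting segment is not
 * already in the per-CPU SLB cache, construct its entry and store it,
 * evicting a pseudo-random non-user slot when no free slot remains.
 */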
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
struct slb *slbcache;
uint64_t slbe, slbv;
uint64_t esid, addr;
int i;
addr = (type == EXC_ISE) ? srr0 : dar;
slbcache = PCPU_GET(aim.slb);
esid = (uintptr_t)addr >> ADDR_SR_SHFT;
slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
for (i = 0; i < n_slbs; i++)
if (slbcache[i].slbe == (slbe | (uint64_t)i))
return;
slbv = kernel_va_to_slbv(addr);
if (slbcache[USER_SLB_SLOT].slbe == 0) {
for (i = 0; i < n_slbs; i++) {
if (i == USER_SLB_SLOT)
continue;
if (!(slbcache[i].slbe & SLBE_VALID))
goto fillkernslb;
}
if (i == n_slbs)
slbcache[USER_SLB_SLOT].slbe = 1;
}
i = mftb() % n_slbs;
if (i == USER_SLB_SLOT)
i = (i+1) % n_slbs;
fillkernslb:
slbcache[i].slbv = slbv;
slbcache[i].slbe = slbe | (uint64_t)i;
}
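/*
 * Handle an SLB miss taken in user mode: allocate a VSID for the faulting
 * segment if it has none, otherwise make sure its entry is present in the
 * pmap's SLB cache.  Returns -1 if the pmap has no SLB cache yet.
 */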
int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
struct slb *user_entry;
uint64_t esid;
int i;
if (pm->pm_slb == NULL)
return (-1);
esid = (uintptr_t)addr >> ADDR_SR_SHFT;
PMAP_LOCK(pm);
user_entry = user_va_to_slb_entry(pm, addr);
if (user_entry == NULL) {
(void)allocate_user_vsid(pm, esid, 0);
} else {
for (i = 0; i < pm->pm_slb_len; i++)
if (pm->pm_slb[i] == user_entry)
break;
if (i == pm->pm_slb_len)
slb_insert_user(pm, user_entry);
}
PMAP_UNLOCK(pm);
return (0);
}