/*
 * KVM/MIPS MMU handling in the KVM module.
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
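
/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table pages that may need
 * to be allocated on a fault: a PTE page, plus a PMD page unless the PMD
 * level is folded into the PGD.
 */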
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
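
/*
 * Free any pages still held in the vCPU's MMU page table allocation cache.
 */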
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
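
/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:	Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table,
 * i.e. representing no mappings. Unlike pgd_init(), all of the directory
 * pointers are initialised, since the table covers the whole guest physical
 * address space rather than a split user/kernel virtual address space.
 */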
static void kvm_pgd_init(void *page)
{
unsigned long *p, *end;
unsigned long entry;
#ifdef __PAGETABLE_PMD_FOLDED
entry = (unsigned long)invalid_pte_table;
#else
entry = (unsigned long)invalid_pmd_table;
#endif
p = (unsigned long *)page;
end = p + PTRS_PER_PGD;
do {
p[0] = entry;
p[1] = entry;
p[2] = entry;
p[3] = entry;
p[4] = entry;
p += 8;
p[-3] = entry;
p[-2] = entry;
p[-1] = entry;
} while (p != end);
}
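
/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest
 * physical addresses.
 *
 * Returns:	Pointer to new KVM GPA page directory.
 *		NULL on allocation failure.
 */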
pgd_t *kvm_pgd_alloc(void)
{
pgd_t *ret;
ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER);
if (ret)
kvm_pgd_init(ret);
return ret;
}
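
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 * @addr:	Address to index page table using.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to
 * the address @addr. If page tables don't exist for @addr, they are
 * allocated from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 */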
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pgd += pgd_index(addr);
if (pgd_none(*pgd)) {
BUG();
return NULL;
}
p4d = p4d_offset(pgd, addr);
pud = pud_offset(p4d, addr);
if (pud_none(*pud)) {
pmd_t *new_pmd;
if (!cache)
return NULL;
new_pmd = kvm_mmu_memory_cache_alloc(cache);
pmd_init(new_pmd);
pud_populate(NULL, pud, new_pmd);
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
pte_t *new_pte;
if (!cache)
return NULL;
new_pte = kvm_mmu_memory_cache_alloc(cache);
clear_page(new_pte);
pmd_populate_kernel(NULL, pmd, new_pte);
}
return pte_offset_kernel(pmd, addr);
}
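
/* Find (or create, if @cache is non-NULL) the GPA page table PTE for @addr. */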
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
struct kvm_mmu_memory_cache *cache,
unsigned long addr)
{
return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}
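
/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address mappings from the GPA page tables.
 * Each level helper returns true when its entire table lies inside the
 * flushed range, so the caller may clear the entry and free the table.
 *
 * The caller must hold the KVM mmu_lock.
 */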
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
unsigned long end_gpa)
{
int i_min = pte_index(start_gpa);
int i_max = pte_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
int i;
for (i = i_min; i <= i_max; ++i) {
if (!pte_present(pte[i]))
continue;
set_pte(pte + i, __pte(0));
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
unsigned long end_gpa)
{
pte_t *pte;
unsigned long end = ~0ul;
int i_min = pmd_index(start_gpa);
int i_max = pmd_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pmd_present(pmd[i]))
continue;
pte = pte_offset_kernel(pmd + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
pmd_clear(pmd + i);
pte_free_kernel(NULL, pte);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
unsigned long end_gpa)
{
pmd_t *pmd;
unsigned long end = ~0ul;
int i_min = pud_index(start_gpa);
int i_max = pud_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pud_present(pud[i]))
continue;
pmd = pmd_offset(pud + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
pud_clear(pud + i);
pmd_free(NULL, pmd);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
unsigned long end_gpa)
{
p4d_t *p4d;
pud_t *pud;
unsigned long end = ~0ul;
int i_min = pgd_index(start_gpa);
int i_max = pgd_index(end_gpa);
bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
int i;
for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
if (!pgd_present(pgd[i]))
continue;
p4d = p4d_offset(pgd, 0);
pud = pud_offset(p4d + i, 0);
if (i == i_max)
end = end_gpa;
if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
pgd_clear(pgd + i);
pud_free(NULL, pud);
} else {
safe_to_remove = false;
}
}
return safe_to_remove;
}
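
/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range to flush.
 * @end_gfn:	Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the KVM mmu_lock.
 *
 * Returns:	Whether it is safe to remove the top level page directory
 *		because all lower levels have been removed.
 */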
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
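
/*
 * BUILD_PTE_RANGE_OP() - Generate helpers that apply a PTE modifier over a
 * range of guest physical addresses.
 * @name:	Function name suffix (e.g. mkclean generates
 *		kvm_mips_mkclean_{pte,pmd,pud,pgd}).
 * @op:		PTE operation (e.g. pte_mkclean) applied to each present PTE.
 *
 * Each generated function returns 1 if any PTE in the range was modified,
 * 0 otherwise.
 */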
#define BUILD_PTE_RANGE_OP(name, op) \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
int i_min = pte_index(start); \
int i_max = pte_index(end); \
int i; \
pte_t old, new; \
\
for (i = i_min; i <= i_max; ++i) { \
if (!pte_present(pte[i])) \
continue; \
\
old = pte[i]; \
new = op(old); \
if (pte_val(new) == pte_val(old)) \
continue; \
set_pte(pte + i, new); \
ret = 1; \
} \
return ret; \
} \
									\
/* returns 1 if anything was done */					\
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
pte_t *pte; \
unsigned long cur_end = ~0ul; \
int i_min = pmd_index(start); \
int i_max = pmd_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pmd_present(pmd[i])) \
continue; \
\
pte = pte_offset_kernel(pmd + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pte(pte, start, cur_end); \
} \
return ret; \
} \
\
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
pmd_t *pmd; \
unsigned long cur_end = ~0ul; \
int i_min = pud_index(start); \
int i_max = pud_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pud_present(pud[i])) \
continue; \
\
pmd = pmd_offset(pud + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \
} \
return ret; \
} \
\
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \
unsigned long end) \
{ \
int ret = 0; \
p4d_t *p4d; \
pud_t *pud; \
unsigned long cur_end = ~0ul; \
int i_min = pgd_index(start); \
int i_max = pgd_index(end); \
int i; \
\
for (i = i_min; i <= i_max; ++i, start = 0) { \
if (!pgd_present(pgd[i])) \
continue; \
\
p4d = p4d_offset(pgd, 0); \
pud = pud_offset(p4d + i, 0); \
if (i == i_max) \
cur_end = end; \
\
ret |= kvm_mips_##name##_pud(pud, start, cur_end); \
} \
return ret; \
}
BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)
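
/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:	KVM pointer.
 * @start_gfn:	Guest frame number of first page in GPA range.
 * @end_gfn:	Guest frame number of last page in GPA range.
 *
 * Make a range of GPA mappings clean so that guest writes fault and can be
 * tracked for dirty page logging.
 *
 * The caller must hold the KVM mmu_lock.
 *
 * Returns:	Whether any GPA mapping was modified, which would require
 *		derived mappings (GVA page tables & TLB entries) to be
 *		invalidated.
 */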
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
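
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:	The KVM pointer
 * @slot:	The memory slot associated with mask
 * @gfn_offset:	The gfn offset in memory slot
 * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
 *		slot to be write protected
 *
 * Walks bits set in mask and write protects the associated PTEs. Caller must
 * acquire @kvm->mmu_lock.
 */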
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
gfn_t base_gfn = slot->base_gfn + gfn_offset;
gfn_t start = base_gfn + __ffs(mask);
gfn_t end = base_gfn + __fls(mask);
kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
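
/*
 * kvm_mips_mkold_gpa_pt() clears the accessed (young) bit over a range of
 * guest physical addresses for page aging; a later guest access faults back
 * into kvm_mips_map_page(), which marks the page young again.
 */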
BUILD_PTE_RANGE_OP(mkold, pte_mkold)
static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
gfn_t end_gfn)
{
return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
start_gfn << PAGE_SHIFT,
end_gfn << PAGE_SHIFT);
}
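
/*
 * MMU notifier callbacks, invoked by generic KVM code when host mappings of
 * guest memory are unmapped or queried for recent access.
 */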
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
return true;
}
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
}
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
gpa_t gpa = range->start << PAGE_SHIFT;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!gpa_pte)
return false;
return pte_young(*gpa_pte);
}
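
/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:		vCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM, which on MIPS is resolving accessed/dirty faults on a
 * valid existing mapping.
 *
 * Returns:	0 on success, in which case we can update derived mappings and
 *		resume guest execution.
 *		-EFAULT on failure due to an absent GPA mapping or a write to
 *		a read-only page, in which case KVM must be consulted.
 */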
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
bool write_fault,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
gfn_t gfn = gpa >> PAGE_SHIFT;
pte_t *ptep;
int ret = 0;
	spin_lock(&kvm->mmu_lock);

	/* Fast path - just check GPA page table for an existing entry */
	ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
	if (!ptep || !pte_present(*ptep)) {
		ret = -EFAULT;
		goto out;
	}

	/* Track access to pages marked old */
	if (!pte_young(*ptep))
		set_pte(ptep, pte_mkyoung(*ptep));

	if (write_fault && !pte_dirty(*ptep)) {
		/* Fall back to the slow path if the page isn't writable */
		if (!pte_write(*ptep)) {
			ret = -EFAULT;
			goto out;
		}

		/* Track dirtying of writeable pages */
		set_pte(ptep, pte_mkdirty(*ptep));
		mark_page_dirty(kvm, gfn);
	}
if (out_entry)
*out_entry = *ptep;
if (out_buddy)
*out_buddy = *ptep_buddy(ptep);
out:
spin_unlock(&kvm->mmu_lock);
return ret;
}
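
/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:		vCPU pointer.
 * @gpa:		Guest physical address of fault.
 * @write_fault:	Whether the fault was due to a write.
 * @out_entry:		New PTE for @gpa (written on success unless NULL).
 * @out_buddy:		New PTE for @gpa's buddy (written on success unless
 *			NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one). This takes care of marking pages young or dirty (idle/dirty page
 * tracking), asking KVM for the corresponding PFN, and creating a mapping in
 * the GPA page tables. Derived mappings (GVA page tables and TLBs) must be
 * handled by the caller.
 *
 * Returns:	0 on success.
 *		-EFAULT if there is no memory region at @gpa or a write was
 *		attempted to a read-only memory region. This is usually
 *		handled as an MMIO access.
 */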
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
bool write_fault,
pte_t *out_entry, pte_t *out_buddy)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
gfn_t gfn = gpa >> PAGE_SHIFT;
int srcu_idx, err;
kvm_pfn_t pfn;
pte_t *ptep, entry;
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
struct page *page;

	/* Try the fast path to handle old / clean pages */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
				      out_buddy);
	if (!err)
		goto out;

	/* We need a minimum of cached pages ready for page table creation */
	err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
	if (err)
		goto out;

retry:
	/*
	 * Record mmu_invalidate_seq before faulting in the PFN so that a
	 * racing MMU notifier invalidation can be detected below; smp_rmb()
	 * orders this read before the page table walks in kvm_faultin_pfn().
	 */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* Slow path - ask KVM core whether we can access this GPA */
	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
	if (is_error_noslot_pfn(pfn)) {
		err = -EFAULT;
		goto out;
	}

	spin_lock(&kvm->mmu_lock);
	/* Check if an invalidation has taken place since we got the pfn */
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/*
		 * Mappings changed asynchronously, or a COW was triggered by
		 * kvm_faultin_pfn(); release the page and try again.
		 */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_page_unused(page);
		goto retry;
	}

	/* Ensure GPA page tables are allocated for this mapping */
	ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

	/* Set up the PTE */
	prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
	if (writeable) {
		prot_bits |= _PAGE_WRITE;
		if (write_fault) {
			prot_bits |= __WRITEABLE;
			mark_page_dirty(kvm, gfn);
		}
	}
	entry = pfn_pte(pfn, __pgprot(prot_bits));

	/* Write the PTE */
	set_pte(ptep, entry);
err = 0;
if (out_entry)
*out_entry = *ptep;
if (out_buddy)
*out_buddy = *ptep_buddy(ptep);
kvm_release_faultin_page(kvm, page, false, writeable);
spin_unlock(&kvm->mmu_lock);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
}
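
/*
 * Handle a root (host) TLB fault on a guest physical address under VZ: map
 * the page into the GPA page tables, then invalidate any stale root TLB
 * entry for @badvaddr.
 */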
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
bool write_fault)
{
int ret;
ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
if (ret)
return ret;
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
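
/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate the CP0_Count hrtimer to the current CPU by cancelling and
 * restarting it if it was running prior to being cancelled.
 *
 * Must be called when the vCPU is migrated to a different CPU, so that timer
 * expiry during guest execution interrupts the guest and is delivered in a
 * timely manner.
 */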
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
hrtimer_restart(&vcpu->arch.comparecount_timer);
}
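
/* Restore ASID once we are scheduled back after preemption */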
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
unsigned long flags;
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
local_irq_save(flags);
vcpu->cpu = cpu;
if (vcpu->arch.last_sched_cpu != cpu) {
kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
kvm_mips_migrate_count(vcpu);
}
kvm_mips_callbacks->vcpu_load(vcpu, cpu);
local_irq_restore(flags);
}
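
/* ASID can change if another task is scheduled during preemption */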
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
unsigned long flags;
int cpu;
local_irq_save(flags);
cpu = smp_processor_id();
vcpu->arch.last_sched_cpu = cpu;
vcpu->cpu = -1;
kvm_mips_callbacks->vcpu_put(vcpu, cpu);
local_irq_restore(flags);
}