Path: arch/powerpc/kvm/book3s_64_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <[email protected]>
 *     Kevin Wolf <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/pkeys.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
                                     pte->pagesize, pte->pagesize,
                                     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
        return NULL;
}
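/*
 * Resolve a guest page fault: translate the guest PTE to a host pfn,
 * find (or create) a host VSID for the faulting segment, and insert a
 * shadow HPTE into the host hash table. On a full primary hash group we
 * retry in the secondary group, and we return -EAGAIN if the mapping
 * raced with an MMU invalidation so the caller can fault again.
 */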
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        unsigned long vpn;
        kvm_pfn_t hpaddr;
        ulong hash, hpteg;
        u64 vsid;
        int ret;
        int rflags = 0x192;
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
        int hpsize = MMU_PAGE_4K;
        bool writable;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;
        struct page *page;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /* Get host physical address for gpa */
        pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr = pfn << PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                       vsid, orig_pte->eaddr);
                WARN_ON(true);
                r = -EINVAL;
                goto out;
        }

        vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

        if (!orig_pte->may_write || !writable)
                rflags |= PP_RXRX;
        else
                mark_page_dirty(vcpu->kvm, gfn);

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
        else
                kvmppc_mmu_flush_icache(pfn);

        rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
        rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

        /*
         * Use 64K pages if possible; otherwise, on 64K page kernels,
         * we need to transfer 4 more bits from guest real to host real addr.
         */
        if (vsid & VSID_64K)
                hpsize = MMU_PAGE_64K;
        else
                hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

        cpte = kvmppc_mmu_hpte_cache_next(vcpu);

        spin_lock(&kvm->mmu_lock);
        if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }

map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1)
                if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
                        r = -1;
                        goto out_unlock;
                }

        ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                                       hpsize, hpsize, MMU_SEGSIZE_256M);

        if (ret == -1) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else if (ret < 0) {
                r = -EIO;
                goto out_unlock;
        } else {
                trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                                            vpn, hpaddr, orig_pte);

                /*
                 * The mmu_hash_ops code may give us a secondary entry even
                 * though we asked for a primary. Fix up.
                 */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                cpte->slot = hpteg + (ret & 7);
                cpte->host_vpn = vpn;
                cpte->pte = *orig_pte;
                cpte->pfn = pfn;
                cpte->pagesize = hpsize;

                kvmppc_mmu_hpte_cache_map(vcpu, cpte);
                cpte = NULL;
        }

out_unlock:
        /* FIXME: Don't unconditionally pass unused=false. */
        kvm_release_faultin_page(kvm, page, false,
                                 orig_pte->may_write && writable);
        spin_unlock(&kvm->mmu_lock);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);

out:
        return r;
}

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        u64 mask = 0xfffffffffULL;
        u64 vsid;

        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
        if (vsid & VSID_64K)
                mask = 0xffffffff0ULL;
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
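/*
 * Allocate a fresh host VSID for a guest VSID that has no mapping yet.
 * Each guest VSID hashes to two possible sid_map slots (the hash and its
 * mirror, which find_sid_vsid() above probes in that order); alternating
 * between them via backwards_map lets a colliding guest VSID still get
 * its own slot. When the proto-VSID space is exhausted, all shadow state
 * is flushed and numbering restarts at proto_vsid_first.
 */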
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        unsigned long vsid_bits = VSID_BITS_65_256M;
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
                vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }

        if (mmu_has_feature(MMU_FTR_68_BIT_VA))
                vsid_bits = VSID_BITS_256M;

        map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
                                       VSID_MULTIPLIER_256M, vsid_bits);

        map->guest_vsid = gvsid;
        map->valid = true;

        trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

        return map;
}

static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        /* Are we overwriting? */
        for (i = 0; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
                        r = i;
                        goto out;
                }
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval >= 0) {
                r = found_inval;
                goto out;
        }

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if ((svcpu->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = svcpu->slb_max;
        svcpu->slb_max++;

out:
        svcpu_put(svcpu);
        return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
        int r = 0;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->slb[slb_index].esid = 0;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
        /* Set host segment base page size to 64K if possible */
        if (gvsid & VSID_64K)
                slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;

        trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong seg_mask = -seg_size;
        int i;

        for (i = 0; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
                        svcpu->slb[i].esid = 0;
                }
        }

        svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        svcpu->slb_max = 0;
        svcpu->slb[0].esid = 0;
        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id[0]);
}
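/*
 * Set up the shadow-MMU state for one vCPU: reserve a host MMU context id
 * and derive this vCPU's proto-VSID range from it (context_id << ESID_BITS
 * through ((context_id + 1) << ESID_BITS) - 1), so vCPUs draw their host
 * VSIDs from disjoint ranges.
 */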
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = hash__alloc_context_id();
        if (err < 0)
                return -1;
        vcpu3s->context_id[0] = err;

        vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
                                  << ESID_BITS) - 1;
        vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}