#ifndef ARCH_KVM_S390_GMAP_H
#define ARCH_KVM_S390_GMAP_H
#include "dat.h"
/*
 * Bit numbers within gmap::flags.  Individual bits are queried with
 * test_bit() through the accessors below (is_shadow(), uses_skeys(), ...).
 */
enum gmap_flags {
	/* This gmap shadows a guest-provided ASCE for VSIE (see is_shadow()). */
	GMAP_FLAG_SHADOW = 0,
	/* The gmap owns its DAT page tables -- TODO confirm exact semantics at the allocation/teardown sites. */
	GMAP_FLAG_OWNS_PAGETABLES,
	/* Address space of a user-controlled (UCONTROL) VM (see is_ucontrol()). */
	GMAP_FLAG_IS_UCONTROL,
	/* 1M huge pages allowed -- presumably EDAT1; confirm where the flag is set. */
	GMAP_FLAG_ALLOW_HPAGE_1M,
	/* 2G huge pages allowed -- presumably EDAT2; confirm where the flag is set. */
	GMAP_FLAG_ALLOW_HPAGE_2G,
	/* Pseudo-page-fault handling enabled for this guest (see pfault_enabled()). */
	GMAP_FLAG_PFAULT_ENABLED,
	/* Guest uses storage keys (see uses_skeys(); cf. gmap_enable_skeys()). */
	GMAP_FLAG_USES_SKEYS,
	/* Guest uses collaborative memory management (see uses_cmm()). */
	GMAP_FLAG_USES_CMM,
	/* Pages must be exported when unmapped -- protected-VM related; TODO confirm at unmap paths. */
	GMAP_FLAG_EXPORT_ON_UNMAP,
};
/*
 * A guest address space (DAT table hierarchy) managed by KVM.  A gmap is
 * either a regular guest address space or, when GMAP_FLAG_SHADOW is set,
 * a shadow of a guest ASCE used for nested (VSIE) guests.
 */
struct gmap {
	/* Bitmap of enum gmap_flags bit numbers (test_bit() et al.). */
	unsigned long flags;
	/* EDAT level; compared against the requested level in gmap_is_shadow_valid(). */
	unsigned char edat_level;
	/* The VM this address space belongs to. */
	struct kvm *kvm;
	/* ASCE describing this gmap's own DAT tables (passed to __dat_ptep_xchg()). */
	union asce asce;
	/* List membership -- presumably links into the parent's ->children; confirm at gmap_new_child(). */
	struct list_head list;
	/* Protects ->children; held around _gmap_handle_vsie_unshadow_event(). */
	spinlock_t children_lock;
	/* Child gmaps (see gmap_new_child()/gmap_remove_child()). */
	struct list_head children;
	/* NOTE(review): presumably VSIE shadow control blocks using this gmap -- confirm at the VSIE code. */
	struct list_head scb_users;
	/* Parent gmap, if any. */
	struct gmap *parent;
	/* For shadows: the guest ASCE being shadowed (checked in gmap_is_shadow_valid()). */
	union asce guest_asce;
	/* Protects ->host_to_rmap. */
	spinlock_t host_to_rmap_lock;
	/* Reverse-mapping radix tree -- presumably keyed by host gfn; chains walked with gmap_for_each_rmap_safe(). */
	struct radix_tree_root host_to_rmap;
	/* Reference count; see gmap_get()/gmap_put(). */
	refcount_t refcount;
};
/*
 * List node associating a gmap with some list.  NOTE(review): the
 * caching policy is not visible in this header -- confirm at the users
 * of struct gmap_cache.
 */
struct gmap_cache {
	struct list_head list;
	struct gmap *gmap;
};
/*
 * Iterate over a NULL-terminated singly-linked rmap chain (linked via
 * ->next).  Safe against removal of the current entry: the successor is
 * cached in @n before the loop body runs.  @head may be NULL.
 *
 * All macro arguments are fully parenthesized so that non-trivial
 * expressions can be passed without precedence surprises.
 */
#define gmap_for_each_rmap_safe(pos, n, head) \
	for ((pos) = (head); (n) = (pos) ? (pos)->next : NULL, (pos); (pos) = (n))
int s390_replace_asce(struct gmap *gmap);
/* Prefix-page notification; @hint selects aging vs. real unmap (see gmap_mkold_prefix()/gmap_unmap_prefix() below). */
bool _gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end, bool hint);
/* Range callbacks: age / unmap / minor-fault fixup over guest frame ranges. */
bool gmap_age_gfn(struct gmap *gmap, gfn_t start, gfn_t end);
bool gmap_unmap_gfn_range(struct gmap *gmap, struct kvm_memory_slot *slot, gfn_t start, gfn_t end);
int gmap_try_fixup_minor(struct gmap *gmap, struct guest_fault *fault);
/* Lifecycle: creation, child management, teardown (see gmap_get()/gmap_put() below for refcounting). */
struct gmap *gmap_new(struct kvm *kvm, gfn_t limit);
struct gmap *gmap_new_child(struct gmap *parent, gfn_t limit);
void gmap_remove_child(struct gmap *child);
void gmap_dispose(struct gmap *gmap);
/* Fault handling, dirty logging, limit adjustment. */
int gmap_link(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, struct guest_fault *fault);
void gmap_sync_dirty_log(struct gmap *gmap, gfn_t start, gfn_t end);
int gmap_set_limit(struct gmap *gmap, gfn_t limit);
/* UCONTROL (user-controlled VM) address space operations. */
int gmap_ucas_translate(struct kvm_s390_mmu_cache *mc, struct gmap *gmap, gpa_t *gaddr);
int gmap_ucas_map(struct gmap *gmap, gfn_t p_gfn, gfn_t c_gfn, unsigned long count);
void gmap_ucas_unmap(struct gmap *gmap, gfn_t c_gfn, unsigned long count);
int gmap_enable_skeys(struct gmap *gmap);
/* Protected-VM range teardown; @interruptible -- presumably allows signal interruption, confirm at definition. */
int gmap_pv_destroy_range(struct gmap *gmap, gfn_t start, gfn_t end, bool interruptible);
/* VSIE shadow address spaces and their reverse mappings. */
int gmap_insert_rmap(struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn, int level);
int gmap_protect_rmap(struct kvm_s390_mmu_cache *mc, struct gmap *sg, gfn_t p_gfn, gfn_t r_gfn,
		      kvm_pfn_t pfn, int level, bool wr);
void gmap_set_cmma_all_dirty(struct gmap *gmap);
/* Caller must hold parent->children_lock (see gmap_handle_vsie_unshadow_event() below). */
void _gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn);
struct gmap *gmap_create_shadow(struct kvm_s390_mmu_cache *mc, struct gmap *gmap,
				union asce asce, int edat_level);
void gmap_split_huge_pages(struct gmap *gmap);
/* True if the guest uses storage keys (cf. gmap_enable_skeys()). */
static inline bool uses_skeys(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_USES_SKEYS, flags);
}
/* True if the guest uses collaborative memory management. */
static inline bool uses_cmm(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_USES_CMM, flags);
}
/* True if pseudo-page-fault handling is enabled for this gmap. */
static inline bool pfault_enabled(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_PFAULT_ENABLED, flags);
}
/* True if this gmap belongs to a user-controlled (UCONTROL) VM. */
static inline bool is_ucontrol(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_IS_UCONTROL, flags);
}
/* True if this gmap is a VSIE shadow of a guest ASCE. */
static inline bool is_shadow(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_SHADOW, flags);
}
/* True if this gmap owns its DAT page tables. */
static inline bool owns_page_tables(struct gmap *gmap)
{
	unsigned long *flags = &gmap->flags;

	return test_bit(GMAP_FLAG_OWNS_PAGETABLES, flags);
}
/*
 * Drop a reference; the gmap is disposed of when the last reference
 * goes away.  Always returns NULL so callers can conveniently write
 * "gmap = gmap_put(gmap);" to poison their pointer.
 */
static inline struct gmap *gmap_put(struct gmap *gmap)
{
	bool was_last = refcount_dec_and_test(&gmap->refcount);

	if (was_last)
		gmap_dispose(gmap);
	return NULL;
}
/*
 * Take an additional reference.  Only legal while the caller already
 * guarantees at least one reference is held: refcount_inc_not_zero()
 * failing means we raced with the final gmap_put(), which is a bug.
 *
 * Note: WARN_ON_ONCE() already wraps its condition in unlikely(), so no
 * extra unlikely() annotation is needed here.
 */
static inline void gmap_get(struct gmap *gmap)
{
	WARN_ON_ONCE(!refcount_inc_not_zero(&gmap->refcount));
}
/*
 * Locked wrapper around _gmap_handle_vsie_unshadow_event(): takes the
 * parent's children_lock for the duration of the call.
 */
static inline void gmap_handle_vsie_unshadow_event(struct gmap *parent, gfn_t gfn)
{
	spin_lock(&parent->children_lock);
	_gmap_handle_vsie_unshadow_event(parent, gfn);
	spin_unlock(&parent->children_lock);
}
/* Age (rather than unmap) the prefix notification range [gfn, end). */
static inline bool gmap_mkold_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	const bool age_hint = true;

	return _gmap_unmap_prefix(gmap, gfn, end, age_hint);
}
/* Really unmap the prefix notification range [gfn, end) (hint == false). */
static inline bool gmap_unmap_prefix(struct gmap *gmap, gfn_t gfn, gfn_t end)
{
	const bool age_hint = false;

	return _gmap_unmap_prefix(gmap, gfn, end, age_hint);
}
/*
 * Exchange a page table entry, taking care of PGSTE notification bits
 * and dirty tracking before performing the actual exchange.
 *
 * @needs_lock: true when the caller does NOT hold gmap->children_lock;
 * in that case the VSIE-unshadow path takes the lock itself.
 * kvm->mmu_lock must be held in any case.
 *
 * Returns the PGSTE as produced by __dat_ptep_xchg().
 */
static inline union pgste _gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					  union pgste pgste, gfn_t gfn, bool needs_lock)
{
	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);
	else
		lockdep_assert_not_held(&gmap->children_lock);
	/*
	 * The page is (part of) a prefix and becomes protected or invalid:
	 * fire the prefix notification once and clear the bit.
	 */
	if (pgste.prefix_notif && (newpte.h.p || newpte.h.i)) {
		pgste.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + 1);
	}
	/*
	 * VSIE shadows referencing this page go stale when the protection
	 * bit changes or the entry is invalidated; notify once.
	 */
	if (pgste.vsie_notif && (ptep->h.p != newpte.h.p || newpte.h.i)) {
		pgste.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	/*
	 * Entry transitions to (software-)dirty: mark the backing page
	 * dirty, unless newpte.s.s is set -- NOTE(review): semantics of
	 * the s bit are not visible here, confirm in dat.h.
	 */
	if (!ptep->s.d && newpte.s.d && !newpte.s.s)
		SetPageDirty(pfn_to_page(newpte.h.pfra));
	return __dat_ptep_xchg(ptep, pgste, newpte, gfn, gmap->asce, uses_skeys(gmap));
}
/*
 * PTE exchange for callers that do not hold gmap->children_lock; the
 * VSIE-unshadow path takes the lock internally when needed.
 */
static inline union pgste gmap_ptep_xchg(struct gmap *gmap, union pte *ptep, union pte newpte,
					 union pgste pgste, gfn_t gfn)
{
	return _gmap_ptep_xchg(gmap, ptep, newpte, pgste, gfn, /* needs_lock = */ true);
}
/*
 * Atomically exchange a region/segment table entry, taking care of
 * prefix and VSIE notifications and dirty tracking for large mappings.
 *
 * @needs_lock: true when the caller does NOT hold gmap->children_lock;
 * the VSIE-unshadow path then takes it itself.  kvm->mmu_lock must be
 * held in any case.
 *
 * Returns true immediately (without exchanging) when the table types of
 * the entries disagree, which is a bug (KVM_BUG_ON); otherwise returns
 * the result of dat_crstep_xchg_atomic().
 */
static inline bool __must_check _gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							 union crste oldcrste, union crste newcrste,
							 gfn_t gfn, bool needs_lock)
{
	/* Pages covered by one entry: pmd level vs. the next level up (constants from dat.h). */
	unsigned long align = is_pmd(newcrste) ? _PAGE_ENTRIES : _PAGE_ENTRIES * _CRST_ENTRIES;

	/* The table type must not change across the exchange. */
	if (KVM_BUG_ON(crstep->h.tt != oldcrste.h.tt || newcrste.h.tt != oldcrste.h.tt, gmap->kvm))
		return true;
	lockdep_assert_held(&gmap->kvm->mmu_lock);
	if (!needs_lock)
		lockdep_assert_held(&gmap->children_lock);
	/*
	 * NOTE(review): unlike _gmap_ptep_xchg(), there is no
	 * lockdep_assert_not_held() for the needs_lock case -- confirm
	 * whether this asymmetry is intentional.
	 */
	gfn = ALIGN_DOWN(gfn, align);
	/*
	 * The mapping covers (part of) a prefix and becomes protected,
	 * invalid, or loses prefix coverage: notify once and clear the bit.
	 */
	if (crste_prefix(oldcrste) && (newcrste.h.p || newcrste.h.i || !crste_prefix(newcrste))) {
		newcrste.s.fc1.prefix_notif = 0;
		gmap_unmap_prefix(gmap, gfn, gfn + align);
	}
	/*
	 * VSIE shadows of this large mapping go stale on protection change,
	 * invalidation, or loss of the notification bit; notify once.
	 */
	if (crste_leaf(oldcrste) && oldcrste.s.fc1.vsie_notif &&
	    (newcrste.h.p || newcrste.h.i || !newcrste.s.fc1.vsie_notif)) {
		newcrste.s.fc1.vsie_notif = 0;
		if (needs_lock)
			gmap_handle_vsie_unshadow_event(gmap, gfn);
		else
			_gmap_handle_vsie_unshadow_event(gmap, gfn);
	}
	/*
	 * Large mapping transitions to (software-)dirty: mark the backing
	 * page dirty, unless the s bit is set -- NOTE(review): semantics of
	 * the s bit are not visible here, confirm in dat.h.
	 */
	if (!oldcrste.s.fc1.d && newcrste.s.fc1.d && !newcrste.s.fc1.s)
		SetPageDirty(phys_to_page(crste_origin_large(newcrste)));
	return dat_crstep_xchg_atomic(crstep, oldcrste, newcrste, gfn, gmap->asce);
}
/*
 * CRSTE exchange for callers that do not hold gmap->children_lock; the
 * VSIE-unshadow path takes the lock internally when needed.
 */
static inline bool __must_check gmap_crstep_xchg_atomic(struct gmap *gmap, union crste *crstep,
							union crste oldcrste, union crste newcrste,
							gfn_t gfn)
{
	return _gmap_crstep_xchg_atomic(gmap, crstep, oldcrste, newcrste, gfn,
					/* needs_lock = */ true);
}
/*
 * Check whether shadow @sg still matches the requested guest ASCE and
 * EDAT level, i.e. whether it can be reused instead of rebuilt.
 */
static inline bool gmap_is_shadow_valid(struct gmap *sg, union asce asce, int edat_level)
{
	if (sg->guest_asce.val != asce.val)
		return false;
	return sg->edat_level == edat_level;
}
#endif