#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>

#include <kvm/arm_vgic.h>

#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_nested.h>

#include "vgic.h"

#define ICH_LRN(n)	(ICH_LR0_EL2 + (n))
#define ICH_AP0RN(n)	(ICH_AP0R0_EL2 + (n))
#define ICH_AP1RN(n)	(ICH_AP1R0_EL2 + (n))
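
/*
 * Summary of the maintenance-interrupt-relevant state derived from the
 * guest hypervisor's list registers: its EISR and ELRSR values, plus
 * whether any LR still holds a pending interrupt.
 */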
struct mi_state {
	u16	eisr;
	u16	elrsr;
	bool	pend;
};
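
/*
 * Shadow copy of the GICv3 CPU interface that is actually loaded onto the
 * hardware while a nested guest runs with the virtual IMO/FMO bits set.
 * lr_map records which of the guest hypervisor's LRs are mirrored into the
 * compacted shadow LR array.
 */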
struct shadow_if {
	struct vgic_v3_cpu_if	cpuif;
	unsigned long		lr_map;
};

static DEFINE_PER_CPU(struct shadow_if, shadow_if);
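
/*
 * Convert an index into the guest hypervisor's LR space into an index into
 * the compacted shadow LR array by counting how many lower LRs are in use.
 */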
static int lr_map_idx_to_shadow_idx(struct shadow_if *shadow_if, int idx)
{
	return hweight16(shadow_if->lr_map & (BIT(idx) - 1));
}
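
/*
 * The vgic state is "nested" when running a guest of the guest hypervisor
 * with its virtual HCR_EL2.IMO/FMO bits set. Split IMO/FMO configurations
 * are not supported.
 */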
bool vgic_state_is_nested(struct kvm_vcpu *vcpu)
{
	u64 xmo;

	if (is_nested_ctxt(vcpu)) {
		xmo = __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_IMO | HCR_FMO);
		WARN_ONCE(xmo && xmo != (HCR_IMO | HCR_FMO),
			  "Separate virtual IRQ/FIQ settings not supported\n");

		return !!xmo;
	}

	return false;
}

static struct shadow_if *get_shadow_if(void)
{
	return this_cpu_ptr(&shadow_if);
}

static bool lr_triggers_eoi(u64 lr)
{
	return !(lr & (ICH_LR_STATE | ICH_LR_HW)) && (lr & ICH_LR_EOI);
}
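
/*
 * Compute the EISR, ELRSR and pending state that the guest hypervisor would
 * observe, based on the LRs it has programmed (not on the shadow copies).
 */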
static void vgic_compute_mi_state(struct kvm_vcpu *vcpu, struct mi_state *mi_state)
{
	u16 eisr = 0, elrsr = 0;
	bool pend = false;

	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		if (lr_triggers_eoi(lr))
			eisr |= BIT(i);
		if (!(lr & ICH_LR_STATE))
			elrsr |= BIT(i);
		pend |= (lr & ICH_LR_PENDING_BIT);
	}

	mi_state->eisr = eisr;
	mi_state->elrsr = elrsr;
	mi_state->pend = pend;
}
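
/*
 * Accessors returning the guest hypervisor's view of ICH_EISR_EL2,
 * ICH_ELRSR_EL2 and ICH_MISR_EL2, derived from the LRs it has programmed.
 */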
u16 vgic_v3_get_eisr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;

	vgic_compute_mi_state(vcpu, &mi_state);
	return mi_state.eisr;
}

u16 vgic_v3_get_elrsr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;

	vgic_compute_mi_state(vcpu, &mi_state);
	return mi_state.elrsr;
}

u64 vgic_v3_get_misr(struct kvm_vcpu *vcpu)
{
	struct mi_state mi_state;
	u64 reg = 0, hcr, vmcr;

	hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
	vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);

	vgic_compute_mi_state(vcpu, &mi_state);

	if (mi_state.eisr)
		reg |= ICH_MISR_EL2_EOI;

	if (hcr & ICH_HCR_EL2_UIE) {
		int used_lrs = kvm_vgic_global_state.nr_lr;

		used_lrs -= hweight16(mi_state.elrsr);
		reg |= (used_lrs <= 1) ? ICH_MISR_EL2_U : 0;
	}

	if ((hcr & ICH_HCR_EL2_LRENPIE) && FIELD_GET(ICH_HCR_EL2_EOIcount_MASK, hcr))
		reg |= ICH_MISR_EL2_LRENP;

	if ((hcr & ICH_HCR_EL2_NPIE) && !mi_state.pend)
		reg |= ICH_MISR_EL2_NP;

	if ((hcr & ICH_HCR_EL2_VGrp0EIE) && (vmcr & ICH_VMCR_ENG0_MASK))
		reg |= ICH_MISR_EL2_VGrp0E;

	if ((hcr & ICH_HCR_EL2_VGrp0DIE) && !(vmcr & ICH_VMCR_ENG0_MASK))
		reg |= ICH_MISR_EL2_VGrp0D;

	if ((hcr & ICH_HCR_EL2_VGrp1EIE) && (vmcr & ICH_VMCR_ENG1_MASK))
		reg |= ICH_MISR_EL2_VGrp1E;

	if ((hcr & ICH_HCR_EL2_VGrp1DIE) && !(vmcr & ICH_VMCR_ENG1_MASK))
		reg |= ICH_MISR_EL2_VGrp1D;

	return reg;
}
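
/*
 * For a HW-mapped LR programmed by the guest hypervisor, rewrite the pINTID
 * field with the host interrupt backing the mapping, and drop the HW bit if
 * no valid mapping exists.
 */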
static u64 translate_lr_pintid(struct kvm_vcpu *vcpu, u64 lr)
{
	struct vgic_irq *irq;

	if (!(lr & ICH_LR_HW))
		return lr;

	irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
	if (!irq || !irq->hw || irq->intid > VGIC_MAX_SPI)
		lr &= ~ICH_LR_HW;

	if (irq) {
		lr &= ~ICH_LR_PHYS_ID_MASK;
		lr |= FIELD_PREP(ICH_LR_PHYS_ID_MASK, (u64)irq->hwintid);
		vgic_put_irq(vcpu->kvm, irq);
	}

	return lr;
}
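
/*
 * Build the shadow LR array: only the guest hypervisor's LRs that carry
 * state are copied, compacted towards the bottom of the array, with lr_map
 * recording which guest LR each shadow slot came from.
 */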
static void vgic_v3_create_shadow_lr(struct kvm_vcpu *vcpu,
				     struct vgic_v3_cpu_if *s_cpu_if)
{
	struct shadow_if *shadow_if;

	shadow_if = container_of(s_cpu_if, struct shadow_if, cpuif);
	shadow_if->lr_map = 0;

	for (int i = 0; i < kvm_vgic_global_state.nr_lr; i++) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		if (!(lr & ICH_LR_STATE))
			continue;

		lr = translate_lr_pintid(vcpu, lr);

		s_cpu_if->vgic_lr[hweight16(shadow_if->lr_map)] = lr;
		shadow_if->lr_map |= BIT(i);
	}

	s_cpu_if->used_lrs = hweight16(shadow_if->lr_map);
}
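
/*
 * Scan the shadow LRs on exit: if a HW-mapped interrupt placed in an LR no
 * longer has any state, the nested guest has deactivated it, so mark the
 * corresponding vgic_irq as inactive.
 */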
void vgic_v3_sync_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	int i;

	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		u64 lr = __vcpu_sys_reg(vcpu, ICH_LRN(i));
		struct vgic_irq *irq;

		if (!(lr & ICH_LR_HW) || !(lr & ICH_LR_STATE))
			continue;

		irq = vgic_get_vcpu_irq(vcpu, FIELD_GET(ICH_LR_PHYS_ID_MASK, lr));
		if (WARN_ON(!irq))
			continue;

		lr = __gic_v3_get_lr(lr_map_idx_to_shadow_idx(shadow_if, i));
		if (!(lr & ICH_LR_STATE))
			irq->active = false;

		vgic_put_irq(vcpu->kvm, irq);
	}
}
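
/*
 * Assemble the full shadow CPU interface from the guest hypervisor's
 * ICH_*_EL2 registers, folding in any trap bits the host requires when the
 * cpuif-trapping workaround is active, then populate the shadow LRs.
 */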
static void vgic_v3_create_shadow_state(struct kvm_vcpu *vcpu,
					struct vgic_v3_cpu_if *s_cpu_if)
{
	struct vgic_v3_cpu_if *host_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val = 0;
	int i;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap))
		val = host_if->vgic_hcr & (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
					   ICH_HCR_EL2_TC | ICH_HCR_EL2_TDIR);

	s_cpu_if->vgic_hcr = __vcpu_sys_reg(vcpu, ICH_HCR_EL2) | val;
	s_cpu_if->vgic_vmcr = __vcpu_sys_reg(vcpu, ICH_VMCR_EL2);
	s_cpu_if->vgic_sre = host_if->vgic_sre;

	for (i = 0; i < 4; i++) {
		s_cpu_if->vgic_ap0r[i] = __vcpu_sys_reg(vcpu, ICH_AP0RN(i));
		s_cpu_if->vgic_ap1r[i] = __vcpu_sys_reg(vcpu, ICH_AP1RN(i));
	}

	vgic_v3_create_shadow_lr(vcpu, s_cpu_if);
}
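
/*
 * Build the shadow state and load it onto the hardware: restore VMCR/APRs,
 * activate the traps, restore the shadow LRs, and mirror the number of used
 * LRs into the regular cpuif.
 */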
void vgic_v3_load_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	struct vgic_v3_cpu_if *cpu_if = &shadow_if->cpuif;

	BUG_ON(!vgic_state_is_nested(vcpu));

	vgic_v3_create_shadow_state(vcpu, cpu_if);

	__vgic_v3_restore_vmcr_aprs(cpu_if);
	__vgic_v3_activate_traps(cpu_if);
	__vgic_v3_restore_state(cpu_if);

	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = cpu_if->used_lrs;
}
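
/*
 * Save the shadow state back into the guest hypervisor's registers: the
 * hardware EOIcount, VMCR and APRs, plus the state bits of every LR that
 * was mirrored into a shadow LR.
 */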
void vgic_v3_put_nested(struct kvm_vcpu *vcpu)
{
	struct shadow_if *shadow_if = get_shadow_if();
	struct vgic_v3_cpu_if *s_cpu_if = &shadow_if->cpuif;
	u64 val;
	int i;

	__vgic_v3_save_vmcr_aprs(s_cpu_if);
	__vgic_v3_deactivate_traps(s_cpu_if);
	__vgic_v3_save_state(s_cpu_if);

	val = __vcpu_sys_reg(vcpu, ICH_HCR_EL2);
	val &= ~ICH_HCR_EL2_EOIcount_MASK;
	val |= (s_cpu_if->vgic_hcr & ICH_HCR_EL2_EOIcount_MASK);
	__vcpu_assign_sys_reg(vcpu, ICH_HCR_EL2, val);
	__vcpu_assign_sys_reg(vcpu, ICH_VMCR_EL2, s_cpu_if->vgic_vmcr);

	for (i = 0; i < 4; i++) {
		__vcpu_assign_sys_reg(vcpu, ICH_AP0RN(i), s_cpu_if->vgic_ap0r[i]);
		__vcpu_assign_sys_reg(vcpu, ICH_AP1RN(i), s_cpu_if->vgic_ap1r[i]);
	}

	for_each_set_bit(i, &shadow_if->lr_map, kvm_vgic_global_state.nr_lr) {
		val = __vcpu_sys_reg(vcpu, ICH_LRN(i));

		val &= ~ICH_LR_STATE;
		val |= s_cpu_if->vgic_lr[lr_map_idx_to_shadow_idx(shadow_if, i)] & ICH_LR_STATE;

		__vcpu_assign_sys_reg(vcpu, ICH_LRN(i), val);
	}

	vcpu->arch.vgic_cpu.vgic_v3.used_lrs = 0;
}
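
/*
 * Handle a maintenance interrupt taken while a nested guest was running:
 * reflect the hardware ICH_MISR_EL2 into the maintenance interrupt injected
 * into the guest hypervisor, and disable the (shadow) CPU interface.
 */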
void vgic_v3_handle_nested_maint_irq(struct kvm_vcpu *vcpu)
{
	bool state = read_sysreg_s(SYS_ICH_MISR_EL2);

	/* Set the level of the L1 maintenance interrupt from ICH_MISR_EL2 */
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, state, vcpu);

	/* Turn off the shadow CPU interface so the condition stops firing */
	sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
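
/*
 * Re-evaluate the software maintenance interrupt: its level tracks
 * ICH_HCR_EL2.En together with the MISR value computed for the guest
 * hypervisor.
 */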
void vgic_v3_nested_update_mi(struct kvm_vcpu *vcpu)
{
	bool level;

	level = (__vcpu_sys_reg(vcpu, ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
		vgic_v3_get_misr(vcpu);
	kvm_vgic_inject_irq(vcpu->kvm, vcpu,
			    vcpu->kvm->arch.vgic.mi_intid, level, vcpu);
}