// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
 *
 * Emulation of the LoongArch guest "stable timer" (CSR.TCFG / CSR.TVAL).
 * While a vCPU is running, the hardware timer CSRs are used directly; when
 * the vCPU is put or blocks, the remaining time is carried by an hrtimer
 * (vcpu->arch.swtimer) so the guest interrupt still fires on schedule.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_csr.h>
#include <asm/kvm_vcpu.h>

/*
 * ktime_to_tick() - Scale ktime_t to timer tick value.
 *
 * timer_mhz holds the guest timer frequency in binary-mega (2^20) Hz units
 * (see kvm_init_timer()), so ns * timer_mhz / MNSEC_PER_SEC yields ticks.
 */
static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
{
	u64 delta;

	delta = ktime_to_ns(now);
	return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
}

/* Inverse of ktime_to_tick(): convert a timer tick count to nanoseconds. */
static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
{
	return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
}

/*
 * Low level hrtimer wake routine.
 *
 * Runs when the soft timer armed in _kvm_save_timer() expires while the
 * vCPU is blocked: queue the guest timer interrupt (INT_TI) and wake the
 * vCPU so it can take it.
 */
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
	kvm_queue_irq(vcpu, INT_TI);
	rcuwait_wake_up(&vcpu->wait);

	return HRTIMER_NORESTART;
}

/*
 * Initialise the timer to the specified frequency, zero it
 *
 * timer_hz >> 20 stores the frequency in binary-mega Hz, matching the
 * scaling used by ktime_to_tick()/tick_to_ns().
 */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
{
	vcpu->arch.timer_mhz = timer_hz >> 20;

	/* Starting at 0 */
	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
}

/*
 * Restore hard timer state from the saved soft context, reversing
 * kvm_save_timer().  Called when the vCPU is loaded again.
 */
void kvm_restore_timer(struct kvm_vcpu *vcpu)
{
	unsigned long cfg, estat;
	unsigned long ticks, delta, period;
	ktime_t expire, now;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/*
	 * Set guest stable timer cfg csr
	 * Disable timer before restore estat CSR register, avoid to
	 * get invalid timer interrupt for old timer cfg
	 */
	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);

	write_gcsr_timercfg(0);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	if (!(cfg & CSR_TCFG_EN)) {
		/* Guest timer is disabled, just restore timer registers */
		kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
		return;
	}

	/*
	 * Freeze the soft-timer and sync the guest stable timer with it.
	 */
	if (kvm_vcpu_is_blocking(vcpu))
		hrtimer_cancel(&vcpu->arch.swtimer);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If oneshot timer is fired, CSR TVAL will be -1, there are two
	 * conditions:
	 *  1) timer is fired during exiting to host
	 *  2) timer is fired and vm is doing timer irq, and then exiting to
	 *     host. Host should not inject timer irq to avoid spurious
	 *     timer interrupt again
	 */
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
	estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	/*
	 * ticks/cfg are unsigned: a fired oneshot leaves TVAL at -1
	 * (all ones), which is larger than any programmed TCFG value.
	 */
	if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
		/*
		 * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq
		 * and set CSR TVAL with -1
		 */
		write_gcsr_timertick(0);

		/*
		 * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
		 * timer interrupt, and CSR TVAL keeps unchanged with -1, it
		 * avoids spurious timer interrupt
		 */
		if (!(estat & CPU_TIMER))
			gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
		return;
	}

	/*
	 * Set remainder tick value if not expired
	 */
	delta = 0;
	now = ktime_get();
	expire = vcpu->arch.expire;	/* deadline recorded by _kvm_save_timer() */
	if (ktime_before(now, expire))
		delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
	else if (cfg & CSR_TCFG_PERIOD) {
		/* Periodic timer expired while away: resync phase to the period */
		period = cfg & CSR_TCFG_VAL;
		delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
		delta = period - (delta % period);

		/*
		 * Inject timer here though sw timer should inject timer
		 * interrupt async already, since sw timer may be cancelled
		 * during injecting intr async
		 */
		kvm_queue_irq(vcpu, INT_TI);
	}

	write_gcsr_timertick(delta);
}

/*
 * Save guest timer state and switch to software emulation of guest
 * timer.
 * The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_save_timer(struct kvm_vcpu *vcpu)
{
	unsigned long ticks, delta, cfg;
	ktime_t expire;
	struct loongarch_csrs *csr = vcpu->arch.csr;

	cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
	ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);

	/*
	 * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
	 * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
	 * If oneshot timer is fired, CSR TVAL will be -1
	 * Here judge one-shot timer fired by checking whether TVAL is larger
	 * than TCFG
	 */
	if (ticks < cfg)
		delta = tick_to_ns(vcpu, ticks);
	else
		delta = 0;	/* oneshot already fired: treat as expired now */

	/* Record the absolute deadline; kvm_restore_timer() consumes it */
	expire = ktime_add_ns(ktime_get(), delta);
	vcpu->arch.expire = expire;
	if (kvm_vcpu_is_blocking(vcpu)) {

		/*
		 * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
		 * the same physical cpu in next time, and the timer should run
		 * in hardirq context even in the PREEMPT_RT case.
		 */
		hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
	}
}

/*
 * Save guest timer state and switch to soft guest timer if hard timer was in
 * use.
 */
void kvm_save_timer(struct kvm_vcpu *vcpu)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	/* _kvm_save_timer() relies on the hard timer; keep preemption off */
	preempt_disable();

	/* Save hard timer state */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
	if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
		_kvm_save_timer(vcpu);

	/* Save timer-related state to vCPU context */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
	preempt_enable();
}