Path: arch/arm64/kvm/hyp/include/nvhe/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * A stand-alone ticket spinlock implementation for use by the non-VHE
 * KVM hypervisor code running at EL2.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <[email protected]>
 *
 * Heavily based on the implementation removed by c11090474d70 which was:
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
#define __ARM64_KVM_NVHE_SPINLOCK_H__

#include <asm/alternative.h>
#include <asm/lse.h>
#include <asm/rwonce.h>

typedef union hyp_spinlock {
        u32     __val;
        struct {
#ifdef __AARCH64EB__
                u16 next, owner;
#else
                u16 owner, next;
#endif
        };
} hyp_spinlock_t;

#define __HYP_SPIN_LOCK_INITIALIZER \
        { .__val = 0 }

#define __HYP_SPIN_LOCK_UNLOCKED \
        ((hyp_spinlock_t) __HYP_SPIN_LOCK_INITIALIZER)

#define DEFINE_HYP_SPINLOCK(x) hyp_spinlock_t x = __HYP_SPIN_LOCK_UNLOCKED

#define hyp_spin_lock_init(l)                                           \
do {                                                                    \
        *(l) = __HYP_SPIN_LOCK_UNLOCKED;                                \
} while (0)

static inline void hyp_spin_lock(hyp_spinlock_t *lock)
{
        u32 tmp;
        hyp_spinlock_t lockval, newval;

        asm volatile(
        /* Atomically increment the next ticket. */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
"       prfm    pstl1strm, %3\n"
"1:     ldaxr   %w0, %3\n"
"       add     %w1, %w0, #(1 << 16)\n"
"       stxr    %w2, %w1, %3\n"
"       cbnz    %w2, 1b\n",
        /* LSE atomics */
"       mov     %w2, #(1 << 16)\n"
"       ldadda  %w2, %w0, %3\n"
        __nops(3))

        /* Did we get the lock? */
"       eor     %w1, %w0, %w0, ror #16\n"
"       cbz     %w1, 3f\n"
        /*
         * No: spin on the owner. Send a local event to avoid missing an
         * unlock before the exclusive load.
         */
"       sevl\n"
"2:     wfe\n"
"       ldaxrh  %w2, %4\n"
"       eor     %w1, %w2, %w0, lsr #16\n"
"       cbnz    %w1, 2b\n"
        /* We got the lock. Critical section starts here. */
"3:"
        : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
        : "Q" (lock->owner)
        : "memory");
}

static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
{
        u64 tmp;

        asm volatile(
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
        "       ldrh    %w1, %0\n"
        "       add     %w1, %w1, #1\n"
        "       stlrh   %w1, %0",
        /* LSE atomics */
        "       mov     %w1, #1\n"
        "       staddlh %w1, %0\n"
        __nops(1))
        : "=Q" (lock->owner), "=&r" (tmp)
        :
        : "memory");
}

static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
{
        hyp_spinlock_t lockval = READ_ONCE(*lock);

        return lockval.owner != lockval.next;
}

#ifdef CONFIG_NVHE_EL2_DEBUG
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock)
{
        /*
         * The __pkvm_init() path accesses protected data-structures without
         * holding locks as the other CPUs are guaranteed to not enter EL2
         * concurrently at this point in time. The point by which EL2 is
         * initialized on all CPUs is reflected in the pkvm static key, so
         * wait until it is set before checking the lock state.
         */
        if (static_branch_likely(&kvm_protected_mode_initialized))
                BUG_ON(!hyp_spin_is_locked(lock));
}
#else
static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { }
#endif

#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
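
The sketch below is not part of the header; it only illustrates how nVHE hypervisor code might use this API: a statically defined lock guarding shared EL2 state, with hyp_assert_lock_held() as a debug-only sanity check. The lock name host_mem_lock and the surrounding functions are hypothetical, and the example assumes it is built as part of an nVHE hyp object where <nvhe/spinlock.h> is on the include path.

/*
 * Usage sketch (illustrative only, hypothetical names).
 */
#include <nvhe/spinlock.h>

static DEFINE_HYP_SPINLOCK(host_mem_lock);

static void update_host_state(void)
{
        hyp_spin_lock(&host_mem_lock);
        /* ... modify data guarded by host_mem_lock ... */
        hyp_spin_unlock(&host_mem_lock);
}

static void check_host_state_locked(void)
{
        /* Compiles to a no-op unless CONFIG_NVHE_EL2_DEBUG is set. */
        hyp_assert_lock_held(&host_mem_lock);
}

Design-wise this is a classic ticket lock: hyp_spin_lock() atomically takes a ticket from the next field and waits in WFE (primed by SEVL, and woken when a store to the monitored owner halfword clears the exclusive monitor set by LDAXRH) until owner matches its ticket, giving FIFO ordering between contending CPUs; hyp_spin_unlock() simply increments owner with release semantics. Packing both halves into one 32-bit word means the unlocked state is all zeroes and hyp_spin_is_locked() can snapshot the pair with a single READ_ONCE().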