Source: torvalds/linux — arch/arm64/kvm/hyp/include/nvhe/pkvm.h (mirrored copy).
1
/* SPDX-License-Identifier: GPL-2.0-only */
2
/*
3
* Copyright (C) 2021 Google LLC
4
* Author: Fuad Tabba <[email protected]>
5
*/
6
7
#ifndef __ARM64_KVM_NVHE_PKVM_H__
8
#define __ARM64_KVM_NVHE_PKVM_H__
9
10
#include <asm/kvm_pkvm.h>
11
12
#include <nvhe/gfp.h>
13
#include <nvhe/spinlock.h>
14
15
/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	/* The hypervisor's own vCPU state instance. */
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;

	/*
	 * If this hyp vCPU is loaded, then this is a backpointer to the
	 * per-cpu pointer tracking us. Otherwise, NULL if not loaded.
	 */
	struct pkvm_hyp_vcpu **loaded_hyp_vcpu;
};
31
/*
 * Holds the relevant data for running a protected vm.
 */
struct pkvm_hyp_vm {
	/* The hypervisor's own VM state instance. */
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	/* Memory-management callbacks used by the page-table code. */
	struct kvm_pgtable_mm_ops mm_ops;
	/* Page allocator pool — presumably backs pgt allocations; verify at call sites. */
	struct hyp_pool pool;
	/* NOTE(review): guards this VM's hyp state — confirm exact scope at call sites. */
	hyp_spinlock_t lock;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};
50
/* NOTE(review): presumably serializes access to the hyp VM table — confirm in pkvm.c. */
extern hyp_spinlock_t vm_table_lock;
52
static inline struct pkvm_hyp_vm *
53
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
54
{
55
return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
56
}
57
58
static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
59
{
60
return vcpu_is_protected(&hyp_vcpu->vcpu);
61
}
62
63
static inline bool pkvm_hyp_vm_is_protected(struct pkvm_hyp_vm *hyp_vm)
64
{
65
return kvm_vm_is_protected(&hyp_vm->kvm);
66
}
67
68
/* One-time setup of the hyp VM table with the memory at @tbl. */
void pkvm_hyp_vm_table_init(void *tbl);

/*
 * VM/vCPU lifecycle entry points (hypercall-facing, per the __pkvm_ prefix).
 * The *_hva arguments carry host virtual addresses of donated memory —
 * NOTE(review): exact donation semantics live in the .c implementation.
 */
int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);

/* Load/put of the per-CPU "loaded" hyp vCPU (see pkvm_hyp_vcpu.loaded_hyp_vcpu). */
struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);
struct pkvm_hyp_vcpu *pkvm_get_loaded_hyp_vcpu(void);

/*
 * Handle-based get/put of hyp VM structures; get_np_* presumably restricts
 * to non-protected VMs — confirm against the implementation.
 */
struct pkvm_hyp_vm *get_pkvm_hyp_vm(pkvm_handle_t handle);
struct pkvm_hyp_vm *get_np_pkvm_hyp_vm(pkvm_handle_t handle);
void put_pkvm_hyp_vm(struct pkvm_hyp_vm *hyp_vm);

/* Protected-VM (pvm) sysreg trap handling and ID-register setup. */
bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
void kvm_init_pvm_id_regs(struct kvm_vcpu *vcpu);
int kvm_check_pvm_sysreg_table(void);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */