Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/selftests/kvm/include/arm64/processor.h
49022 views
1
/* SPDX-License-Identifier: GPL-2.0 */
2
/*
3
* AArch64 processor specific defines
4
*
5
* Copyright (C) 2018, Red Hat, Inc.
6
*/
7
#ifndef SELFTEST_KVM_PROCESSOR_H
8
#define SELFTEST_KVM_PROCESSOR_H
9
10
#include "kvm_util.h"
11
#include "ucall_common.h"
12
13
#include <linux/stringify.h>
14
#include <linux/types.h>
15
#include <asm/brk-imm.h>
16
#include <asm/esr.h>
17
#include <asm/sysreg.h>
18
19
20
/*
 * ARM64_CORE_REG(x): build the KVM_REG_* id of core register 'x' (a field
 * of struct kvm_regs) for use with KVM_{GET,SET}_ONE_REG.
 */
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
			   KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))

/*
 * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
 * SYS_* register definitions in asm/sysreg.h to use in KVM
 * calls such as vcpu_get_reg() and vcpu_set_reg().
 */
#define KVM_ARM64_SYS_REG(sys_reg_id)			\
	ARM64_SYS_REG(sys_reg_Op0(sys_reg_id),		\
		      sys_reg_Op1(sys_reg_id),		\
		      sys_reg_CRn(sys_reg_id),		\
		      sys_reg_CRm(sys_reg_id),		\
		      sys_reg_Op2(sys_reg_id))
34
35
/*
 * Default MAIR
 *                  index   attribute
 * DEVICE_nGnRnE      0     0000:0000
 * DEVICE_nGnRE       1     0000:0100
 * DEVICE_GRE         2     0000:1100
 * NORMAL_NC          3     0100:0100
 * NORMAL             4     1111:1111
 * NORMAL_WT          5     1011:1011
 */

/* Linux doesn't use these memory types, so let's define them. */
#define MAIR_ATTR_DEVICE_GRE	UL(0x0c)
#define MAIR_ATTR_NORMAL_WT	UL(0xbb)

/* MAIR attribute indices, matching the table above. */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5

/* MAIR_EL1 value wiring each MT_* index to its attribute encoding. */
#define DEFAULT_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
64
65
/* TCR_EL1 specific flags */
#define TCR_T0SZ_OFFSET		0
/* T0SZ holds (64 - VA bits) for the TTBR0 region. */
#define TCR_T0SZ(x)		((UL(64) - (x)) << TCR_T0SZ_OFFSET)

/* Inner cacheability for TTBR0 table walks */
#define TCR_IRGN0_SHIFT		8
#define TCR_IRGN0_MASK		(UL(3) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NC		(UL(0) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBWA		(UL(1) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WT		(UL(2) << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WBnWA		(UL(3) << TCR_IRGN0_SHIFT)

/* Outer cacheability for TTBR0 table walks */
#define TCR_ORGN0_SHIFT		10
#define TCR_ORGN0_MASK		(UL(3) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NC		(UL(0) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBWA		(UL(1) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WT		(UL(2) << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WBnWA		(UL(3) << TCR_ORGN0_SHIFT)

/* Shareability for TTBR0 table walks */
#define TCR_SH0_SHIFT		12
#define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
#define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)

/* TTBR0 translation granule size */
#define TCR_TG0_SHIFT		14
#define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
#define TCR_TG0_4K		(UL(0) << TCR_TG0_SHIFT)
#define TCR_TG0_64K		(UL(1) << TCR_TG0_SHIFT)
#define TCR_TG0_16K		(UL(2) << TCR_TG0_SHIFT)

/* Intermediate physical address size */
#define TCR_IPS_SHIFT		32
#define TCR_IPS_MASK		(UL(7) << TCR_IPS_SHIFT)
#define TCR_IPS_52_BITS		(UL(6) << TCR_IPS_SHIFT)
#define TCR_IPS_48_BITS		(UL(5) << TCR_IPS_SHIFT)
#define TCR_IPS_40_BITS		(UL(2) << TCR_IPS_SHIFT)
#define TCR_IPS_36_BITS		(UL(1) << TCR_IPS_SHIFT)

#define TCR_HA			(UL(1) << 39)	/* Hardware Access flag update */
#define TCR_DS			(UL(1) << 59)	/* LPA2: 52-bit addressing with 4K/16K granules */
102
103
/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define PTE_ATTRINDX(t)		((t) << 2)
#define PTE_ATTRINDX_MASK	GENMASK(4, 2)
#define PTE_ATTRINDX_SHIFT	2

/* Descriptor type bits: bit 0 = valid, bit 1 = table/page (vs block). */
#define PTE_VALID		BIT(0)
#define PGD_TYPE_TABLE		BIT(1)
#define PUD_TYPE_TABLE		BIT(1)
#define PMD_TYPE_TABLE		BIT(1)
#define PTE_TYPE_PAGE		BIT(1)

#define PTE_SHARED		(UL(3) << 8)	/* SH[1:0], inner shareable */
#define PTE_AF			BIT(10)		/* Access flag */

/* Output address field, classic layout: bits 47:page_shift, plus 51:48. */
#define PTE_ADDR_MASK(page_shift)	GENMASK(47, (page_shift))
#define PTE_ADDR_51_48		GENMASK(15, 12)
#define PTE_ADDR_51_48_SHIFT	12
/* Output address field, LPA2 layout: bits 49:page_shift, plus 51:50 in 9:8. */
#define PTE_ADDR_MASK_LPA2(page_shift)	GENMASK(49, (page_shift))
#define PTE_ADDR_51_50_LPA2	GENMASK(9, 8)
#define PTE_ADDR_51_50_LPA2_SHIFT	8
125
126
/* Configure an existing vCPU from a kvm_vcpu_init description. */
void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init);
/* Create a vCPU with the given init and set it to start at guest_code. */
struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  struct kvm_vcpu_init *init, void *guest_code);
129
130
/* Guest register state saved/restored around an exception. */
struct ex_regs {
	u64 regs[31];	/* x0..x30 */
	u64 sp;
	u64 pc;
	u64 pstate;
};
136
137
/* Number of entries in the AArch64 exception vector table. */
#define VECTOR_NUM	16

/*
 * Vector table slots, in table order: four exception types (sync, IRQ,
 * FIQ, SError) for each of the four source contexts.
 */
enum {
	VECTOR_SYNC_CURRENT_SP0,
	VECTOR_IRQ_CURRENT_SP0,
	VECTOR_FIQ_CURRENT_SP0,
	VECTOR_ERROR_CURRENT_SP0,

	VECTOR_SYNC_CURRENT,
	VECTOR_IRQ_CURRENT,
	VECTOR_FIQ_CURRENT,
	VECTOR_ERROR_CURRENT,

	VECTOR_SYNC_LOWER_64,
	VECTOR_IRQ_LOWER_64,
	VECTOR_FIQ_LOWER_64,
	VECTOR_ERROR_LOWER_64,

	VECTOR_SYNC_LOWER_32,
	VECTOR_IRQ_LOWER_32,
	VECTOR_FIQ_LOWER_32,
	VECTOR_ERROR_LOWER_32,
};

/* True for the four synchronous-exception vectors. */
#define VECTOR_IS_SYNC(v) ((v) == VECTOR_SYNC_CURRENT_SP0 || \
			   (v) == VECTOR_SYNC_CURRENT     || \
			   (v) == VECTOR_SYNC_LOWER_64    || \
			   (v) == VECTOR_SYNC_LOWER_32)
165
166
/*
 * Report per-granule support for the given IPA limit; results are written
 * through the ipa4k/ipa16k/ipa64k out-pointers.
 */
void aarch64_get_supported_page_sizes(uint32_t ipa, uint32_t *ipa4k,
				      uint32_t *ipa16k, uint32_t *ipa64k);

/* Set up the VM's exception vector tables / wire a vCPU to use them. */
void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu);

/* Guest exception handler, invoked with the saved register state. */
typedef void(*handler_fn)(struct ex_regs *);
void vm_install_exception_handler(struct kvm_vm *vm,
		int vector, handler_fn handler);
/* As above, but for sync exceptions, additionally keyed on the ESR EC. */
void vm_install_sync_handler(struct kvm_vm *vm,
		int vector, int ec, handler_fn handler);

/* Host virtual address of the page-table entry mapping guest vaddr 'gva'. */
uint64_t *virt_get_pte_hva_at_level(struct kvm_vm *vm, vm_vaddr_t gva, int level);
uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva);
180
181
/* Busy-wait hint: issue a 'yield' so SMT/virtual siblings can progress. */
static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}
185
186
/* ARMv8 barriers: instruction sync, data sync, data memory. */
#define isb()		asm volatile("isb" : : : "memory")
#define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
#define dmb(opt)	asm volatile("dmb " #opt : : : "memory")

/* Write barrier before MMIO stores (outer-shareable, store-store). */
#define dma_wmb()	dmb(oshst)
#define __iowmb()	dma_wmb()

/* Read barrier after MMIO loads (outer-shareable, load-load/store). */
#define dma_rmb()	dmb(oshld)
194
195
/* Read barrier + control dependency after an MMIO read returning 'v'. */
#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Courtesy of arch/arm64/include/asm/io.h:			\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call	\
	 * to udelay() will be ordered due to the ISB in __delay().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})
212
213
/* 32-bit MMIO store; "rZ" lets the compiler encode a zero value as wzr. */
static __always_inline void __raw_writel(u32 val, volatile void *addr)
{
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}
217
218
/* 32-bit MMIO load. */
static __always_inline u32 __raw_readl(const volatile void *addr)
{
	u32 val;
	asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}
224
225
/* 64-bit MMIO store; "rZ" lets the compiler encode a zero value as xzr. */
static __always_inline void __raw_writeq(u64 val, volatile void *addr)
{
	asm volatile("str %0, [%1]" : : "rZ" (val), "r" (addr));
}
229
230
/* 64-bit MMIO load. */
static __always_inline u64 __raw_readq(const volatile void *addr)
{
	u64 val;
	asm volatile("ldr %0, [%1]" : "=r" (val) : "r" (addr));
	return val;
}
236
237
/*
 * Little-endian MMIO accessors. The _relaxed forms add no ordering;
 * writel/writeq precede the store with a write barrier, readl/readq
 * follow the load with __iormb() (read barrier + control dependency).
 */
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c));})
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c));})
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
246
247
248
/* Unmask IRQ and FIQ (clear DAIF bits I and F). */
static inline void local_irq_enable(void)
{
	asm volatile("msr daifclr, #3" : : : "memory");
}
252
253
/* Mask IRQ and FIQ (set DAIF bits I and F). */
static inline void local_irq_disable(void)
{
	asm volatile("msr daifset, #3" : : : "memory");
}
257
258
/* Unmask SError (clear DAIF bit A). */
static inline void local_serror_enable(void)
{
	asm volatile("msr daifclr, #4" : : : "memory");
}
262
263
/* Mask SError (set DAIF bit A). */
static inline void local_serror_disable(void)
{
	asm volatile("msr daifset, #4" : : : "memory");
}
267
268
/**
 * struct arm_smccc_res - Result from SMC/HVC call
 * @a0-a3: result values from registers 0 to 3
 */
struct arm_smccc_res {
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
};
278
279
/**
 * smccc_hvc - Invoke a SMCCC function using the hvc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);
289
290
/**
 * smccc_smc - Invoke a SMCCC function using the smc conduit
 * @function_id: the SMCCC function to be called
 * @arg0-arg6: SMCCC function arguments, corresponding to registers x1-x7
 * @res: pointer to write the return values from registers x0-x3
 */
void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1,
	       uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5,
	       uint64_t arg6, struct arm_smccc_res *res);
300
301
/* Execute a Wait For Interrupt instruction. */
void wfi(void);

/* Declare test requirements: MTE needed / default vGIC not wanted. */
void test_wants_mte(void);
void test_disable_default_vgic(void);

/* Whether vCPUs with EL2 (KVM_ARM_VCPU_HAS_EL2) can be created in 'vm'. */
bool vm_supports_el2(struct kvm_vm *vm);
308
309
/*
 * Probe whether this host supports vCPUs with EL2 by creating a
 * throwaway single-vCPU VM and querying it.
 */
static inline bool test_supports_el2(void)
{
	struct kvm_vm *probe_vm;
	bool has_el2;

	probe_vm = vm_create(1);
	has_el2 = vm_supports_el2(probe_vm);
	kvm_vm_free(probe_vm);

	return has_el2;
}
317
318
static inline bool vcpu_has_el2(struct kvm_vcpu *vcpu)
319
{
320
return vcpu->init.features[0] & BIT(KVM_ARM_VCPU_HAS_EL2);
321
}
322
323
/*
 * Switch-case entry for ctxt_reg_alias(): if the vCPU has EL2, redirect
 * an access to the EL1 register to its mapped EL2 counterpart.
 */
#define MAPPED_EL2_SYSREG(el2, el1)	\
	case SYS_##el1:			\
		if (vcpu_has_el2(vcpu))	\
			alias = SYS_##el2;	\
		break

/*
 * Translate a SYS_* sysreg encoding into the KVM register id to use for
 * this vCPU, substituting the EL2 alias of an EL1 register when the vCPU
 * runs with EL2. 'encoding' must be a compile-time constant from the
 * mapped set below; anything else fails the build via BUILD_BUG().
 */
static __always_inline u64 ctxt_reg_alias(struct kvm_vcpu *vcpu, u32 encoding)
{
	u32 alias = encoding;

	BUILD_BUG_ON(!__builtin_constant_p(encoding));

	switch (encoding) {
	MAPPED_EL2_SYSREG(SCTLR_EL2, SCTLR_EL1);
	MAPPED_EL2_SYSREG(CPTR_EL2, CPACR_EL1);
	MAPPED_EL2_SYSREG(TTBR0_EL2, TTBR0_EL1);
	MAPPED_EL2_SYSREG(TTBR1_EL2, TTBR1_EL1);
	MAPPED_EL2_SYSREG(TCR_EL2, TCR_EL1);
	MAPPED_EL2_SYSREG(VBAR_EL2, VBAR_EL1);
	MAPPED_EL2_SYSREG(AFSR0_EL2, AFSR0_EL1);
	MAPPED_EL2_SYSREG(AFSR1_EL2, AFSR1_EL1);
	MAPPED_EL2_SYSREG(ESR_EL2, ESR_EL1);
	MAPPED_EL2_SYSREG(FAR_EL2, FAR_EL1);
	MAPPED_EL2_SYSREG(MAIR_EL2, MAIR_EL1);
	MAPPED_EL2_SYSREG(TCR2_EL2, TCR2_EL1);
	MAPPED_EL2_SYSREG(PIR_EL2, PIR_EL1);
	MAPPED_EL2_SYSREG(PIRE0_EL2, PIRE0_EL1);
	MAPPED_EL2_SYSREG(POR_EL2, POR_EL1);
	MAPPED_EL2_SYSREG(AMAIR_EL2, AMAIR_EL1);
	MAPPED_EL2_SYSREG(ELR_EL2, ELR_EL1);
	MAPPED_EL2_SYSREG(SPSR_EL2, SPSR_EL1);
	MAPPED_EL2_SYSREG(ZCR_EL2, ZCR_EL1);
	MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1);
	MAPPED_EL2_SYSREG(SCTLR2_EL2, SCTLR2_EL1);
	MAPPED_EL2_SYSREG(CNTHCTL_EL2, CNTKCTL_EL1);
	case SYS_SP_EL1:
		/* Without EL2, SP_EL1 is accessed as a core register. */
		if (!vcpu_has_el2(vcpu))
			return ARM64_CORE_REG(sp_el1);

		alias = SYS_SP_EL2;
		break;
	default:
		BUILD_BUG();
	}

	return KVM_ARM64_SYS_REG(alias);
}
371
372
/* Fill 'init' with the default vCPU target/features for this VM. */
void kvm_get_default_vcpu_target(struct kvm_vm *vm, struct kvm_vcpu_init *init);
373
374
static inline unsigned int get_current_el(void)
375
{
376
return (read_sysreg(CurrentEL) >> 2) & 0x3;
377
}
378
379
/*
 * Issue an SMCCC call via the conduit matching the caller's EL:
 * smc when running at EL2, hvc otherwise.
 */
#define do_smccc(...)					\
do {							\
	if (get_current_el() == 2)			\
		smccc_smc(__VA_ARGS__);			\
	else						\
		smccc_hvc(__VA_ARGS__);			\
} while (0)
386
387
#endif /* SELFTEST_KVM_PROCESSOR_H */
388
389