GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/stacktrace.c
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_mmu.h>
#include <asm/stacktrace/nvhe.h>

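/*
 * HYP VA range of the per-CPU overflow stack, taken from the hypervisor's
 * kvm_stacktrace_info record.
 */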
static struct stack_info stackinfo_get_overflow(void)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

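/*
 * Kernel VA range of the same per-CPU overflow stack, via the kernel's
 * mapping of the nVHE per-CPU symbol.
 */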
static struct stack_info stackinfo_get_overflow_kern_va(void)
{
        unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

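/*
 * HYP VA range of the per-CPU hypervisor stack, taken from the hypervisor's
 * kvm_stacktrace_info record.
 */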
static struct stack_info stackinfo_get_hyp(void)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
        unsigned long low = (unsigned long)stacktrace_info->stack_base;
        unsigned long high = low + NVHE_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

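/*
 * Kernel VA range of the per-CPU hypervisor stack, based on the stack base
 * the host allocated (kvm_arm_hyp_stack_base).
 */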
static struct stack_info stackinfo_get_hyp_kern_va(void)
{
        unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_base);
        unsigned long high = low + NVHE_STACK_SIZE;

        return (struct stack_info) {
                .low = low,
                .high = high,
        };
}

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
        struct stack_info stack_hyp, stack_kern;

        stack_hyp = stackinfo_get_hyp();
        stack_kern = stackinfo_get_hyp_kern_va();
        if (stackinfo_on_stack(&stack_hyp, *addr, size))
                goto found;

        stack_hyp = stackinfo_get_overflow();
        stack_kern = stackinfo_get_overflow_kern_va();
        if (stackinfo_on_stack(&stack_hyp, *addr, size))
                goto found;

        return false;

found:
        *addr = *addr - stack_hyp.low + stack_kern.low;
        return true;
}

/*
 * Convert a KVM nVHE HYP frame record address to a kernel VA.
 *
 * A frame record is two 64-bit entries (the saved FP and LR), hence the
 * 16-byte size passed to kvm_nvhe_stack_kern_va().
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
        return kvm_nvhe_stack_kern_va(addr, 16);
}

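/*
 * Advance the unwind state by one frame: translate the frame pointer from
 * the HYP VA space to the kernel VA space, then let the common arm64
 * frame-record unwinder step to the next record.
 */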
static int unwind_next(struct unwind_state *state)
{
        /*
         * The FP is in the hypervisor VA space. Convert it to the kernel VA
         * space so it can be unwound by the regular unwind functions.
         */
        if (!kvm_nvhe_stack_kern_record_va(&state->fp))
                return -EINVAL;

        return unwind_next_frame_record(state);
}

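/*
 * Walk the frame records, feeding each PC to @consume_entry until it
 * returns false or no further frame can be unwound.
 */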
static void unwind(struct unwind_state *state,
                   stack_trace_consume_fn consume_entry, void *cookie)
{
        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
        unsigned long va_mask = GENMASK_ULL(__hyp_va_bits - 1, 0);
        unsigned long hyp_offset = (unsigned long)arg;

        /* Mask tags and convert to kern addr */
        where = (where & va_mask) + hyp_offset;
        kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

        return true;
}

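/* Markers printed around the dumped trace in the kernel log. */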
static void kvm_nvhe_dump_backtrace_start(void)
{
        kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
        kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
        struct kvm_nvhe_stacktrace_info *stacktrace_info;
        struct stack_info stacks[] = {
                stackinfo_get_overflow_kern_va(),
                stackinfo_get_hyp_kern_va(),
        };
        struct unwind_state state = {
                .stacks = stacks,
                .nr_stacks = ARRAY_SIZE(stacks),
        };

        stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

        kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

        kvm_nvhe_dump_backtrace_start();
        unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
        kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
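/*
 * Per-CPU buffer, filled by the hypervisor in protected mode, from which
 * the host reads the saved stacktrace.
 */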
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
                         pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
        unsigned long *stacktrace
                = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
        int i;

        kvm_nvhe_dump_backtrace_start();
        /* The saved stacktrace is terminated by a null entry */
        for (i = 0;
             i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
             i++)
                kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
        kvm_nvhe_dump_backtrace_end();
}
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
        kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
        if (is_protected_kvm_enabled())
                pkvm_dump_backtrace(hyp_offset);
        else
                hyp_dump_backtrace(hyp_offset);
}