GitHub Repository: torvalds/linux
Path: blob/master/arch/arm64/kvm/hyp/nvhe/trace.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Google LLC
 * Author: Vincent Donnefort <[email protected]>
 */

#include <nvhe/clock.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trace.h>

#include <asm/percpu.h>
#include <asm/kvm_mmu.h>
#include <asm/local.h>

#include "simple_ring_buffer.c"

static DEFINE_PER_CPU(struct simple_rb_per_cpu, __simple_rbs);

static struct hyp_trace_buffer {
	struct simple_rb_per_cpu __percpu *simple_rbs;
	void *bpages_backing_start;
	size_t bpages_backing_size;
	hyp_spinlock_t lock;
} trace_buffer = {
	.simple_rbs = &__simple_rbs,
	.lock = __HYP_SPIN_LOCK_UNLOCKED,
};

static bool hyp_trace_buffer_loaded(struct hyp_trace_buffer *trace_buffer)
{
	return trace_buffer->bpages_backing_size > 0;
}

void *tracing_reserve_entry(unsigned long length)
{
	return simple_ring_buffer_reserve(this_cpu_ptr(trace_buffer.simple_rbs), length,
					  trace_clock());
}

void tracing_commit_entry(void)
{
	simple_ring_buffer_commit(this_cpu_ptr(trace_buffer.simple_rbs));
}
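
/*
 * A minimal usage sketch for the reserve/commit pair above, using a
 * hypothetical event type that is not defined in this file:
 *
 *	struct my_hyp_event *evt = tracing_reserve_entry(sizeof(*evt));
 *
 *	if (evt) {
 *		evt->field = value;
 *		tracing_commit_entry();
 *	}
 *
 * tracing_reserve_entry() is expected to return NULL when no entry can be
 * reserved (e.g. tracing disabled or the buffer full), in which case the
 * commit must be skipped.
 */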

static int __admit_host_mem(void *start, u64 size)
{
	if (!PAGE_ALIGNED(start) || !PAGE_ALIGNED(size) || !size)
		return -EINVAL;

	if (!is_protected_kvm_enabled())
		return 0;

	return __pkvm_host_donate_hyp(hyp_virt_to_pfn(start), size >> PAGE_SHIFT);
}

static void __release_host_mem(void *start, u64 size)
{
	if (!is_protected_kvm_enabled())
		return;

	WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(start), size >> PAGE_SHIFT));
}
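
/*
 * Ownership note for the pair above: under protected KVM, pages taken from
 * the host are donated via __pkvm_host_donate_hyp(), so the host loses
 * access until __pkvm_hyp_donate_host() hands them back; without pKVM there
 * is no stage-2 protection to transfer, so both helpers reduce to no-ops.
 * Every successful __admit_host_mem() is expected to be balanced by a
 * __release_host_mem() on the same range.
 */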

static int hyp_trace_buffer_load_bpage_backing(struct hyp_trace_buffer *trace_buffer,
					       struct hyp_trace_desc *desc)
{
	void *start = (void *)kern_hyp_va(desc->bpages_backing_start);
	size_t size = desc->bpages_backing_size;
	int ret;

	ret = __admit_host_mem(start, size);
	if (ret)
		return ret;

	memset(start, 0, size);

	trace_buffer->bpages_backing_start = start;
	trace_buffer->bpages_backing_size = size;

	return 0;
}

static void hyp_trace_buffer_unload_bpage_backing(struct hyp_trace_buffer *trace_buffer)
{
	void *start = trace_buffer->bpages_backing_start;
	size_t size = trace_buffer->bpages_backing_size;

	if (!size)
		return;

	memset(start, 0, size);

	__release_host_mem(start, size);

	trace_buffer->bpages_backing_start = NULL;
	trace_buffer->bpages_backing_size = 0;
}

static void *__pin_shared_page(unsigned long kern_va)
{
	void *va = kern_hyp_va((void *)kern_va);

	if (!is_protected_kvm_enabled())
		return va;

	return hyp_pin_shared_mem(va, va + PAGE_SIZE) ? NULL : va;
}

static void __unpin_shared_page(void *va)
{
	if (!is_protected_kvm_enabled())
		return;

	hyp_unpin_shared_mem(va, va + PAGE_SIZE);
}
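
/*
 * Contrast with __admit_host_mem(): the ring-buffer pages themselves are
 * only pinned as shared memory, presumably so the host can keep reading
 * the trace data the hypervisor writes, whereas the simple_buffer_page
 * metadata backing is donated outright. When pKVM is disabled, no pinning
 * is needed and __pin_shared_page() is a plain kernel-VA to hyp-VA
 * translation.
 */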

static void hyp_trace_buffer_unload(struct hyp_trace_buffer *trace_buffer)
{
	int cpu;

	hyp_assert_lock_held(&trace_buffer->lock);

	if (!hyp_trace_buffer_loaded(trace_buffer))
		return;

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
		simple_ring_buffer_unload_mm(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
					     __unpin_shared_page);

	hyp_trace_buffer_unload_bpage_backing(trace_buffer);
}

static int hyp_trace_buffer_load(struct hyp_trace_buffer *trace_buffer,
				 struct hyp_trace_desc *desc)
{
	struct simple_buffer_page *bpages;
	struct ring_buffer_desc *rb_desc;
	int ret, cpu;

	hyp_assert_lock_held(&trace_buffer->lock);

	if (hyp_trace_buffer_loaded(trace_buffer))
		return -EINVAL;

	ret = hyp_trace_buffer_load_bpage_backing(trace_buffer, desc);
	if (ret)
		return ret;

	bpages = trace_buffer->bpages_backing_start;
	for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
		ret = simple_ring_buffer_init_mm(per_cpu_ptr(trace_buffer->simple_rbs, cpu),
						 bpages, rb_desc, __pin_shared_page,
						 __unpin_shared_page);
		if (ret)
			break;

		bpages += rb_desc->nr_page_va;
	}

	if (ret)
		hyp_trace_buffer_unload(trace_buffer);

	return ret;
}
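
/*
 * The donated bpages backing is carved up implicitly above: each CPU
 * consumes one struct simple_buffer_page per host-provided page VA, so the
 * cursor just advances by rb_desc->nr_page_va on every iteration.
 * hyp_trace_desc_validate() below ensures, before this ever runs, that the
 * backing covers the sum of nr_page_va across all CPUs.
 */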

static bool hyp_trace_desc_validate(struct hyp_trace_desc *desc, size_t desc_size)
{
	struct ring_buffer_desc *rb_desc;
	unsigned int cpu;
	size_t nr_bpages;
	void *desc_end;

	/*
	 * Both desc_size and bpages_backing_size are untrusted host-provided
	 * values. We rely on __pkvm_host_donate_hyp() to enforce their validity.
	 */
	desc_end = (void *)desc + desc_size;
	nr_bpages = desc->bpages_backing_size / sizeof(struct simple_buffer_page);

	for_each_ring_buffer_desc(rb_desc, cpu, &desc->trace_buffer_desc) {
		/* Can we read nr_page_va? */
		if ((void *)rb_desc + struct_size(rb_desc, page_va, 0) > desc_end)
			return false;

		/* Overflow desc? */
		if ((void *)rb_desc + struct_size(rb_desc, page_va, rb_desc->nr_page_va) > desc_end)
			return false;

		/* Overflow bpages backing memory? */
		if (nr_bpages < rb_desc->nr_page_va)
			return false;

		if (cpu >= hyp_nr_cpus)
			return false;

		if (cpu != rb_desc->cpu)
			return false;

		nr_bpages -= rb_desc->nr_page_va;
	}

	return true;
}
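
/*
 * For orientation, the checks above imply roughly this shape for the
 * host-filled descriptor (the real definitions live in the shared trace
 * headers; field types and order here are only an inferred sketch):
 *
 *	struct ring_buffer_desc {
 *		unsigned int	cpu;
 *		unsigned int	nr_page_va;
 *		unsigned long	page_va[];	// flexible array, hence struct_size()
 *	};
 *
 *	struct hyp_trace_desc {
 *		unsigned long			bpages_backing_start;
 *		size_t				bpages_backing_size;
 *		struct trace_buffer_desc	trace_buffer_desc;
 *	};
 *
 * Each test guards against one way a buggy or malicious host could lie: a
 * descriptor truncated before nr_page_va is readable, a nr_page_va that
 * overruns the descriptor, more pages than the donated bpages backing can
 * describe, or CPU indexes that are out of range or out of order.
 */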

int __tracing_load(unsigned long desc_hva, size_t desc_size)
{
	struct hyp_trace_desc *desc = (struct hyp_trace_desc *)kern_hyp_va(desc_hva);
	int ret;

	ret = __admit_host_mem(desc, desc_size);
	if (ret)
		return ret;

	if (!hyp_trace_desc_validate(desc, desc_size)) {
		ret = -EINVAL;
		goto err_release_desc;
	}

	hyp_spin_lock(&trace_buffer.lock);

	ret = hyp_trace_buffer_load(&trace_buffer, desc);

	hyp_spin_unlock(&trace_buffer.lock);

err_release_desc:
	__release_host_mem(desc, desc_size);

	return ret;
}

void __tracing_unload(void)
{
	hyp_spin_lock(&trace_buffer.lock);
	hyp_trace_buffer_unload(&trace_buffer);
	hyp_spin_unlock(&trace_buffer.lock);
}

int __tracing_enable(bool enable)
{
	int cpu, ret = enable ? -EINVAL : 0;

	hyp_spin_lock(&trace_buffer.lock);

	if (!hyp_trace_buffer_loaded(&trace_buffer))
		goto unlock;

	for (cpu = 0; cpu < hyp_nr_cpus; cpu++)
		simple_ring_buffer_enable_tracing(per_cpu_ptr(trace_buffer.simple_rbs, cpu),
						  enable);

	ret = 0;

unlock:
	hyp_spin_unlock(&trace_buffer.lock);

	return ret;
}

int __tracing_swap_reader(unsigned int cpu)
{
	int ret = -ENODEV;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	hyp_spin_lock(&trace_buffer.lock);

	if (hyp_trace_buffer_loaded(&trace_buffer))
		ret = simple_ring_buffer_swap_reader_page(per_cpu_ptr(trace_buffer.simple_rbs,
								      cpu));

	hyp_spin_unlock(&trace_buffer.lock);

	return ret;
}

void __tracing_update_clock(u32 mult, u32 shift, u64 epoch_ns, u64 epoch_cyc)
{
	int cpu;

	/* After this loop, all CPUs are observing the new bank... */
	for (cpu = 0; cpu < hyp_nr_cpus; cpu++) {
		struct simple_rb_per_cpu *simple_rb = per_cpu_ptr(trace_buffer.simple_rbs, cpu);

		while (READ_ONCE(simple_rb->status) == SIMPLE_RB_WRITING)
			;
	}

	/* ...we can now override the old one and swap. */
	trace_clock_update(mult, shift, epoch_ns, epoch_cyc);
}
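
/*
 * The busy-wait above acts as a drain barrier: simple_rb->status is
 * presumably SIMPLE_RB_WRITING only while a reserve/commit pair is in
 * flight, so once every CPU has been observed outside that state, no
 * writer can still be reading the stale clock bank and
 * trace_clock_update() may safely overwrite it before swapping it in.
 */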

int __tracing_reset(unsigned int cpu)
{
	int ret = -ENODEV;

	if (cpu >= hyp_nr_cpus)
		return -EINVAL;

	hyp_spin_lock(&trace_buffer.lock);

	if (hyp_trace_buffer_loaded(&trace_buffer))
		ret = simple_ring_buffer_reset(per_cpu_ptr(trace_buffer.simple_rbs, cpu));

	hyp_spin_unlock(&trace_buffer.lock);

	return ret;
}