GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/riscv/vmm/vmm_fence.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2025 Ruslan Bukin <[email protected]>
 *
 * This software was developed by the University of Cambridge Computer
 * Laboratory (Department of Computer Science and Technology) under Innovate
 * UK project 105694, "Digital Security by Design (DSbD) Technology Platform
 * Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

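/*
 * Per-vCPU fence request handling for the RISC-V vmm: vmm_fence_add()
 * posts instruction/TLB fence requests to a set of vCPUs, and
 * vmm_fence_process() executes any pending requests on the next entry
 * to the guest.
 */
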
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/vmm.h>

#include <dev/vmm/vmm_vm.h>

#include "riscv.h"
#include "vmm_fence.h"

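/*
 * Dequeue one pending fence request from the per-vCPU fence queue.
 * Returns true and copies the entry into *new_fence if the queue was
 * non-empty; returns false if there was nothing to dequeue.
 */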
static bool
vmm_fence_dequeue(struct hypctx *hypctx, struct vmm_fence *new_fence)
{
        struct vmm_fence *queue;
        struct vmm_fence *fence;

        mtx_lock_spin(&hypctx->fence_queue_mtx);
        queue = hypctx->fence_queue;
        fence = &queue[hypctx->fence_queue_head];
        if (fence->type != VMM_RISCV_FENCE_INVALID) {
                *new_fence = *fence;
                fence->type = VMM_RISCV_FENCE_INVALID;
                hypctx->fence_queue_head =
                    (hypctx->fence_queue_head + 1) % VMM_FENCE_QUEUE_SIZE;
        } else {
                mtx_unlock_spin(&hypctx->fence_queue_mtx);
                return (false);
        }
        mtx_unlock_spin(&hypctx->fence_queue_mtx);

        return (true);
}

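/*
 * Enqueue a fence request onto the per-vCPU fence queue.  Returns false
 * if the slot at the tail is still occupied, i.e. the queue is full.
 */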
static bool
vmm_fence_enqueue(struct hypctx *hypctx, struct vmm_fence *new_fence)
{
        struct vmm_fence *queue;
        struct vmm_fence *fence;

        mtx_lock_spin(&hypctx->fence_queue_mtx);
        queue = hypctx->fence_queue;
        fence = &queue[hypctx->fence_queue_tail];
        if (fence->type == VMM_RISCV_FENCE_INVALID) {
                *fence = *new_fence;
                hypctx->fence_queue_tail =
                    (hypctx->fence_queue_tail + 1) % VMM_FENCE_QUEUE_SIZE;
        } else {
                mtx_unlock_spin(&hypctx->fence_queue_mtx);
                return (false);
        }
        mtx_unlock_spin(&hypctx->fence_queue_mtx);

        return (true);
}

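/*
 * Execute a single queued fence request on the current hart: either a
 * page-by-page sfence.vma over the requested range, or, for VMA_ASID
 * requests, an ASID-scoped flush (full or per-page).
 */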
static void
vmm_fence_process_one(struct vmm_fence *fence)
{
        uint64_t va;

        KASSERT(fence->type == VMM_RISCV_FENCE_VMA ||
            fence->type == VMM_RISCV_FENCE_VMA_ASID,
            ("%s: wrong fence type %d", __func__, fence->type));

        switch (fence->type) {
        case VMM_RISCV_FENCE_VMA:
                for (va = fence->start; va < fence->start + fence->size;
                    va += PAGE_SIZE)
                        sfence_vma_page(va);
                break;
        case VMM_RISCV_FENCE_VMA_ASID:
                if ((fence->start == 0 && fence->size == 0) ||
                    fence->size == -1)
                        sfence_vma_asid(fence->asid);
                else
                        for (va = fence->start; va < fence->start + fence->size;
                            va += PAGE_SIZE)
                                sfence_vma_asid_page(fence->asid, va);
                break;
        default:
                break;
        }
}

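/*
 * Process all fence requests pending for this vCPU: first the coalesced
 * FENCE_REQ_I / FENCE_REQ_VMA bits, then any ranged requests left in
 * the fence queue.  Called on entry to the guest.
 */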
void
vmm_fence_process(struct hypctx *hypctx)
{
        struct vmm_fence fence;
        int pending;

        pending = atomic_readandclear_32(&hypctx->fence_req);

        KASSERT((pending & ~(FENCE_REQ_I | FENCE_REQ_VMA)) == 0,
            ("wrong fence bit mask"));

        if (pending & FENCE_REQ_I)
                fence_i();

        if (pending & FENCE_REQ_VMA)
                sfence_vma();

        while (vmm_fence_dequeue(hypctx, &fence) == true)
                vmm_fence_process_one(&fence);
}

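/*
 * Post a fence request to every vCPU in 'cpus'.  FENCE.I and global
 * FENCE.VMA requests are coalesced into the fence_req bits; ranged and
 * ASID-scoped requests are enqueued, falling back to a full VMA flush
 * if the queue is full.  vCPUs currently running in the guest are then
 * kicked with an IPI so they re-enter and process the request.
 */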
void
vmm_fence_add(struct vm *vm, cpuset_t *cpus, struct vmm_fence *fence)
{
        struct hypctx *hypctx;
        cpuset_t running_cpus;
        struct vcpu *vcpu;
        uint16_t maxcpus;
        int hostcpu;
        bool enq;
        int i;

        CPU_ZERO(&running_cpus);

        maxcpus = vm_get_maxcpus(vm);
        for (i = 0; i < maxcpus; i++) {
                if (!CPU_ISSET(i, cpus))
                        continue;
                vcpu = vm_vcpu(vm, i);
                hypctx = vcpu_get_cookie(vcpu);

                enq = false;

                /* FENCE.I and global FENCE.VMA requests are not enqueued. */
                switch (fence->type) {
                case VMM_RISCV_FENCE_I:
                        atomic_set_32(&hypctx->fence_req, FENCE_REQ_I);
                        break;
                case VMM_RISCV_FENCE_VMA:
                        if ((fence->start == 0 && fence->size == 0) ||
                            fence->size == -1)
                                atomic_set_32(&hypctx->fence_req,
                                    FENCE_REQ_VMA);
                        else
                                enq = true;
                        break;
                case VMM_RISCV_FENCE_VMA_ASID:
                        enq = true;
                        break;
                default:
                        KASSERT(0, ("%s: wrong fence type %d", __func__,
                            fence->type));
                        break;
                }

                /*
                 * Try to enqueue.  If the queue is full, fall back to the
                 * more conservative global VMA fence request.
                 */
                if (enq)
                        if (vmm_fence_enqueue(hypctx, fence) == false)
                                atomic_set_32(&hypctx->fence_req,
                                    FENCE_REQ_VMA);

                mb();

                if (vcpu_is_running(vcpu, &hostcpu))
                        CPU_SET(hostcpu, &running_cpus);
        }

        /*
         * Interrupt the other cores.  On reception of the IPI they will
         * leave the guest, and on re-entry they will process the pending
         * fence requests.
         *
         * If a vcpu migrates to another cpu right here, it will still
         * process all pending fences on its next entry to the guest.
         */
        if (!CPU_EMPTY(&running_cpus))
                smp_rendezvous_cpus(running_cpus, NULL, NULL, NULL, NULL);
}