GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/vmm/vmm_mmu.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (C) 2017 Alexandru Elisei <[email protected]>
 *
 * This software was developed by Alexandru Elisei under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>

#include <machine/atomic.h>
#include <machine/machdep.h>
#include <machine/vm.h>
#include <machine/vmm.h>
#include <machine/vmparam.h>

#include "mmu.h"
#include "arm64.h"

static struct mtx vmmpmap_mtx;
static pt_entry_t *l0;
static vm_paddr_t l0_paddr;

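/* Allocates the level 0 (root) table page and initializes the table lock. */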
bool
vmmpmap_init(void)
{
	vm_page_t m;

	m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (false);

	l0_paddr = VM_PAGE_TO_PHYS(m);
	l0 = (pd_entry_t *)PHYS_TO_DMAP(l0_paddr);

	mtx_init(&vmmpmap_mtx, "vmm pmap", NULL, MTX_DEF);

	return (true);
}

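/* Frees the level 3 table page; all of its entries must already be clear. */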
static void
vmmpmap_release_l3(pd_entry_t l2e)
{
	pt_entry_t *l3 __diagused;
	vm_page_t m;
	int i;

	l3 = (pd_entry_t *)PHYS_TO_DMAP(l2e & ~ATTR_MASK);
	for (i = 0; i < Ln_ENTRIES; i++) {
		KASSERT(l3[i] == 0, ("%s: l3 still mapped: %p %lx", __func__,
		    &l3[i], l3[i]));
	}

	m = PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK);
	vm_page_unwire_noq(m);
	vm_page_free(m);
}

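/*
 * Releases any level 3 tables reachable from the given level 1 entry, then
 * frees the level 2 table page itself.
 */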
static void
vmmpmap_release_l2(pd_entry_t l1e)
{
	pt_entry_t *l2;
	vm_page_t m;
	int i;

	l2 = (pd_entry_t *)PHYS_TO_DMAP(l1e & ~ATTR_MASK);
	for (i = 0; i < Ln_ENTRIES; i++) {
		if (l2[i] != 0) {
			vmmpmap_release_l3(l2[i]);
		}
	}

	m = PHYS_TO_VM_PAGE(l1e & ~ATTR_MASK);
	vm_page_unwire_noq(m);
	vm_page_free(m);
}

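/*
 * Releases any level 2 tables reachable from the given level 0 entry, then
 * frees the level 1 table page itself.
 */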
static void
vmmpmap_release_l1(pd_entry_t l0e)
{
	pt_entry_t *l1;
	vm_page_t m;
	int i;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(l0e & ~ATTR_MASK);
	for (i = 0; i < Ln_ENTRIES; i++) {
		if (l1[i] != 0) {
			vmmpmap_release_l2(l1[i]);
		}
	}

	m = PHYS_TO_VM_PAGE(l0e & ~ATTR_MASK);
	vm_page_unwire_noq(m);
	vm_page_free(m);
}

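/*
 * Releases all remaining page table pages, frees the level 0 page, and
 * destroys the lock.
 */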
void
vmmpmap_fini(void)
{
	vm_page_t m;
	int i;

	/* Remove the remaining entries */
	for (i = 0; i < L0_ENTRIES; i++) {
		if (l0[i] != 0) {
			vmmpmap_release_l1(l0[i]);
		}
	}

	m = PHYS_TO_VM_PAGE(l0_paddr);
	vm_page_unwire_noq(m);
	vm_page_free(m);

	mtx_destroy(&vmmpmap_mtx);
}

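/*
 * Returns the physical address of the level 0 table, i.e. the value to load
 * into TTBR0 for these page tables.
 */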
uint64_t
vmmpmap_to_ttbr0(void)
{

	return (l0_paddr);
}

/* Returns a pointer to the level 1 table, allocating if needed. */
static pt_entry_t *
vmmpmap_l1_table(vm_offset_t va)
{
	pt_entry_t new_l0e, l0e, *l1;
	vm_page_t m;
	int rv;

	m = NULL;
again:
	l0e = atomic_load_64(&l0[pmap_l0_index(va)]);
	if ((l0e & ATTR_DESCR_VALID) == 0) {
		/* Allocate a page for the level 1 table */
		if (m == NULL) {
			m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
			if (m == NULL)
				return (NULL);
		}

		new_l0e = VM_PAGE_TO_PHYS(m) | L0_TABLE;

		mtx_lock(&vmmpmap_mtx);
		rv = atomic_cmpset_64(&l0[pmap_l0_index(va)], l0e, new_l0e);
		mtx_unlock(&vmmpmap_mtx);
		/* We may have raced another thread, try again */
		if (rv == 0)
			goto again;

		/* The cmpset succeeded */
		l0e = new_l0e;
	} else if (m != NULL) {
		/* We allocated a page that wasn't used */
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(l0e & ~ATTR_MASK);
	return (l1);
}

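/* Returns a pointer to the level 2 table, allocating if needed. */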
static pt_entry_t *
vmmpmap_l2_table(vm_offset_t va)
{
	pt_entry_t new_l1e, l1e, *l1, *l2;
	vm_page_t m;
	int rv;

	l1 = vmmpmap_l1_table(va);
	if (l1 == NULL)
		return (NULL);

	m = NULL;
again:
	l1e = atomic_load_64(&l1[pmap_l1_index(va)]);
	if ((l1e & ATTR_DESCR_VALID) == 0) {
		/* Allocate a page for the level 2 table */
		if (m == NULL) {
			m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
			if (m == NULL)
				return (NULL);
		}

		new_l1e = VM_PAGE_TO_PHYS(m) | L1_TABLE;

		mtx_lock(&vmmpmap_mtx);
		rv = atomic_cmpset_64(&l1[pmap_l1_index(va)], l1e, new_l1e);
		mtx_unlock(&vmmpmap_mtx);
		/* We may have raced another thread, try again */
		if (rv == 0)
			goto again;

		/* The cmpset succeeded */
		l1e = new_l1e;
	} else if (m != NULL) {
		/* We allocated a page that wasn't used */
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(l1e & ~ATTR_MASK);
	return (l2);
}

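/* Returns a pointer to the level 3 table, allocating if needed. */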
static pd_entry_t *
vmmpmap_l3_table(vm_offset_t va)
{
	pt_entry_t new_l2e, l2e, *l2, *l3;
	vm_page_t m;
	int rv;

	l2 = vmmpmap_l2_table(va);
	if (l2 == NULL)
		return (NULL);

	m = NULL;
again:
	l2e = atomic_load_64(&l2[pmap_l2_index(va)]);
	if ((l2e & ATTR_DESCR_VALID) == 0) {
		/* Allocate a page for the level 3 table */
		if (m == NULL) {
			m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
			if (m == NULL)
				return (NULL);
		}

		new_l2e = VM_PAGE_TO_PHYS(m) | L2_TABLE;

		mtx_lock(&vmmpmap_mtx);
		rv = atomic_cmpset_64(&l2[pmap_l2_index(va)], l2e, new_l2e);
		mtx_unlock(&vmmpmap_mtx);
		/* We may have raced another thread, try again */
		if (rv == 0)
			goto again;

		/* The cmpset succeeded */
		l2e = new_l2e;
	} else if (m != NULL) {
		/* We allocated a page that wasn't used */
		vm_page_unwire_noq(m);
		vm_page_free_zero(m);
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(l2e & ~ATTR_MASK);
	return (l3);
}

/*
 * Creates an EL2 entry in the hyp_pmap. Similar to pmap_kenter.
 */
bool
vmmpmap_enter(vm_offset_t va, vm_size_t size, vm_paddr_t pa, vm_prot_t prot)
{
	pd_entry_t l3e, *l3;

	KASSERT((pa & L3_OFFSET) == 0,
	    ("%s: Invalid physical address", __func__));
	KASSERT((va & L3_OFFSET) == 0,
	    ("%s: Invalid virtual address", __func__));
	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: Mapping is not page-sized", __func__));

	l3e = ATTR_AF | ATTR_SH(ATTR_SH_IS) | L3_PAGE;
	/* This bit is res1 at EL2 */
	l3e |= ATTR_S1_AP(ATTR_S1_AP_USER);
	/* Only normal memory is used at EL2 */
	l3e |= ATTR_S1_IDX(VM_MEMATTR_DEFAULT);

	if ((prot & VM_PROT_EXECUTE) == 0) {
		/* PXN is res0 at EL2. UXN is XN */
		l3e |= ATTR_S1_UXN;
	}
	if ((prot & VM_PROT_WRITE) == 0) {
		l3e |= ATTR_S1_AP(ATTR_S1_AP_RO);
	}

	while (size > 0) {
		l3 = vmmpmap_l3_table(va);
		if (l3 == NULL)
			return (false);

#ifdef INVARIANTS
		/*
		 * Ensure no other threads can write to l3 between the KASSERT
		 * and store.
		 */
		mtx_lock(&vmmpmap_mtx);
#endif
		KASSERT(atomic_load_64(&l3[pmap_l3_index(va)]) == 0,
		    ("%s: VA already mapped", __func__));

		atomic_store_64(&l3[pmap_l3_index(va)], l3e | pa);
#ifdef INVARIANTS
		mtx_unlock(&vmmpmap_mtx);
#endif

		size -= PAGE_SIZE;
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	return (true);
}

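/*
 * Removes mappings created by vmmpmap_enter. If invalidate is true the
 * mappings are first made read-only so the data cache can be cleaned and
 * invalidated before the entries are cleared and the EL2 TLB is invalidated;
 * otherwise the caller is responsible for cache and TLB maintenance.
 */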
void
vmmpmap_remove(vm_offset_t va, vm_size_t size, bool invalidate)
{
	pt_entry_t l0e, *l1, l1e, *l2, l2e;
	pd_entry_t *l3, l3e, **l3_list;
	vm_offset_t eva, va_next, sva;
	size_t i;

	KASSERT((va & L3_OFFSET) == 0,
	    ("%s: Invalid virtual address", __func__));
	KASSERT((size & PAGE_MASK) == 0,
	    ("%s: Mapping is not page-sized", __func__));

	if (invalidate) {
		l3_list = malloc((size / PAGE_SIZE) * sizeof(l3_list[0]),
		    M_TEMP, M_WAITOK | M_ZERO);
	}

	sva = va;
	eva = va + size;
	mtx_lock(&vmmpmap_mtx);
	for (i = 0; va < eva; va = va_next) {
		l0e = atomic_load_64(&l0[pmap_l0_index(va)]);
		if (l0e == 0) {
			va_next = (va + L0_SIZE) & ~L0_OFFSET;
			if (va_next < va)
				va_next = eva;
			continue;
		}
		MPASS((l0e & ATTR_DESCR_MASK) == L0_TABLE);

		l1 = (pd_entry_t *)PHYS_TO_DMAP(l0e & ~ATTR_MASK);
		l1e = atomic_load_64(&l1[pmap_l1_index(va)]);
		if (l1e == 0) {
			va_next = (va + L1_SIZE) & ~L1_OFFSET;
			if (va_next < va)
				va_next = eva;
			continue;
		}
		MPASS((l1e & ATTR_DESCR_MASK) == L1_TABLE);

		l2 = (pd_entry_t *)PHYS_TO_DMAP(l1e & ~ATTR_MASK);
		l2e = atomic_load_64(&l2[pmap_l2_index(va)]);
		if (l2e == 0) {
			va_next = (va + L2_SIZE) & ~L2_OFFSET;
			if (va_next < va)
				va_next = eva;
			continue;
		}
		MPASS((l2e & ATTR_DESCR_MASK) == L2_TABLE);

		l3 = (pd_entry_t *)PHYS_TO_DMAP(l2e & ~ATTR_MASK);
		if (invalidate) {
			l3e = atomic_load_64(&l3[pmap_l3_index(va)]);
			MPASS(l3e != 0);
			/*
			 * Mark memory as read-only so we can invalidate
			 * the cache.
			 */
			l3e &= ~ATTR_S1_AP_MASK;
			l3e |= ATTR_S1_AP(ATTR_S1_AP_RO);
			atomic_store_64(&l3[pmap_l3_index(va)], l3e);

			l3_list[i] = &l3[pmap_l3_index(va)];
			i++;
		} else {
			/*
			 * The caller is responsible for clearing the cache &
			 * handling the TLB
			 */
			atomic_store_64(&l3[pmap_l3_index(va)], 0);
		}

		va_next = (va + L3_SIZE) & ~L3_OFFSET;
		if (va_next < va)
			va_next = eva;
	}
	mtx_unlock(&vmmpmap_mtx);

	if (invalidate) {
		/* Invalidate the memory from the D-cache */
		vmm_call_hyp(HYP_DC_CIVAC, sva, size);

		for (i = 0; i < (size / PAGE_SIZE); i++) {
			atomic_store_64(l3_list[i], 0);
		}

		vmm_call_hyp(HYP_EL2_TLBI, HYP_EL2_TLBI_VA, sva, size);

		free(l3_list, M_TEMP);
	}
}