GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/kvm/book3s_32_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

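/*
 * Location of the host hash table, cached from SDR1 in
 * kvmppc_mmu_init_pr(): htab is the kernel-virtual base of the HTAB,
 * htabmask selects a PTE group within it.
 */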
static ulong htab;
static u32 htabmask;

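/*
 * Clearing the first PTE word drops the valid bit, so the hardware can
 * no longer use the entry; tlbie then flushes any translation the TLB
 * may still hold for the guest effective address.
 */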
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32*)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
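
/*
 * A gvsid may live either in its hashed slot or in the mirror slot
 * (SID_MAP_MASK - hash); create_sid_map() below alternates between the
 * two on collisions, so both are probed here.
 */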
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}
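
/*
 * Classic 32-bit hashed page table lookup: the primary hash is
 * vsid XOR page index, the secondary hash is its complement, and
 * htabmask (derived from SDR1) folds the result into the HTAB to
 * yield the address of a 64-byte PTE group.
 */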
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32*)pteg;
}
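
/*
 * Install a shadow mapping for a guest page: resolve the guest
 * physical address to a host pfn, translate the guest segment to a
 * host VSID, then write a hardware PTE into the primary or secondary
 * PTE group, evicting an old entry round-robin once both are full.
 */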
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        struct page *page;
        kvm_pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;
        int r = 0;
        bool writable;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable, &page);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);

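        /*
         * rr walks the eight two-word PTEs of a PTE group; at 16 words
         * the group is exhausted, so flip to the other hash group and
         * start allowing evictions.
         */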
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

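        /*
         * Assemble the HPTE: word 0 carries the abbreviated page
         * index, the host VSID, the valid bit and, for the secondary
         * group, the H bit; word 1 carries the host RPN plus the
         * coherence (M), referenced/changed and PP protection bits
         * chosen below.
         */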
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

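        /*
         * Publish the entry with interrupts off: clear and sync any
         * old entry first, write the RPN word, then write the word
         * holding the valid bit last, so the hardware never sees a
         * half-formed PTE.
         */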
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);


        /* Now tell our Shadow PTE code about the new page */

        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_page_unused(page);
                r = -EAGAIN;
                goto out;
        }

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_page_clean(page);
out:
        return r;
}

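/*
 * Flush all shadow PTEs cached for this guest virtual page, which in
 * turn tears down their host HTAB entries.
 */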
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

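/*
 * Allocate a guest->host VSID mapping.  Collisions alternate between
 * the hashed slot and its mirror; once the host VSID pool is
 * exhausted, all mappings, shadow PTEs and shadow segments are
 * flushed and the pool is reused from the start.
 */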
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /* We might get collisions that trap in preceding order, so let's
           map them differently */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}

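/*
 * Set up the shadow segment register for a guest effective address:
 * look up (or create) the host VSID backing the guest segment and
 * stash the resulting SR value in the shadow vcpu's sr[] array.
 */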
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}

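/*
 * Per-vcpu MMU setup: reserve SID_CONTEXTS host MMU contexts (16 host
 * VSIDs each) to build the guest VSID pool, and record where the host
 * hash table lives by reading SDR1.
 */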
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}