GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/powerpc/kvm/book3s_mmu_hpte.c

/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include "trace.h"

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

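/* Base page shift: 4 KiB pages, so "eaddr >> PTE_SIZE" is the page number. */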
#define PTE_SIZE 12

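/* Slab cache from which all struct hpte_cache entries are allocated. */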
static struct kmem_cache *hpte_cache;
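
/*
 * Every cached shadow PTE is linked into four hash tables at once, so it
 * can be found by effective address (ePTE), effective address under a
 * large-page mask (ePTE_long), virtual page (vPTE), or virtual page under
 * a large-page mask (vPTE_long). The helpers below compute the bucket
 * index for each table; hash_64() folds the key down to the table's bit
 * width. For example, eaddr = 0x12345678 gives the ePTE key
 * 0x12345678 >> 12 = 0x12345.
 */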
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

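/*
 * Publish a shadow PTE on all four hash lists. Insertions take mmu_lock;
 * lookups walk the lists under RCU, hence the RCU list primitives.
 */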
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu->arch.hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu->arch.hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu->arch.mmu_lock);
}

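/* RCU callback: free an entry once all list walkers are done with it. */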
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

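/*
 * Tear down one shadow PTE: invalidate the host mapping via the 32/64-bit
 * specific hook, unhash the entry from all four lists, release the backing
 * page (marked dirty if the guest could write through it), and defer the
 * actual free to RCU.
 */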
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu->arch.mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu->arch.mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	spin_unlock(&vcpu->arch.mmu_lock);

	vcpu->arch.hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

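/*
 * Flush every cached entry. Walking just the vPTE_long table is
 * sufficient, since each entry is linked into all four tables.
 */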
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;
	struct hlist_node *node;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu->arch.hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

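/*
 * Flush by effective address; ea_mask selects the granularity. For
 * example, kvmppc_mmu_pte_flush(vcpu, ea, ~0xfffUL) drops the single
 * page containing ea, while a mask of 0 drops everything.
 */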
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct hlist_head *list;
	struct hlist_node *node;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu->arch.hpte_hash_vpte_long[
			kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

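/*
 * Flush by guest virtual page; vp_mask picks one of the two supported
 * granularities. Any other mask is a caller bug and only warns.
 */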
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

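/*
 * Flush every entry whose guest real (physical) address falls within
 * [pa_start, pa_end). As with the full flush, the vPTE_long table covers
 * all cached entries.
 */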
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct hlist_node *node;
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, node, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

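/*
 * Hand out the next cache entry, flushing the whole cache once it fills
 * up to HPTEG_CACHE_NUM entries instead of evicting individual entries.
 * A failed allocation is not handled here; callers receive NULL.
 */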
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	vcpu->arch.hpte_cache_count++;

	if (vcpu->arch.hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

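/* A flush with mask 0 drops every entry this vcpu still caches. */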
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long));

	spin_lock_init(&vcpu->arch.mmu_lock);

	return 0;
}

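/*
 * Create the global slab for hpte_cache objects; note the object size is
 * also passed as the slab's alignment.
 */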
int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}