GitHub Repository: awilliam/linux-vfio
Path: blob/master/virt/kvm/ioapic.c
/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <[email protected]>
 * Yaozu (Eddie) Dong <[email protected]>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif
static int ioapic_deliver(struct kvm_ioapic *vioapic, int irq);

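/*
 * Indirect register read: return the value of the register currently
 * selected by IOREGSEL (version/max-entries word, APIC ID, arbitration
 * ID, or one 32-bit half of a 64-bit redirection table entry).
 */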
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
					  unsigned long addr,
					  unsigned long length)
{
	unsigned long result = 0;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
			  | (IOAPIC_VERSION_ID & 0xff));
		break;

	case IOAPIC_REG_APIC_ID:
	case IOAPIC_REG_ARB_ID:
		result = ((ioapic->id & 0xf) << 24);
		break;

	default:
		{
			u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
			u64 redir_content;

			ASSERT(redir_index < IOAPIC_NUM_PINS);

			redir_content = ioapic->redirtbl[redir_index].bits;
			result = (ioapic->ioregsel & 0x1) ?
			    (redir_content >> 32) & 0xffffffff :
			    redir_content & 0xffffffff;
			break;
		}
	}

	return result;
}

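/*
 * Deliver the interrupt programmed on pin @idx if the entry is not
 * masked; for level-triggered entries, set remote_irr once the
 * interrupt has been injected.
 */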
static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
{
	union kvm_ioapic_redirect_entry *pent;
	int injected = -1;

	pent = &ioapic->redirtbl[idx];

	if (!pent->fields.mask) {
		injected = ioapic_deliver(ioapic, idx);
		if (injected && pent->fields.trig_mode == IOAPIC_LEVEL_TRIG)
			pent->fields.remote_irr = 1;
	}

	return injected;
}

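/*
 * Recompute the bitmap of vectors programmed into any redirection
 * entry. kvm_ioapic_update_eoi() consults this bitmap locklessly to
 * skip EOIs for vectors the IOAPIC does not handle, hence the write
 * barrier pairing with its smp_rmb().
 */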
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
	DECLARE_BITMAP(handled_vectors, 256);
	int i;

	memset(handled_vectors, 0, sizeof(handled_vectors));
	for (i = 0; i < IOAPIC_NUM_PINS; ++i)
		__set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
	memcpy(ioapic->handled_vectors, handled_vectors,
	       sizeof(handled_vectors));
	smp_wmb();
}

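/*
 * Indirect register write through the IOWIN register. Writes to the
 * version register are ignored; writes to a redirection entry update
 * one 32-bit half, refresh the handled-vector bitmap, notify mask
 * notifiers on a mask change, and re-deliver a pending level-triggered
 * interrupt that is still set in IRR.
 */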
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		update_handled_vectors(ioapic);
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}

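/*
 * Translate redirection entry @irq into a struct kvm_lapic_irq and hand
 * it to the local APIC delivery code.
 */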
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

#ifdef CONFIG_X86
	/* Always deliver the PIT interrupt to vcpu 0 */
	if (irq == 0) {
		irqe.dest_mode = 0; /* Physical mode. */
		/* need to read the apic_id from the apic register since
		 * it can be rewritten */
		irqe.dest_id = ioapic->kvm->bsp_vcpu->vcpu_id;
	}
#endif
	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}

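/*
 * Set or clear the level on pin @irq. Edge-triggered pins deliver only
 * on a 0->1 transition of IRR; level-triggered pins are serviced only
 * while remote_irr is clear. A return value of 0 reports a coalesced
 * interrupt.
 */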
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
{
	u32 old_irr;
	u32 mask = 1 << irq;
	union kvm_ioapic_redirect_entry entry;
	int ret = 1;

	spin_lock(&ioapic->lock);
	old_irr = ioapic->irr;
	if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
		entry = ioapic->redirtbl[irq];
		level ^= entry.fields.polarity;
		if (!level)
			ioapic->irr &= ~mask;
		else {
			int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
			ioapic->irr |= mask;
			if ((edge && old_irr != ioapic->irr) ||
			    (!edge && !entry.fields.remote_irr))
				ret = ioapic_service(ioapic, irq);
			else
				ret = 0; /* report coalesced interrupt */
		}
		trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	}
	spin_unlock(&ioapic->lock);

	return ret;
}

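/*
 * Handle an EOI for @vector: clear remote_irr on every matching
 * level-triggered entry and re-deliver the interrupt if it is still
 * pending in IRR. Called with ioapic->lock held.
 */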
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
				    int trigger_mode)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;

		/*
		 * We drop the lock while calling the ack notifiers because
		 * ack notifier callbacks for assigned devices call back into
		 * the IOAPIC recursively. Since remote_irr is cleared only
		 * after the notifiers return, a new instance of the same
		 * vector delivered while the lock is dropped is parked in
		 * IRR and delivered once the ack notifier returns.
		 */
		spin_unlock(&ioapic->lock);
		kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
		spin_lock(&ioapic->lock);

		if (trigger_mode != IOAPIC_LEVEL_TRIG)
			continue;

		ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
		ent->fields.remote_irr = 0;
		if (!ent->fields.mask && (ioapic->irr & (1 << i)))
			ioapic_service(ioapic, i);
	}
}

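/*
 * EOI entry point from the local APIC. The lockless handled_vectors
 * check (paired with the smp_wmb() in update_handled_vectors()) lets
 * EOIs for vectors the IOAPIC never programmed skip the lock entirely.
 */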
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	smp_rmb();
	if (!test_bit(vector, ioapic->handled_vectors))
		return;
	spin_lock(&ioapic->lock);
	__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
	spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	return ((addr >= ioapic->base_address &&
		 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

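/*
 * MMIO read handler registered on the KVM_MMIO_BUS. Accesses outside
 * the IOAPIC window return -EOPNOTSUPP so the bus can try other devices.
 */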
static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
			    void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("addr %lx\n", (unsigned long)addr);
	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic, addr, len);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}

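/*
 * MMIO write handler: 4- and 8-byte writes update IOREGSEL or go
 * through the indirect window; other sizes are ignored with a warning.
 */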
static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
			     const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
		     (void*)addr, len, val);
	ASSERT(!(addr & 0xf));	/* check alignment */

	if (len == 4 || len == 8)
		data = *(u32 *) val;
	else {
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data;
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;
#ifdef CONFIG_IA64
	case IOAPIC_REG_EOI:
		__kvm_ioapic_update_eoi(ioapic, data, IOAPIC_LEVEL_TRIG);
		break;
#endif

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}

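/*
 * Reset to power-on state: all redirection entries masked, default base
 * address, IOREGSEL, IRR and APIC ID cleared.
 */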
void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->id = 0;
	update_handled_vectors(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read = ioapic_mmio_read,
	.write = ioapic_mmio_write,
};

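/*
 * Allocate and register the virtual IOAPIC on the VM's MMIO bus; on
 * registration failure the device is torn down again.
 */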
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (ioapic) {
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}
}

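/*
 * Copy the register state into / out of a struct kvm_ioapic_state,
 * e.g. for irqchip save/restore from userspace.
 */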
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	spin_unlock(&ioapic->lock);
	return 0;
}

int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
	if (!ioapic)
		return -EINVAL;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	update_handled_vectors(ioapic);
	spin_unlock(&ioapic->lock);
	return 0;
}