Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/ioapic.c
26424 views
1
/*
2
* Copyright (C) 2001 MandrakeSoft S.A.
3
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
4
*
5
* MandrakeSoft S.A.
6
* 43, rue d'Aboukir
7
* 75002 Paris - France
8
* http://www.linux-mandrake.com/
9
* http://www.mandrakesoft.com/
10
*
11
* This library is free software; you can redistribute it and/or
12
* modify it under the terms of the GNU Lesser General Public
13
* License as published by the Free Software Foundation; either
14
* version 2 of the License, or (at your option) any later version.
15
*
16
* This library is distributed in the hope that it will be useful,
17
* but WITHOUT ANY WARRANTY; without even the implied warranty of
18
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19
* Lesser General Public License for more details.
20
*
21
* You should have received a copy of the GNU Lesser General Public
22
* License along with this library; if not, write to the Free Software
23
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24
*
25
* Yunhong Jiang <[email protected]>
26
* Yaozu (Eddie) Dong <[email protected]>
27
* Based on Xen 3.1 code.
28
*/
29
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31
#include <linux/kvm_host.h>
32
#include <linux/kvm.h>
33
#include <linux/mm.h>
34
#include <linux/highmem.h>
35
#include <linux/smp.h>
36
#include <linux/hrtimer.h>
37
#include <linux/io.h>
38
#include <linux/slab.h>
39
#include <linux/export.h>
40
#include <linux/nospec.h>
41
#include <asm/processor.h>
42
#include <asm/page.h>
43
#include <asm/current.h>
44
45
#include "ioapic.h"
46
#include "lapic.h"
47
#include "irq.h"
48
#include "trace.h"
49
50
static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
51
bool line_status);
52
53
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
54
struct kvm_ioapic *ioapic,
55
int trigger_mode,
56
int pin);
57
58
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
59
{
60
unsigned long result = 0;
61
62
switch (ioapic->ioregsel) {
63
case IOAPIC_REG_VERSION:
64
result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
65
| (IOAPIC_VERSION_ID & 0xff));
66
break;
67
68
case IOAPIC_REG_APIC_ID:
69
case IOAPIC_REG_ARB_ID:
70
result = ((ioapic->id & 0xf) << 24);
71
break;
72
73
default:
74
{
75
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
76
u64 redir_content = ~0ULL;
77
78
if (redir_index < IOAPIC_NUM_PINS) {
79
u32 index = array_index_nospec(
80
redir_index, IOAPIC_NUM_PINS);
81
82
redir_content = ioapic->redirtbl[index].bits;
83
}
84
85
result = (ioapic->ioregsel & 0x1) ?
86
(redir_content >> 32) & 0xffffffff :
87
redir_content & 0xffffffff;
88
break;
89
}
90
}
91
92
return result;
93
}
94
95
/*
 * Drop all RTC EOI-tracking state: no EOIs are outstanding and no vCPU
 * is recorded as a destination of an in-flight RTC interrupt.
 */
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
	ioapic->rtc_status.pending_eoi = 0;
	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
}
100
101
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
102
103
/*
 * pending_eoi must never go negative; if it does, the tracking state is
 * corrupt, so warn and rebuild it from scratch for all vCPUs.
 */
static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
	if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
		kvm_rtc_eoi_tracking_restore_all(ioapic);
}
108
109
/*
 * Resynchronize RTC EOI tracking for a single vCPU with that vCPU's
 * LAPIC state: mark the vCPU as owing an EOI iff its LAPIC currently
 * has the RTC vector pending, adjusting pending_eoi to match.
 *
 * Caller must hold ioapic->lock (see kvm_rtc_eoi_tracking_restore_one).
 */
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	bool new_val, old_val;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;

	e = &ioapic->redirtbl[RTC_GSI];
	/* Only vCPUs that are destinations of the RTC entry are tracked. */
	if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
				 e->fields.dest_id,
				 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
		return;

	/* new_val: does the LAPIC still owe an EOI for the RTC vector? */
	new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
	/* old_val: do we currently think it owes one? */
	old_val = test_bit(vcpu->vcpu_id, dest_map->map);

	if (new_val == old_val)
		return;

	if (new_val) {
		__set_bit(vcpu->vcpu_id, dest_map->map);
		dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
		ioapic->rtc_status.pending_eoi++;
	} else {
		__clear_bit(vcpu->vcpu_id, dest_map->map);
		ioapic->rtc_status.pending_eoi--;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
138
139
/*
 * Locked wrapper around __rtc_irq_eoi_tracking_restore_one() for callers
 * outside this file that need RTC EOI tracking refreshed for one vCPU.
 */
void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	__rtc_irq_eoi_tracking_restore_one(vcpu);
	spin_unlock(&ioapic->lock);
}
147
148
/*
 * Rebuild the RTC EOI-tracking state from scratch for every vCPU.
 * Caller must hold ioapic->lock.
 */
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	/* Compile-time no-op guard: tracking only applies if the RTC GSI exists. */
	if (RTC_GSI >= IOAPIC_NUM_PINS)
		return;

	rtc_irq_eoi_tracking_reset(ioapic);
	kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
		__rtc_irq_eoi_tracking_restore_one(vcpu);
}
160
161
/*
 * Note an EOI from @vcpu for @vector: if this vCPU was tracked as a
 * pending destination of the RTC interrupt, clear it and decrement the
 * outstanding-EOI count.  Caller must hold ioapic->lock.
 */
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
			int vector)
{
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

	/* RTC special handling */
	/*
	 * The plain test_bit()/vector checks pre-filter before the atomic
	 * test_and_clear_bit(), which is what actually claims the bit.
	 */
	if (test_bit(vcpu->vcpu_id, dest_map->map) &&
	    (vector == dest_map->vectors[vcpu->vcpu_id]) &&
	    (test_and_clear_bit(vcpu->vcpu_id,
				ioapic->rtc_status.dest_map.map))) {
		--ioapic->rtc_status.pending_eoi;
		rtc_status_pending_eoi_check_valid(ioapic);
	}
}
175
176
static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
177
{
178
if (ioapic->rtc_status.pending_eoi > 0)
179
return true; /* coalesced */
180
181
return false;
182
}
183
184
/*
 * Lazily account a missed EOI for pin @irq: find a destination vCPU of
 * the entry whose LAPIC no longer has the vector pending and run the
 * RTC EOI bookkeeping for it.  Used when hardware-accelerated EOI
 * (AMD AVIC) prevents the in-kernel IOAPIC from observing the EOI
 * directly (see ioapic_set_irq).  Caller must hold ioapic->lock.
 */
static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

	kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
		if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
					 entry->fields.dest_id,
					 entry->fields.dest_mode) ||
		    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
			continue;

		/*
		 * If no longer has pending EOI in LAPICs, update
		 * EOI for this vector.
		 */
		rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
		break;
	}
}
205
206
/*
 * Update the level of IOAPIC pin @irq to @irq_level and deliver the
 * interrupt if it becomes pending.
 *
 * Returns 0 if the interrupt was coalesced (dropped), otherwise the
 * result of ioapic_service() (or 1 for a plain deassert).  Callers in
 * this file invoke it with ioapic->lock held.
 */
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
			  int irq_level, bool line_status)
{
	union kvm_ioapic_redirect_entry entry;
	u32 mask = 1 << irq;
	u32 old_irr;
	int edge, ret;

	entry = ioapic->redirtbl[irq];
	edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

	/* Deassert: just clear the IRR bit; nothing to deliver. */
	if (!irq_level) {
		ioapic->irr &= ~mask;
		ret = 1;
		goto out;
	}

	/*
	 * AMD SVM AVIC accelerate EOI write iff the interrupt is edge
	 * triggered, in which case the in-kernel IOAPIC will not be able
	 * to receive the EOI. In this case, we do a lazy update of the
	 * pending EOI when trying to set IOAPIC irq.
	 */
	if (edge && kvm_apicv_activated(ioapic->kvm))
		ioapic_lazy_update_eoi(ioapic, irq);

	/*
	 * Return 0 for coalesced interrupts; for edge-triggered interrupts,
	 * this only happens if a previous edge has not been delivered due
	 * to masking. For level interrupts, the remote_irr field tells
	 * us if the interrupt is waiting for an EOI.
	 *
	 * RTC is special: it is edge-triggered, but userspace likes to know
	 * if it has been already ack-ed via EOI because coalesced RTC
	 * interrupts lead to time drift in Windows guests. So we track
	 * EOI manually for the RTC interrupt.
	 */
	if (irq == RTC_GSI && line_status &&
		rtc_irq_check_coalesced(ioapic)) {
		ret = 0;
		goto out;
	}

	old_irr = ioapic->irr;
	ioapic->irr |= mask;
	if (edge) {
		ioapic->irr_delivered &= ~mask;
		/* Edge with IRR already set: previous edge not yet consumed. */
		if (old_irr == ioapic->irr) {
			ret = 0;
			goto out;
		}
	}

	ret = ioapic_service(ioapic, irq, line_status);

out:
	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
	return ret;
}
265
266
/*
 * Re-inject every interrupt marked pending in @irr (used when restoring
 * IOAPIC state, see kvm_set_ioapic).  RTC EOI tracking is reset first so
 * the injections aren't misreported as coalesced, then rebuilt.
 */
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
	u32 idx;

	rtc_irq_eoi_tracking_reset(ioapic);
	for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
		ioapic_set_irq(ioapic, idx, 1, true);

	kvm_rtc_eoi_tracking_restore_all(ioapic);
}
276
277
278
/*
 * Populate @ioapic_handled_vectors with every vector this vCPU must
 * hand back to the in-kernel IOAPIC on EOI: level-triggered entries,
 * entries with an ack notifier, and the specially-tracked RTC GSI.
 */
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
	struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
	union kvm_ioapic_redirect_entry *e;
	int index;

	spin_lock(&ioapic->lock);

	/* Make sure we see any missing RTC EOI */
	if (test_bit(vcpu->vcpu_id, dest_map->map))
		__set_bit(dest_map->vectors[vcpu->vcpu_id],
			  ioapic_handled_vectors);

	for (index = 0; index < IOAPIC_NUM_PINS; index++) {
		e = &ioapic->redirtbl[index];
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
		    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
		    index == RTC_GSI) {
			u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

			/* Set the vector's bit if this vCPU is a destination. */
			kvm_scan_ioapic_irq(vcpu, e->fields.dest_id, dm,
					    e->fields.vector, ioapic_handled_vectors);
		}
	}
	spin_unlock(&ioapic->lock);
}
305
306
/*
 * After the ack-notifier list changes, ask all vCPUs to rescan IOAPIC
 * entries; nothing to do when the IOAPIC isn't emulated in the kernel.
 */
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
	if (ioapic_in_kernel(kvm))
		kvm_make_scan_ioapic_request(kvm);
}
312
313
/*
 * Register @kimn to be called (via kvm_fire_mask_notifiers) whenever the
 * mask bit of IOAPIC GSI @irq changes.  Serialized by kvm->irq_lock;
 * readers traverse the list under SRCU.
 */
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	mutex_lock(&kvm->irq_lock);
	kimn->irq = irq;
	hlist_add_head_rcu(&kimn->link, &ioapic->mask_notifier_list);
	mutex_unlock(&kvm->irq_lock);
}
323
324
/*
 * Remove @kimn from the mask-notifier list and wait for in-flight SRCU
 * readers (kvm_fire_mask_notifiers) to finish before returning, so the
 * caller may free the notifier.
 */
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
}
332
333
/*
 * Invoke every registered mask notifier whose GSI maps to @irqchip/@pin,
 * passing the new @mask state.  The notifier list is walked under SRCU
 * (paired with synchronize_srcu in kvm_unregister_irq_mask_notifier).
 */
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	struct kvm_irq_mask_notifier *kimn;
	int idx, gsi;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link)
			if (kimn->irq == gsi)
				kimn->func(kimn, mask);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}
348
349
/*
 * Write @val to the register selected through IOREGSEL.  For redirection
 * table entries this preserves the read-only fields, fires mask
 * notifiers on mask transitions, re-evaluates a pending level-triggered
 * interrupt, and requests the affected vCPUs rescan their handled
 * vectors.  Caller must hold ioapic->lock.
 *
 * Fix: when the guest reprograms an entry's destination, the rescan of
 * the *previous* destination must use the saved old_dest_mode, not the
 * freshly written e->fields.dest_mode — otherwise vCPUs targeted under
 * the old mode are missed and ioapic_handled_vectors goes stale.
 */
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;
	int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
	DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		if (index >= IOAPIC_NUM_PINS)
			return;
		index = array_index_nospec(index, IOAPIC_NUM_PINS);
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		/* Preserve read-only fields */
		old_remote_irr = e->fields.remote_irr;
		old_delivery_status = e->fields.delivery_status;
		old_dest_id = e->fields.dest_id;
		old_dest_mode = e->fields.dest_mode;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
		}
		e->fields.remote_irr = old_remote_irr;
		e->fields.delivery_status = old_delivery_status;

		/*
		 * Some OSes (Linux, Xen) assume that Remote IRR bit will
		 * be cleared by IOAPIC hardware when the entry is configured
		 * as edge-triggered. This behavior is used to simulate an
		 * explicit EOI on IOAPICs that don't have the EOI register.
		 */
		if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
			e->fields.remote_irr = 0;

		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
		    ioapic->irr & (1 << index) && !e->fields.mask && !e->fields.remote_irr) {
			/*
			 * Pending status in irr may be outdated: the IRQ line may have
			 * already been deasserted by a device while the IRQ was masked.
			 * This occurs, for instance, if the interrupt is handled in a
			 * Linux guest as a oneshot interrupt (IRQF_ONESHOT). In this
			 * case the guest acknowledges the interrupt to the device in
			 * its threaded irq handler, i.e. after the EOI but before
			 * unmasking, so at the time of unmasking the IRQ line is
			 * already down but our pending irr bit is still set. In such
			 * cases, injecting this pending interrupt to the guest is
			 * buggy: the guest will receive an extra unwanted interrupt.
			 *
			 * So we need to check here if the IRQ is actually still pending.
			 * As we are generally not able to probe the IRQ line status
			 * directly, we do it through irqfd resampler. Namely, we clear
			 * the pending status and notify the resampler that this interrupt
			 * is done, without actually injecting it into the guest. If the
			 * IRQ line is actually already deasserted, we are done. If it is
			 * still asserted, a new interrupt will be shortly triggered
			 * through irqfd and injected into the guest.
			 *
			 * If, however, it's not possible to resample (no irqfd resampler
			 * registered for this irq), then unconditionally inject this
			 * pending interrupt into the guest, so the guest will not miss
			 * an interrupt, although may get an extra unwanted interrupt.
			 */
			if (kvm_notify_irqfd_resampler(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index))
				ioapic->irr &= ~(1 << index);
			else
				ioapic_service(ioapic, index, false);
		}
		if (e->fields.delivery_mode == APIC_DM_FIXED) {
			struct kvm_lapic_irq irq;

			irq.vector = e->fields.vector;
			irq.delivery_mode = e->fields.delivery_mode << 8;
			irq.dest_mode =
			    kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
			irq.level = false;
			irq.trig_mode = e->fields.trig_mode;
			irq.shorthand = APIC_DEST_NOSHORT;
			irq.dest_id = e->fields.dest_id;
			irq.msi_redir_hint = false;
			bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
			kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
						 vcpu_bitmap);
			if (old_dest_mode != e->fields.dest_mode ||
			    old_dest_id != e->fields.dest_id) {
				/*
				 * Update vcpu_bitmap with vcpus specified in
				 * the previous request as well. This is done to
				 * keep ioapic_handled_vectors synchronized.
				 *
				 * Use the saved old_dest_mode here: the point
				 * is to match the destination of the entry as
				 * it was programmed before this write.
				 */
				irq.dest_id = old_dest_id;
				irq.dest_mode =
				    kvm_lapic_irq_dest_mode(
					    !!old_dest_mode);
				kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
							 vcpu_bitmap);
			}
			kvm_make_scan_ioapic_request_mask(ioapic->kvm,
							  vcpu_bitmap);
		} else {
			kvm_make_scan_ioapic_request(ioapic->kvm);
		}
		break;
	}
}
474
475
/*
 * Deliver the interrupt programmed on pin @irq to the LAPIC(s).
 *
 * Returns -1 without delivering when the entry is masked, or when a
 * level-triggered interrupt is still awaiting EOI (remote_irr set);
 * otherwise returns the result of kvm_irq_delivery_to_apic().
 * Caller must hold ioapic->lock.
 */
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask ||
	    (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
	    entry->fields.remote_irr))
		return -1;

	/* Build the LAPIC message from the redirection table entry. */
	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = APIC_DEST_NOSHORT;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero, namely
		 * if rtc_irq_check_coalesced returns false).
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	/* Level interrupts stay in service until the guest EOIs them. */
	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}
517
518
/*
 * irq-routing entry point: fold the per-source level for the pin into a
 * single line state, then update the IOAPIC under its lock.
 * Returns the result of ioapic_set_irq().
 */
int kvm_ioapic_set_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		       int irq_source_id, int level, bool line_status)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
	int irq = e->irqchip.pin;
	int ret, irq_level;

	BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

	spin_lock(&ioapic->lock);
	irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
					 irq_source_id, level);
	ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

	spin_unlock(&ioapic->lock);

	return ret;
}
536
537
/*
 * Delayed-work handler scheduled by kvm_ioapic_update_eoi_one() when an
 * interrupt storm is detected: re-service any level-triggered pin that
 * is still pending (irr set) and not awaiting EOI (remote_irr clear).
 */
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
	int i;
	struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
						 eoi_inject.work);
	spin_lock(&ioapic->lock);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
			continue;

		if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
			ioapic_service(ioapic, i, false);
	}
	spin_unlock(&ioapic->lock);
}
554
555
/* EOIs on the same still-pending pin before we assume an interrupt storm. */
#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
/*
 * Handle an EOI from @vcpu for a single pin: run the ack notifiers,
 * clear remote_irr for level-triggered entries, and re-deliver the
 * interrupt if the line is still asserted — throttling via delayed work
 * if the guest appears stuck in an interrupt storm.
 *
 * Called (and returns) with ioapic->lock held, but drops it around the
 * ack notifiers (see comment below).
 */
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
				      struct kvm_ioapic *ioapic,
				      int trigger_mode,
				      int pin)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

	/*
	 * We are dropping lock while calling ack notifiers because ack
	 * notifier callbacks for assigned devices call into IOAPIC
	 * recursively. Since remote_irr is cleared only after call
	 * to notifiers if the same vector will be delivered while lock
	 * is dropped it will be put into irr and will be delivered
	 * after ack notifier returns.
	 */
	spin_unlock(&ioapic->lock);
	kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
	spin_lock(&ioapic->lock);

	/* Directed EOI: the guest EOIs the IOAPIC explicitly; leave remote_irr. */
	if (trigger_mode != IOAPIC_LEVEL_TRIG ||
	    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
		return;

	ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
	ent->fields.remote_irr = 0;
	if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
		++ioapic->irq_eoi[pin];
		if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
			/*
			 * Real hardware does not deliver the interrupt
			 * immediately during eoi broadcast, and this
			 * lets a buggy guest make slow progress
			 * even if it does not correctly handle a
			 * level-triggered interrupt. Emulate this
			 * behavior if we detect an interrupt storm.
			 */
			schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
			ioapic->irq_eoi[pin] = 0;
			trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
		} else {
			ioapic_service(ioapic, pin, false);
		}
	} else {
		ioapic->irq_eoi[pin] = 0;
	}
}
603
604
/*
 * Broadcast an EOI from @vcpu for @vector to every pin programmed with
 * that vector, after updating the RTC EOI tracking.
 */
void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
	int i;
	struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	rtc_irq_eoi(ioapic, vcpu, vector);
	for (i = 0; i < IOAPIC_NUM_PINS; i++) {
		union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

		if (ent->fields.vector != vector)
			continue;
		kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
	}
	spin_unlock(&ioapic->lock);
}
620
621
static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
622
{
623
return container_of(dev, struct kvm_ioapic, dev);
624
}
625
626
/* Does @addr fall inside this IOAPIC's MMIO window? */
static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
	gpa_t base = ioapic->base_address;

	return addr >= base && addr < base + IOAPIC_MEM_LENGTH;
}
631
632
/*
 * MMIO read handler.  Returns -EOPNOTSUPP for addresses outside the
 * IOAPIC window so the KVM IO bus tries other devices; otherwise decodes
 * IOREGSEL/IOWIN and copies the 32-bit result into @val for the
 * requested access width.
 */
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t addr, int len, void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 result;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		result = ioapic->ioregsel;
		break;

	case IOAPIC_REG_WINDOW:
		result = ioapic_read_indirect(ioapic);
		break;

	default:
		result = 0;
		break;
	}
	spin_unlock(&ioapic->lock);

	switch (len) {
	case 8:
		*(u64 *) val = result;
		break;
	case 1:
	case 2:
	case 4:
		memcpy(val, (char *)&result, len);
		break;
	default:
		printk(KERN_WARNING "ioapic: wrong length %d\n", len);
	}
	return 0;
}
673
674
/*
 * MMIO write handler.  Returns -EOPNOTSUPP for addresses outside the
 * IOAPIC window; otherwise widens the data to 32 bits and dispatches to
 * IOREGSEL or the indirect register window.  Writes to other offsets
 * are silently ignored.
 */
static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			     gpa_t addr, int len, const void *val)
{
	struct kvm_ioapic *ioapic = to_ioapic(this);
	u32 data;
	if (!ioapic_in_range(ioapic, addr))
		return -EOPNOTSUPP;

	ASSERT(!(addr & 0xf));	/* check alignment */

	switch (len) {
	case 8:
	case 4:
		data = *(u32 *) val;
		break;
	case 2:
		data = *(u16 *) val;
		break;
	case 1:
		data = *(u8  *) val;
		break;
	default:
		printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
		return 0;
	}

	addr &= 0xff;
	spin_lock(&ioapic->lock);
	switch (addr) {
	case IOAPIC_REG_SELECT:
		ioapic->ioregsel = data & 0xFF; /* 8-bit register */
		break;

	case IOAPIC_REG_WINDOW:
		ioapic_write_indirect(ioapic, data);
		break;

	default:
		break;
	}
	spin_unlock(&ioapic->lock);
	return 0;
}
717
718
/*
 * Return the IOAPIC to power-on state: all pins masked, registers and
 * pending-interrupt state cleared, RTC EOI tracking reset.  Cancels any
 * outstanding storm-throttling work first.
 */
static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
	int i;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	for (i = 0; i < IOAPIC_NUM_PINS; i++)
		ioapic->redirtbl[i].fields.mask = 1;
	ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
	ioapic->ioregsel = 0;
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	ioapic->id = 0;
	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
	rtc_irq_eoi_tracking_reset(ioapic);
}
733
734
/* MMIO callbacks registered on the KVM MMIO bus in kvm_ioapic_init(). */
static const struct kvm_io_device_ops ioapic_mmio_ops = {
	.read     = ioapic_mmio_read,
	.write    = ioapic_mmio_write,
};
738
739
/*
 * Allocate and initialize the in-kernel IOAPIC for @kvm and register it
 * on the MMIO bus.  Returns 0 on success or a negative errno; on
 * failure kvm->arch.vioapic is left NULL and the allocation is freed.
 */
int kvm_ioapic_init(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic;
	int ret;

	ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
	if (!ioapic)
		return -ENOMEM;
	spin_lock_init(&ioapic->lock);
	INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
	INIT_HLIST_HEAD(&ioapic->mask_notifier_list);
	kvm->arch.vioapic = ioapic;
	kvm_ioapic_reset(ioapic);
	kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
	ioapic->kvm = kvm;
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
				      IOAPIC_MEM_LENGTH, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}

	return ret;
}
765
766
/*
 * Tear down the in-kernel IOAPIC: flush pending delayed work, remove the
 * device from the MMIO bus, and free it.  Safe to call when no IOAPIC
 * was created.
 */
void kvm_ioapic_destroy(struct kvm *kvm)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	if (!ioapic)
		return;

	cancel_delayed_work_sync(&ioapic->eoi_inject);
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
	mutex_unlock(&kvm->slots_lock);
	kvm->arch.vioapic = NULL;
	kfree(ioapic);
}
780
781
/*
 * Snapshot IOAPIC state for userspace (KVM_GET_IRQCHIP).  Bits already
 * delivered for edge interrupts are removed from the reported IRR so a
 * later restore does not replay them.
 */
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
	state->irr &= ~ioapic->irr_delivered;
	spin_unlock(&ioapic->lock);
}
790
791
/*
 * Restore IOAPIC state from userspace (KVM_SET_IRQCHIP).  The IRR is
 * cleared and rebuilt by re-injecting each pending interrupt from the
 * saved state, and all vCPUs are asked to rescan their handled vectors.
 */
void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
	struct kvm_ioapic *ioapic = kvm->arch.vioapic;

	spin_lock(&ioapic->lock);
	memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
	ioapic->irr = 0;
	ioapic->irr_delivered = 0;
	kvm_make_scan_ioapic_request(kvm);
	kvm_ioapic_inject_all(ioapic, state->irr);
	spin_unlock(&ioapic->lock);
}
803
804