GitHub Repository: torvalds/linux
Path: blob/master/arch/x86/kvm/i8254.c
/*
 * 8253/8254 interval timer emulation
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 * Copyright (c) 2006 Intel Corporation
 * Copyright (c) 2007 Keir Fraser, XenSource Inc
 * Copyright (c) 2008 Intel Corporation
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * Authors:
 *   Sheng Yang <[email protected]>
 *   Based on QEMU and Xen.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/slab.h>

#include "ioapic.h"
#include "irq.h"
#include "i8254.h"
#include "x86.h"

#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
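
/*
 * Per-counter read/write state.  The 8254 control word's access field selects
 * LSB-only, MSB-only, or LSB-then-MSB transfers; the WORD0/WORD1 states track
 * which half of a two-byte (LSB-then-MSB) transfer comes next.
 */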
#define RW_STATE_LSB 1
#define RW_STATE_MSB 2
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4

static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
        struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

        switch (c->mode) {
        default:
        case 0:
        case 4:
                /* XXX: just disable/enable counting */
                break;
        case 1:
        case 2:
        case 3:
        case 5:
                /* Restart counting on rising edge. */
                if (c->gate < val)
                        c->count_load_time = ktime_get();
                break;
        }

        c->gate = val;
}

static int pit_get_gate(struct kvm_pit *pit, int channel)
{
        return pit->pit_state.channels[channel].gate;
}

static s64 __kpit_elapsed(struct kvm_pit *pit)
{
        s64 elapsed;
        ktime_t remaining;
        struct kvm_kpit_state *ps = &pit->pit_state;

        if (!ps->period)
                return 0;

        /*
         * The Counter does not stop when it reaches zero. In
         * Modes 0, 1, 4, and 5 the Counter ``wraps around'' to
         * the highest count, either FFFF hex for binary counting
         * or 9999 for BCD counting, and continues counting.
         * Modes 2 and 3 are periodic; the Counter reloads
         * itself with the initial count and continues counting
         * from there.
         */
        remaining = hrtimer_get_remaining(&ps->timer);
        elapsed = ps->period - ktime_to_ns(remaining);

        return elapsed;
}

static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
                        int channel)
{
        if (channel == 0)
                return __kpit_elapsed(pit);

        return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}
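
/*
 * Reconstruct the current counter value from wall-clock time: the elapsed
 * nanoseconds are first converted into PIT ticks (KVM_PIT_FREQ is the
 * ~1.193 MHz PIT input clock), then the down-count is derived according to
 * the channel's programmed mode.
 */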
static int pit_get_count(struct kvm_pit *pit, int channel)
{
        struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
        s64 d, t;
        int counter;

        t = kpit_elapsed(pit, c, channel);
        d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        case 0:
        case 1:
        case 4:
        case 5:
                counter = (c->count - d) & 0xffff;
                break;
        case 3:
                /* XXX: may be incorrect for odd counts */
                counter = c->count - (mod_64((2 * d), c->count));
                break;
        default:
                counter = c->count - mod_64(d, c->count);
                break;
        }
        return counter;
}
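
/*
 * Approximate the level of the counter's OUT pin from the elapsed tick count,
 * following the 8254 waveform for each mode (e.g. mode 0 raises OUT once the
 * count expires, mode 3 yields a square wave that is high for the first half
 * of each period).
 */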
static int pit_get_out(struct kvm_pit *pit, int channel)
{
        struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
        s64 d, t;
        int out;

        t = kpit_elapsed(pit, c, channel);
        d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);

        switch (c->mode) {
        default:
        case 0:
                out = (d >= c->count);
                break;
        case 1:
                out = (d < c->count);
                break;
        case 2:
                out = ((mod_64(d, c->count) == 0) && (d != 0));
                break;
        case 3:
                out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
                break;
        case 4:
        case 5:
                out = (d == c->count);
                break;
        }

        return out;
}

static void pit_latch_count(struct kvm_pit *pit, int channel)
{
        struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

        if (!c->count_latched) {
                c->latched_count = pit_get_count(pit, channel);
                c->count_latched = c->rw_mode;
        }
}

static void pit_latch_status(struct kvm_pit *pit, int channel)
{
        struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];

        if (!c->status_latched) {
                /* TODO: Return NULL COUNT (bit 6). */
                c->status = ((pit_get_out(pit, channel) << 7) |
                             (c->rw_mode << 4) |
                             (c->mode << 1) |
                             c->bcd);
                c->status_latched = 1;
        }
}

static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
{
        return container_of(ps, struct kvm_pit, pit_state);
}
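
/*
 * EOI (ack) notifier for the PIT interrupt.  Together with 'pending' this
 * implements the reinject handshake: ticks that fired before the previous
 * interrupt was acknowledged are accounted in 'pending' and re-queued here
 * once the guest acks.
 */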
static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
                                                 irq_ack_notifier);
        struct kvm_pit *pit = pit_state_to_pit(ps);

        atomic_set(&ps->irq_ack, 1);
        /* irq_ack should be set before pending is read. Order accesses with
         * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
         */
        smp_mb();
        if (atomic_dec_if_positive(&ps->pending) > 0)
                kthread_queue_work(pit->worker, &pit->expired);
}

void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
{
        struct kvm_pit *pit = vcpu->kvm->arch.vpit;
        struct hrtimer *timer;

        /* Somewhat arbitrarily make vcpu0 the owner of the PIT. */
        if (vcpu->vcpu_id || !pit)
                return;

        timer = &pit->pit_state.timer;
        mutex_lock(&pit->pit_state.lock);
        if (hrtimer_cancel(timer))
                hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
        mutex_unlock(&pit->pit_state.lock);
}

static void destroy_pit_timer(struct kvm_pit *pit)
{
        hrtimer_cancel(&pit->pit_state.timer);
        kthread_flush_work(&pit->expired);
}
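
/*
 * Deliver one PIT tick from the worker thread: raise and immediately lower
 * IRQ0 so the PIC/IOAPIC observe an edge.  In reinject mode, a tick whose
 * predecessor has not been acknowledged yet is deferred; the ack notifier
 * re-queues this work once the guest acks.
 */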
static void pit_do_work(struct kthread_work *work)
{
        struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
        struct kvm *kvm = pit->kvm;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        struct kvm_kpit_state *ps = &pit->pit_state;

        if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
                return;

        kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 1, false);
        kvm_set_irq(kvm, KVM_PIT_IRQ_SOURCE_ID, 0, 0, false);

        /*
         * Provides NMI watchdog support via Virtual Wire mode.
         * The route is: PIT -> LVT0 in NMI mode.
         *
         * Note: Our Virtual Wire implementation does not follow
         * the MP specification. We propagate a PIT interrupt to all
         * VCPUs and only when LVT0 is in NMI mode. The interrupt can
         * also be simultaneously delivered through PIC and IOAPIC.
         */
        if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_apic_nmi_wd_deliver(vcpu);
}
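
/*
 * hrtimer callback for channel 0.  It only accounts the tick (bumping
 * 'pending' in reinject mode) and queues the worker, so the actual injection
 * runs in process context rather than in the hrtimer's interrupt context;
 * periodic modes re-arm the timer by one period.
 */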
static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
        struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
        struct kvm_pit *pt = pit_state_to_pit(ps);

        if (atomic_read(&ps->reinject))
                atomic_inc(&ps->pending);

        kthread_queue_work(pt->worker, &pt->expired);

        if (ps->is_periodic) {
                hrtimer_add_expires_ns(&ps->timer, ps->period);
                return HRTIMER_RESTART;
        } else
                return HRTIMER_NORESTART;
}

static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
        atomic_set(&pit->pit_state.pending, 0);
        atomic_set(&pit->pit_state.irq_ack, 1);
}

static void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
{
        struct kvm_kpit_state *ps = &pit->pit_state;
        struct kvm *kvm = pit->kvm;

        if (atomic_read(&ps->reinject) == reinject)
                return;

        /*
         * AMD SVM AVIC accelerates EOI write and does not trap.
         * This causes in-kernel PIT re-inject mode to fail
         * since it checks ps->irq_ack before kvm_set_irq()
         * and relies on the ack notifier to timely queue
         * the pt->worker work item and reinject the missed tick.
         * So, deactivate APICv when PIT is in reinject mode.
         */
        if (reinject) {
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                /* The initial state is preserved while ps->reinject == 0. */
                kvm_pit_reset_reinject(pit);
                kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        } else {
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ);
                kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
                kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
        }

        atomic_set(&ps->reinject, reinject);
}
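
/*
 * (Re)arm the emulated channel 0 timer.  The reload value is converted from
 * PIT ticks to nanoseconds (interval = val * NSEC_PER_SEC / KVM_PIT_FREQ),
 * and periodic intervals are clamped to min_timer_period_us so a guest
 * cannot flood the host with hrtimer expirations.
 */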
static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
{
        struct kvm_kpit_state *ps = &pit->pit_state;
        struct kvm *kvm = pit->kvm;
        s64 interval;

        if (!ioapic_in_kernel(kvm) ||
            ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
                return;

        interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);

        pr_debug("create pit timer, interval is %llu nsec\n", interval);

        /* TODO: The new value only takes effect after the timer is retriggered. */
        hrtimer_cancel(&ps->timer);
        kthread_flush_work(&pit->expired);
        ps->period = interval;
        ps->is_periodic = is_period;

        kvm_pit_reset_reinject(pit);

        /*
         * Do not allow the guest to program periodic timers with small
         * interval, since the hrtimers are not throttled by the host
         * scheduler.
         */
        if (ps->is_periodic) {
                s64 min_period = min_timer_period_us * 1000LL;

                if (ps->period < min_period) {
                        pr_info_ratelimited(
                            "requested %lld ns "
                            "i8254 timer period limited to %lld ns\n",
                            ps->period, min_period);
                        ps->period = min_period;
                }
        }

        hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
                      HRTIMER_MODE_ABS);
}

static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
        struct kvm_kpit_state *ps = &pit->pit_state;

        pr_debug("load_count val is %u, channel is %d\n", val, channel);

        /*
         * The largest possible initial count is 0; this is equivalent
         * to 2^16 for binary counting and 10^4 for BCD counting.
         */
        if (val == 0)
                val = 0x10000;

        ps->channels[channel].count = val;

        if (channel != 0) {
                ps->channels[channel].count_load_time = ktime_get();
                return;
        }

        /* Two types of timer: modes 0, 1 and 4 are one-shot, modes 2 and 3
         * are periodic; any other mode tears the timer down. */
        switch (ps->channels[0].mode) {
        case 0:
        case 1:
                /* FIXME: enhance mode 4 precision */
        case 4:
                create_pit_timer(pit, val, 0);
                break;
        case 2:
        case 3:
                create_pit_timer(pit, val, 1);
                break;
        default:
                destroy_pit_timer(pit);
        }
}

static void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
                               int hpet_legacy_start)
{
        u8 saved_mode;

        WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));

        if (hpet_legacy_start) {
                /* save existing mode for later reenablement */
                WARN_ON(channel != 0);
                saved_mode = pit->pit_state.channels[0].mode;
                pit->pit_state.channels[0].mode = 0xff; /* disable timer */
                pit_load_count(pit, channel, val);
                pit->pit_state.channels[0].mode = saved_mode;
        } else {
                pit_load_count(pit, channel, val);
        }
}

static inline struct kvm_pit *dev_to_pit(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_pit, dev);
}

static inline struct kvm_pit *speaker_to_pit(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_pit, speaker_dev);
}

static inline int pit_in_range(gpa_t addr)
{
        return ((addr >= KVM_PIT_BASE_ADDRESS) &&
                (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
}
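
/*
 * Guest writes to I/O ports 0x40-0x43.  Port 0x43 (addr == 3) is the control
 * port: bits 7:6 select the counter (11b is the read-back command), bits 5:4
 * the access mode, bits 3:1 the operating mode, and bit 0 binary/BCD.  Ports
 * 0x40-0x42 load the selected counter one byte at a time according to its
 * write state.
 */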
static int pit_ioport_write(struct kvm_vcpu *vcpu,
                            struct kvm_io_device *this,
                            gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = dev_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        int channel, access;
        struct kvm_kpit_channel_state *s;
        u32 val = *(u32 *) data;
        if (!pit_in_range(addr))
                return -EOPNOTSUPP;

        val &= 0xff;
        addr &= KVM_PIT_CHANNEL_MASK;

        mutex_lock(&pit_state->lock);

        if (val != 0)
                pr_debug("write addr is 0x%x, len is %d, val is 0x%x\n",
                         (unsigned int)addr, len, val);

        if (addr == 3) {
                channel = val >> 6;
                if (channel == 3) {
                        /* Read-Back Command. */
                        for (channel = 0; channel < 3; channel++) {
                                if (val & (2 << channel)) {
                                        if (!(val & 0x20))
                                                pit_latch_count(pit, channel);
                                        if (!(val & 0x10))
                                                pit_latch_status(pit, channel);
                                }
                        }
                } else {
                        /* Select Counter <channel>. */
                        s = &pit_state->channels[channel];
                        access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
                        if (access == 0) {
                                pit_latch_count(pit, channel);
                        } else {
                                s->rw_mode = access;
                                s->read_state = access;
                                s->write_state = access;
                                s->mode = (val >> 1) & 7;
                                if (s->mode > 5)
                                        s->mode -= 4;
                                s->bcd = val & 1;
                        }
                }
        } else {
                /* Write Count. */
                s = &pit_state->channels[addr];
                switch (s->write_state) {
                default:
                case RW_STATE_LSB:
                        pit_load_count(pit, addr, val);
                        break;
                case RW_STATE_MSB:
                        pit_load_count(pit, addr, val << 8);
                        break;
                case RW_STATE_WORD0:
                        s->write_latch = val;
                        s->write_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        pit_load_count(pit, addr, s->write_latch | (val << 8));
                        s->write_state = RW_STATE_WORD0;
                        break;
                }
        }

        mutex_unlock(&pit_state->lock);
        return 0;
}
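
/*
 * Guest reads from ports 0x40-0x42 (the control port at 0x43 is effectively
 * write-only and reads back as 0 here).  A latched status byte is returned
 * first, then any latched count, otherwise the live count is returned one
 * byte at a time according to the channel's read state.
 */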
static int pit_ioport_read(struct kvm_vcpu *vcpu,
                           struct kvm_io_device *this,
                           gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = dev_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        int ret, count;
        struct kvm_kpit_channel_state *s;
        if (!pit_in_range(addr))
                return -EOPNOTSUPP;

        addr &= KVM_PIT_CHANNEL_MASK;
        if (addr == 3)
                return 0;

        s = &pit_state->channels[addr];

        mutex_lock(&pit_state->lock);

        if (s->status_latched) {
                s->status_latched = 0;
                ret = s->status;
        } else if (s->count_latched) {
                switch (s->count_latched) {
                default:
                case RW_STATE_LSB:
                        ret = s->latched_count & 0xff;
                        s->count_latched = 0;
                        break;
                case RW_STATE_MSB:
                        ret = s->latched_count >> 8;
                        s->count_latched = 0;
                        break;
                case RW_STATE_WORD0:
                        ret = s->latched_count & 0xff;
                        s->count_latched = RW_STATE_MSB;
                        break;
                }
        } else {
                switch (s->read_state) {
                default:
                case RW_STATE_LSB:
                        count = pit_get_count(pit, addr);
                        ret = count & 0xff;
                        break;
                case RW_STATE_MSB:
                        count = pit_get_count(pit, addr);
                        ret = (count >> 8) & 0xff;
                        break;
                case RW_STATE_WORD0:
                        count = pit_get_count(pit, addr);
                        ret = count & 0xff;
                        s->read_state = RW_STATE_WORD1;
                        break;
                case RW_STATE_WORD1:
                        count = pit_get_count(pit, addr);
                        ret = (count >> 8) & 0xff;
                        s->read_state = RW_STATE_WORD0;
                        break;
                }
        }

        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);

        mutex_unlock(&pit_state->lock);
        return 0;
}
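
/*
 * Port 0x61 ("dummy speaker") emulation: bit 0 drives the gate of PIT
 * channel 2, bit 1 is the speaker-data enable, bit 4 mirrors the memory
 * refresh clock, and bit 5 reflects the channel 2 output.
 */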
static int speaker_ioport_write(struct kvm_vcpu *vcpu,
                                struct kvm_io_device *this,
                                gpa_t addr, int len, const void *data)
{
        struct kvm_pit *pit = speaker_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        u32 val = *(u32 *) data;
        if (addr != KVM_SPEAKER_BASE_ADDRESS)
                return -EOPNOTSUPP;

        mutex_lock(&pit_state->lock);
        if (val & (1 << 1))
                pit_state->flags |= KVM_PIT_FLAGS_SPEAKER_DATA_ON;
        else
                pit_state->flags &= ~KVM_PIT_FLAGS_SPEAKER_DATA_ON;
        pit_set_gate(pit, 2, val & 1);
        mutex_unlock(&pit_state->lock);
        return 0;
}

static int speaker_ioport_read(struct kvm_vcpu *vcpu,
                               struct kvm_io_device *this,
                               gpa_t addr, int len, void *data)
{
        struct kvm_pit *pit = speaker_to_pit(this);
        struct kvm_kpit_state *pit_state = &pit->pit_state;
        unsigned int refresh_clock;
        int ret;
        if (addr != KVM_SPEAKER_BASE_ADDRESS)
                return -EOPNOTSUPP;

        /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
        refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;

        mutex_lock(&pit_state->lock);
        ret = (!!(pit_state->flags & KVM_PIT_FLAGS_SPEAKER_DATA_ON) << 1) |
                pit_get_gate(pit, 2) | (pit_get_out(pit, 2) << 5) |
                (refresh_clock << 4);
        if (len > sizeof(ret))
                len = sizeof(ret);
        memcpy(data, (char *)&ret, len);
        mutex_unlock(&pit_state->lock);
        return 0;
}

static void kvm_pit_reset(struct kvm_pit *pit)
{
        int i;
        struct kvm_kpit_channel_state *c;

        pit->pit_state.flags = 0;
        for (i = 0; i < 3; i++) {
                c = &pit->pit_state.channels[i];
                c->mode = 0xff;
                c->gate = (i != 2);
                pit_load_count(pit, i, 0);
        }

        kvm_pit_reset_reinject(pit);
}

static void pit_mask_notifier(struct kvm_irq_mask_notifier *kimn, bool mask)
{
        struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);

        if (!mask)
                kvm_pit_reset_reinject(pit);
}

int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;

        BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));

        mutex_lock(&kps->lock);
        memcpy(ps, &kps->channels, sizeof(*ps));
        mutex_unlock(&kps->lock);
        return 0;
}

int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
        int i;
        struct kvm_pit *pit = kvm->arch.vpit;

        mutex_lock(&pit->pit_state.lock);
        memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
        for (i = 0; i < 3; i++)
                kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
        mutex_unlock(&pit->pit_state.lock);
        return 0;
}

int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
        mutex_lock(&kvm->arch.vpit->pit_state.lock);
        memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
               sizeof(ps->channels));
        ps->flags = kvm->arch.vpit->pit_state.flags;
        mutex_unlock(&kvm->arch.vpit->pit_state.lock);
        memset(&ps->reserved, 0, sizeof(ps->reserved));
        return 0;
}

int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
        int start = 0;
        int i;
        u32 prev_legacy, cur_legacy;
        struct kvm_pit *pit = kvm->arch.vpit;

        mutex_lock(&pit->pit_state.lock);
        prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
        cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
        if (!prev_legacy && cur_legacy)
                start = 1;
        memcpy(&pit->pit_state.channels, &ps->channels,
               sizeof(pit->pit_state.channels));
        pit->pit_state.flags = ps->flags;
        for (i = 0; i < 3; i++)
                kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
                                   start && i == 0);
        mutex_unlock(&pit->pit_state.lock);
        return 0;
}

int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control)
{
        struct kvm_pit *pit = kvm->arch.vpit;

        /* pit->pit_state.lock was overloaded to prevent userspace from getting
         * an inconsistent state after running multiple KVM_REINJECT_CONTROL
         * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
         */
        mutex_lock(&pit->pit_state.lock);
        kvm_pit_set_reinject(pit, control->pit_reinject);
        mutex_unlock(&pit->pit_state.lock);

        return 0;
}

static const struct kvm_io_device_ops pit_dev_ops = {
        .read = pit_ioport_read,
        .write = pit_ioport_write,
};

static const struct kvm_io_device_ops speaker_dev_ops = {
        .read = speaker_ioport_read,
        .write = speaker_ioport_write,
};
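
/*
 * Create the in-kernel PIT: allocate state, start the per-VM injection
 * worker, set up the channel 0 hrtimer, reset the channels, enable reinject
 * mode, and register the PIO device at 0x40-0x43 (plus 0x61 when the dummy
 * speaker is requested).
 */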
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
        struct kvm_pit *pit;
        struct kvm_kpit_state *pit_state;
        struct pid *pid;
        pid_t pid_nr;
        int ret;

        pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL_ACCOUNT);
        if (!pit)
                return NULL;

        mutex_init(&pit->pit_state.lock);

        pid = get_pid(task_tgid(current));
        pid_nr = pid_vnr(pid);
        put_pid(pid);

        pit->worker = kthread_run_worker(0, "kvm-pit/%d", pid_nr);
        if (IS_ERR(pit->worker))
                goto fail_kthread;

        kthread_init_work(&pit->expired, pit_do_work);

        pit->kvm = kvm;

        pit_state = &pit->pit_state;
        hrtimer_setup(&pit_state->timer, pit_timer_fn, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);

        pit_state->irq_ack_notifier.gsi = 0;
        pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
        pit->mask_notifier.func = pit_mask_notifier;

        kvm_pit_reset(pit);

        kvm_pit_set_reinject(pit, true);

        mutex_lock(&kvm->slots_lock);
        kvm_iodevice_init(&pit->dev, &pit_dev_ops);
        ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
                                      KVM_PIT_MEM_LENGTH, &pit->dev);
        if (ret < 0)
                goto fail_register_pit;

        if (flags & KVM_PIT_SPEAKER_DUMMY) {
                kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
                ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
                                              KVM_SPEAKER_BASE_ADDRESS, 4,
                                              &pit->speaker_dev);
                if (ret < 0)
                        goto fail_register_speaker;
        }
        mutex_unlock(&kvm->slots_lock);

        return pit;

fail_register_speaker:
        kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail_register_pit:
        mutex_unlock(&kvm->slots_lock);
        kvm_pit_set_reinject(pit, false);
        kthread_destroy_worker(pit->worker);
fail_kthread:
        kfree(pit);
        return NULL;
}

void kvm_free_pit(struct kvm *kvm)
{
        struct kvm_pit *pit = kvm->arch.vpit;

        if (pit) {
                mutex_lock(&kvm->slots_lock);
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
                kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
                mutex_unlock(&kvm->slots_lock);
                kvm_pit_set_reinject(pit, false);
                hrtimer_cancel(&pit->pit_state.timer);
                kthread_destroy_worker(pit->worker);
                kfree(pit);
        }
}