GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/dev/acpica/acpi_hpet.c
/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 2010 Alexander Motin <[email protected]>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_acpi.h"

#if defined(__amd64__)
#define DEV_APIC
#else
#include "opt_apic.h"
#endif
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/vdso.h>

#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>

#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_hpet.h>

#ifdef DEV_APIC
#include "pcib_if.h"
#endif

#define HPET_VENDID_AMD 0x4353
#define HPET_VENDID_AMD2 0x1022
#define HPET_VENDID_HYGON 0x1d94
#define HPET_VENDID_INTEL 0x8086
#define HPET_VENDID_NVIDIA 0x10de
#define HPET_VENDID_SW 0x1166

ACPI_SERIAL_DECL(hpet, "ACPI HPET support");

/* ACPI CA debugging */
#define _COMPONENT ACPI_TIMER
ACPI_MODULE_NAME("HPET")

struct hpet_softc {
	device_t dev;
	int mem_rid;
	int intr_rid;
	int irq;
	int useirq;
	int legacy_route;
	int per_cpu;
	uint32_t allowed_irqs;
	struct resource *mem_res;
	struct resource *intr_res;
	void *intr_handle;
	ACPI_HANDLE handle;
	uint32_t acpi_uid;
	uint64_t freq;
	uint32_t caps;
	struct timecounter tc;
	struct hpet_timer {
		struct eventtimer et;
		struct hpet_softc *sc;
		int num;
		int mode;
#define TIMER_STOPPED 0
#define TIMER_PERIODIC 1
#define TIMER_ONESHOT 2
		int intr_rid;
		int irq;
		int pcpu_cpu;
		int pcpu_misrouted;
		int pcpu_master;
		int pcpu_slaves[MAXCPU];
		struct resource *intr_res;
		void *intr_handle;
		uint32_t caps;
		uint32_t vectors;
		uint32_t div;
		uint32_t next;
		char name[8];
	} t[32];
	int num_timers;
	struct cdev *pdev;
	int mmap_allow;
	int mmap_allow_write;
};

static d_open_t hpet_open;
static d_mmap_t hpet_mmap;

static struct cdevsw hpet_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "hpet",
	.d_open = hpet_open,
	.d_mmap = hpet_mmap,
};

static u_int hpet_get_timecount(struct timecounter *tc);
static void hpet_test(struct hpet_softc *sc);

static char *hpet_ids[] = { "PNP0103", NULL };

/* Knob to disable acpi_hpet device */
bool acpi_hpet_disabled = false;

static u_int
hpet_get_timecount(struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	return (bus_read_4(sc->mem_res, HPET_MAIN_COUNTER));
}

uint32_t
hpet_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th->th_x86_shift = 0;
	vdso_th->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th->th_x86_pvc_last_systime = 0;
	vdso_th->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th->th_res, sizeof(vdso_th->th_res));
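	/*
	 * Editorial note (not in the original source): the return value
	 * acts as an enable flag for this vdso timehands setup; userland
	 * can only read the counter through a /dev/hpetN mapping, so
	 * success is reported only when mmap is allowed.
	 */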
	return (sc->mmap_allow != 0);
}

#ifdef COMPAT_FREEBSD32
uint32_t
hpet_vdso_timehands32(struct vdso_timehands32 *vdso_th32,
    struct timecounter *tc)
{
	struct hpet_softc *sc;

	sc = tc->tc_priv;
	vdso_th32->th_algo = VDSO_TH_ALGO_X86_HPET;
	vdso_th32->th_x86_shift = 0;
	vdso_th32->th_x86_hpet_idx = device_get_unit(sc->dev);
	vdso_th32->th_x86_pvc_last_systime[0] = 0;
	vdso_th32->th_x86_pvc_last_systime[1] = 0;
	vdso_th32->th_x86_pvc_stable_mask = 0;
	bzero(vdso_th32->th_res, sizeof(vdso_th32->th_res));
	return (sc->mmap_allow != 0);
}
#endif

static void
hpet_enable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	if (sc->legacy_route)
		val |= HPET_CNF_LEG_RT;
	else
		val &= ~HPET_CNF_LEG_RT;
	val |= HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static void
hpet_disable(struct hpet_softc *sc)
{
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_CONFIG);
	val &= ~HPET_CNF_ENABLE;
	bus_write_4(sc->mem_res, HPET_CONFIG, val);
}

static int
hpet_start(struct eventtimer *et, sbintime_t first, sbintime_t period)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;
	uint32_t fdiv, now;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
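	/*
	 * Editorial note (not in the original source): sbintime_t is a
	 * 32.32 fixed-point count of seconds, so multiplying by the
	 * counter frequency and shifting right by 32 below converts the
	 * requested period and first delay into HPET ticks.
	 */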
	if (period != 0) {
		t->mode = TIMER_PERIODIC;
		t->div = (sc->freq * period) >> 32;
	} else {
		t->mode = TIMER_ONESHOT;
		t->div = 0;
	}
	if (first != 0)
		fdiv = (sc->freq * first) >> 32;
	else
		fdiv = t->div;
	if (t->irq < 0)
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
	t->caps |= HPET_TCNF_INT_ENB;
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
restart:
	t->next = now + fdiv;
	if (t->mode == TIMER_PERIODIC && (t->caps & HPET_TCAP_PER_INT)) {
		t->caps |= HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps | HPET_TCNF_VAL_SET);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->div);
	} else {
		t->caps &= ~HPET_TCNF_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
		    t->caps);
		bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
		    t->next);
	}
	now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
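	/*
	 * Editorial note (not in the original source): if the main counter
	 * has already reached (or is within HPET_MIN_CYCLES of) the
	 * comparator value just written, the event may have been missed;
	 * double the offset and program the comparator again.
	 */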
	if ((int32_t)(now - t->next + HPET_MIN_CYCLES) >= 0) {
		fdiv *= 2;
		goto restart;
	}
	return (0);
}

static int
hpet_stop(struct eventtimer *et)
{
	struct hpet_timer *mt = (struct hpet_timer *)et->et_priv;
	struct hpet_timer *t;
	struct hpet_softc *sc = mt->sc;

	t = (mt->pcpu_master < 0) ? mt : &sc->t[mt->pcpu_slaves[curcpu]];
	t->mode = TIMER_STOPPED;
	t->caps &= ~(HPET_TCNF_INT_ENB | HPET_TCNF_TYPE);
	bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	return (0);
}

static int
hpet_intr_single(void *arg)
{
	struct hpet_timer *t = (struct hpet_timer *)arg;
	struct hpet_timer *mt;
	struct hpet_softc *sc = t->sc;
	uint32_t now;

	if (t->mode == TIMER_STOPPED)
		return (FILTER_STRAY);
	/* Check that the per-CPU timer interrupt reached the right CPU. */
	if (t->pcpu_cpu >= 0 && t->pcpu_cpu != curcpu) {
		if ((++t->pcpu_misrouted) % 32 == 0) {
			printf("HPET interrupt routed to the wrong CPU"
			    " (timer %d CPU %d -> %d)!\n",
			    t->num, t->pcpu_cpu, curcpu);
		}

		/*
		 * Reload the timer, hoping that the next attempt will be
		 * luckier (the system will arrange proper interrupt binding).
		 */
		if ((t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) == 0) ||
		    t->mode == TIMER_ONESHOT) {
			t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER) +
			    sc->freq / 8;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		return (FILTER_HANDLED);
	}
	if (t->mode == TIMER_PERIODIC &&
	    (t->caps & HPET_TCAP_PER_INT) == 0) {
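		/*
		 * Editorial note (not in the original source): periodic mode
		 * is emulated here for timers without hardware periodic
		 * support by advancing the comparator one period per
		 * interrupt; if we have fallen more than half a period
		 * behind, resynchronize instead of generating a burst of
		 * catch-up interrupts.
		 */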
		t->next += t->div;
		now = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if ((int32_t)((now + t->div / 2) - t->next) > 0)
			t->next = now + t->div / 2;
		bus_write_4(sc->mem_res,
		    HPET_TIMER_COMPARATOR(t->num), t->next);
	} else if (t->mode == TIMER_ONESHOT)
		t->mode = TIMER_STOPPED;
	mt = (t->pcpu_master < 0) ? t : &sc->t[t->pcpu_master];
	if (mt->et.et_active)
		mt->et.et_event_cb(&mt->et, mt->et.et_arg);
	return (FILTER_HANDLED);
}

static int
hpet_intr(void *arg)
{
	struct hpet_softc *sc = (struct hpet_softc *)arg;
	int i;
	uint32_t val;

	val = bus_read_4(sc->mem_res, HPET_ISR);
	if (val) {
		bus_write_4(sc->mem_res, HPET_ISR, val);
		val &= sc->useirq;
		for (i = 0; i < sc->num_timers; i++) {
			if ((val & (1 << i)) == 0)
				continue;
			hpet_intr_single(&sc->t[i]);
		}
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}

uint32_t
hpet_get_uid(device_t dev)
{
	struct hpet_softc *sc;

	sc = device_get_softc(dev);
	return (sc->acpi_uid);
}

static ACPI_STATUS
hpet_find(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	char **ids;
	uint32_t id = (uint32_t)(uintptr_t)context;
	uint32_t uid = 0;

	for (ids = hpet_ids; *ids != NULL; ids++) {
		if (acpi_MatchHid(handle, *ids))
			break;
	}
	if (*ids == NULL)
		return (AE_OK);
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &uid)) ||
	    id == uid)
		*status = acpi_get_device(handle);
	return (AE_OK);
}

/*
 * Find an existing IRQ resource that matches the requested IRQ range
 * and return its RID. If one is not found, use a new RID.
 */
static int
hpet_find_irq_rid(device_t dev, u_long start, u_long end)
{
	rman_res_t irq;
	int error, rid;

	for (rid = 0;; rid++) {
		error = bus_get_resource(dev, SYS_RES_IRQ, rid, &irq, NULL);
		if (error != 0 || (start <= irq && irq <= end))
			return (rid);
	}
}

static int
hpet_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (!sc->mmap_allow)
		return (EPERM);
	else
		return (0);
}

static int
hpet_mmap(struct cdev *cdev, vm_ooffset_t offset, vm_paddr_t *paddr,
    int nprot, vm_memattr_t *memattr)
{
	struct hpet_softc *sc;

	sc = cdev->si_drv1;
	if (offset >= rman_get_size(sc->mem_res))
		return (EINVAL);
	if (!sc->mmap_allow_write && (nprot & PROT_WRITE))
		return (EPERM);
	*paddr = rman_get_start(sc->mem_res) + offset;
	*memattr = VM_MEMATTR_UNCACHEABLE;

	return (0);
}

/* Discover the HPET via the ACPI table of the same name. */
static void
hpet_identify(driver_t *driver, device_t parent)
{
	ACPI_TABLE_HPET *hpet;
	ACPI_STATUS status;
	device_t child;
	int i;

	/* Only one HPET device can be added. */
	if (devclass_get_device(devclass_find("hpet"), 0))
		return;
	for (i = 1; ; i++) {
		/* Search for the HPET table. */
		status = AcpiGetTable(ACPI_SIG_HPET, i, (ACPI_TABLE_HEADER **)&hpet);
		if (ACPI_FAILURE(status))
			return;
		/* Search for an HPET device with the same ID. */
		child = NULL;
		AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
		    100, hpet_find, NULL, (void *)(uintptr_t)hpet->Sequence,
		    (void *)&child);
		/* If found, let it be probed in the normal way. */
		if (child) {
			if (bus_get_resource(child, SYS_RES_MEMORY, 0,
			    NULL, NULL) != 0)
				bus_set_resource(child, SYS_RES_MEMORY, 0,
				    hpet->Address.Address, HPET_MEM_WIDTH);
			continue;
		}
		/* If not, create it from the table info. */
		child = BUS_ADD_CHILD(parent, 2, "hpet", 0);
		if (child == NULL) {
			printf("%s: can't add child\n", __func__);
			continue;
		}
		bus_set_resource(child, SYS_RES_MEMORY, 0, hpet->Address.Address,
		    HPET_MEM_WIDTH);
	}
}

static int
hpet_probe(device_t dev)
{
	int rv;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
	if (acpi_disabled("hpet") || acpi_hpet_disabled)
		return (ENXIO);
	if (acpi_get_handle(dev) != NULL)
		rv = ACPI_ID_PROBE(device_get_parent(dev), dev, hpet_ids, NULL);
	else
		rv = 0;
	if (rv <= 0)
		device_set_desc(dev, "High Precision Event Timer");
	return (rv);
}

static int
hpet_attach(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	struct make_dev_args mda;
	int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu;
	int pcpu_master, error;
	rman_res_t hpet_region_size;
	static int maxhpetet = 0;
	uint32_t val, val2, cvectors, dvectors;
	uint16_t vendor, rev;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->handle = acpi_get_handle(dev);

	sc->mem_rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		return (ENOMEM);

	hpet_region_size = rman_get_size(sc->mem_res);
	/* Validate that the region is big enough for the control registers. */
	if (hpet_region_size < HPET_MEM_MIN_WIDTH) {
		device_printf(dev, "memory region width %jd too small\n",
		    hpet_region_size);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	/* Be sure timer is enabled. */
	hpet_enable(sc);

	/* Read basic statistics about the timer. */
	val = bus_read_4(sc->mem_res, HPET_PERIOD);
	if (val == 0) {
		device_printf(dev, "invalid period\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->freq = (1000000000000000LL + val / 2) / val;
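	/*
	 * Editorial note (not in the original source): HPET_PERIOD reports
	 * the counter period in femtoseconds per tick, so the line above
	 * derives the frequency as 10^15 / period, rounded to the nearest
	 * Hz.
	 */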
	sc->caps = bus_read_4(sc->mem_res, HPET_CAPABILITIES);
	vendor = (sc->caps & HPET_CAP_VENDOR_ID) >> 16;
	rev = sc->caps & HPET_CAP_REV_ID;
	num_timers = 1 + ((sc->caps & HPET_CAP_NUM_TIM) >> 8);
	/*
	 * ATI/AMD violates the IA-PC HPET (High Precision Event Timers)
	 * Specification and reports an off-by-one number of
	 * timers/comparators.
	 * Additionally, they use an unregistered value in the VENDOR_ID
	 * field.
	 */
	if (vendor == HPET_VENDID_AMD && rev < 0x10 && num_timers > 0)
		num_timers--;
	/*
	 * Now validate that the region is big enough to address all counters.
	 */
	if (hpet_region_size < HPET_TIMER_CAP_CNF(num_timers)) {
		device_printf(dev,
		    "memory region width %jd too small for %d timers\n",
		    hpet_region_size, num_timers);
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}

	sc->num_timers = num_timers;
	if (bootverbose) {
		device_printf(dev,
		    "vendor 0x%x, rev 0x%x, %jdHz%s, %d timers,%s\n",
		    vendor, rev, sc->freq,
		    (sc->caps & HPET_CAP_COUNT_SIZE) ? " 64bit" : "",
		    num_timers,
		    (sc->caps & HPET_CAP_LEG_RT) ? " legacy route" : "");
	}
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->sc = sc;
		t->num = i;
		t->mode = TIMER_STOPPED;
		t->intr_rid = -1;
		t->irq = -1;
		t->pcpu_cpu = -1;
		t->pcpu_misrouted = 0;
		t->pcpu_master = -1;
		t->caps = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i));
		t->vectors = bus_read_4(sc->mem_res, HPET_TIMER_CAP_CNF(i) + 4);
		if (bootverbose) {
			device_printf(dev,
			    " t%d: irqs 0x%08x (%d)%s%s%s\n", i,
			    t->vectors, (t->caps & HPET_TCNF_INT_ROUTE) >> 9,
			    (t->caps & HPET_TCAP_FSB_INT_DEL) ? ", MSI" : "",
			    (t->caps & HPET_TCAP_SIZE) ? ", 64bit" : "",
			    (t->caps & HPET_TCAP_PER_INT) ? ", periodic" : "");
		}
	}
	if (testenv("debug.acpi.hpet_test"))
		hpet_test(sc);
	/*
	 * Don't attach if the timer never increments. Since the spec
	 * requires it to be at least 10 MHz, it has to change in 1 us.
	 */
	val = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	DELAY(1);
	val2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	if (val == val2) {
		device_printf(dev, "HPET never increments, disabling\n");
		hpet_disable(sc);
		bus_free_resource(dev, SYS_RES_MEMORY, sc->mem_res);
		return (ENXIO);
	}
	/* Announce the first HPET as a timecounter. */
	if (device_get_unit(dev) == 0) {
		sc->tc.tc_get_timecount = hpet_get_timecount,
		sc->tc.tc_counter_mask = ~0u,
		sc->tc.tc_name = "HPET",
		sc->tc.tc_quality = 950,
		sc->tc.tc_frequency = sc->freq;
		sc->tc.tc_priv = sc;
		sc->tc.tc_fill_vdso_timehands = hpet_vdso_timehands;
#ifdef COMPAT_FREEBSD32
		sc->tc.tc_fill_vdso_timehands32 = hpet_vdso_timehands32;
#endif
		tc_init(&sc->tc);
	}
	/* If not disabled, set up and announce event timers. */
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "clock", &i) == 0 && i == 0)
		return (0);

	/* Check whether we can and want legacy routing. */
	sc->legacy_route = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "legacy_route", &sc->legacy_route);
	if ((sc->caps & HPET_CAP_LEG_RT) == 0)
		sc->legacy_route = 0;
	if (sc->legacy_route) {
		sc->t[0].vectors = 0;
		sc->t[1].vectors = 0;
	}

	/* Check which IRQs we want to use. */
	/* By default allow any PCI IRQs. */
	sc->allowed_irqs = 0xffff0000;
	/*
	 * HPETs in AMD chipsets before SB800 have problems with IRQs >= 16.
	 * Lower IRQs also do not always work, for various reasons.
	 * SB800 fixed that, but does not seem to implement level triggering
	 * properly, which makes it very unreliable: it freezes after any
	 * lost interrupt. Avoid legacy IRQs for AMD.
	 */
	if (vendor == HPET_VENDID_AMD || vendor == HPET_VENDID_AMD2 ||
	    vendor == HPET_VENDID_HYGON)
		sc->allowed_irqs = 0x00000000;
	/*
	 * NVidia MCP5x chipsets have a number of unexplained interrupt
	 * problems. For some reason, using HPET interrupts breaks HDA sound.
	 */
	if (vendor == HPET_VENDID_NVIDIA && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * The ServerWorks HT1000 is reported to have problems with
	 * IRQs >= 16. Lower IRQs work, but the allowed mask is not set
	 * correctly. Legacy_route mode works fine.
	 */
	if (vendor == HPET_VENDID_SW && rev <= 0x01)
		sc->allowed_irqs = 0x00000000;
	/*
	 * Neither QEMU nor VirtualBox report supported IRQs correctly.
	 * The only way to use HPET there is to specify IRQs manually
	 * and/or use legacy_route. Legacy_route mode works on both.
	 */
	if (vm_guest != VM_GUEST_NO)
		sc->allowed_irqs = 0x00000000;
	/* Let the user override. */
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "allowed_irqs", &sc->allowed_irqs);

	/* Get how many per-CPU timers we should try to provide. */
	sc->per_cpu = 1;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "per_cpu", &sc->per_cpu);

	num_msi = 0;
	sc->useirq = 0;
	/* Find IRQ vectors for all timers. */
	cvectors = sc->allowed_irqs & 0xffff0000;
	dvectors = sc->allowed_irqs & 0x0000ffff;
	if (sc->legacy_route)
		dvectors &= 0x0000fefe;
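	/*
	 * Editorial note (not in the original source): legacy replacement
	 * routing hard-wires timer 0 to IRQ0 and timer 1 to IRQ8, so the
	 * 0x0000fefe mask above removes those two ISA IRQs from the pool
	 * of dedicated per-timer vectors.
	 */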
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (sc->legacy_route && i < 2)
			t->irq = (i == 0) ? 0 : 8;
#ifdef DEV_APIC
		else if (t->caps & HPET_TCAP_FSB_INT_DEL) {
			if ((j = PCIB_ALLOC_MSIX(
			    device_get_parent(device_get_parent(dev)), dev,
			    &t->irq))) {
				device_printf(dev,
				    "Can't allocate interrupt for t%d: %d\n",
				    i, j);
			}
		}
#endif
		else if (dvectors & t->vectors) {
			t->irq = ffs(dvectors & t->vectors) - 1;
			dvectors &= ~(1 << t->irq);
		}
		if (t->irq >= 0) {
			t->intr_rid = hpet_find_irq_rid(dev, t->irq, t->irq);
			t->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
			    &t->intr_rid, t->irq, t->irq, 1, RF_ACTIVE);
			if (t->intr_res == NULL) {
				t->irq = -1;
				device_printf(dev,
				    "Can't map interrupt for t%d.\n", i);
			} else if (bus_setup_intr(dev, t->intr_res,
			    INTR_TYPE_CLK, hpet_intr_single, NULL, t,
			    &t->intr_handle) != 0) {
				t->irq = -1;
				device_printf(dev,
				    "Can't setup interrupt for t%d.\n", i);
			} else {
				bus_describe_intr(dev, t->intr_res,
				    t->intr_handle, "t%d", i);
				num_msi++;
			}
		}
		if (t->irq < 0 && (cvectors & t->vectors) != 0) {
			cvectors &= t->vectors;
			sc->useirq |= (1 << i);
		}
	}
	if (sc->legacy_route && sc->t[0].irq < 0 && sc->t[1].irq < 0)
		sc->legacy_route = 0;
	if (sc->legacy_route)
		hpet_enable(sc);
	/* Group timers for per-CPU operation. */
	num_percpu_et = min(num_msi / mp_ncpus, sc->per_cpu);
	num_percpu_t = num_percpu_et * mp_ncpus;
	pcpu_master = 0;
	cur_cpu = CPU_FIRST();
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		if (t->irq >= 0 && num_percpu_t > 0) {
			if (cur_cpu == CPU_FIRST())
				pcpu_master = i;
			t->pcpu_cpu = cur_cpu;
			t->pcpu_master = pcpu_master;
			sc->t[pcpu_master].
			    pcpu_slaves[cur_cpu] = i;
			bus_bind_intr(dev, t->intr_res, cur_cpu);
			cur_cpu = CPU_NEXT(cur_cpu);
			num_percpu_t--;
		} else if (t->irq >= 0)
			bus_bind_intr(dev, t->intr_res, CPU_FIRST());
	}
	bus_write_4(sc->mem_res, HPET_ISR, 0xffffffff);
	sc->irq = -1;
	/* If at least one timer needs a legacy IRQ, set it up. */
	if (sc->useirq) {
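		/*
		 * Editorial note (not in the original source): the lines
		 * below pick the highest contiguous run of IRQs supported
		 * by every timer that will share this interrupt, then
		 * request any single IRQ within that range.
		 */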
		j = i = fls(cvectors) - 1;
		while (j > 0 && (cvectors & (1 << (j - 1))) != 0)
			j--;
		sc->intr_rid = hpet_find_irq_rid(dev, j, i);
		sc->intr_res = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &sc->intr_rid, j, i, 1, RF_SHAREABLE | RF_ACTIVE);
		if (sc->intr_res == NULL)
			device_printf(dev, "Can't map interrupt.\n");
		else if (bus_setup_intr(dev, sc->intr_res, INTR_TYPE_CLK,
		    hpet_intr, NULL, sc, &sc->intr_handle) != 0) {
			device_printf(dev, "Can't setup interrupt.\n");
		} else {
			sc->irq = rman_get_start(sc->intr_res);
			/* Bind IRQ to BSP to avoid live migration. */
			bus_bind_intr(dev, sc->intr_res, CPU_FIRST());
		}
	}
	/* Program and announce event timers. */
	for (i = 0; i < num_timers; i++) {
		t = &sc->t[i];
		t->caps &= ~(HPET_TCNF_FSB_EN | HPET_TCNF_INT_ROUTE);
		t->caps &= ~(HPET_TCNF_VAL_SET | HPET_TCNF_INT_ENB);
		t->caps &= ~(HPET_TCNF_INT_TYPE);
		t->caps |= HPET_TCNF_32MODE;
		if (t->irq >= 0 && sc->legacy_route && i < 2) {
			/* Legacy route doesn't need more configuration. */
		} else
#ifdef DEV_APIC
		if ((t->caps & HPET_TCAP_FSB_INT_DEL) && t->irq >= 0) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
				t->caps |= HPET_TCNF_FSB_EN;
			} else
				t->irq = -2;
		} else
#endif
		if (t->irq >= 0)
			t->caps |= (t->irq << 9);
		else if (sc->irq >= 0 && (t->vectors & (1 << sc->irq)))
			t->caps |= (sc->irq << 9) | HPET_TCNF_INT_TYPE;
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(i), t->caps);
		/* Skip event timers without a set-up IRQ. */
		if (t->irq < 0 &&
		    (sc->irq < 0 || (t->vectors & (1 << sc->irq)) == 0))
			continue;
		/* Announce the reset. */
		if (maxhpetet == 0)
			t->et.et_name = "HPET";
		else {
			sprintf(t->name, "HPET%d", maxhpetet);
			t->et.et_name = t->name;
		}
		t->et.et_flags = ET_FLAGS_PERIODIC | ET_FLAGS_ONESHOT;
		t->et.et_quality = 450;
		if (t->pcpu_master >= 0) {
			t->et.et_flags |= ET_FLAGS_PERCPU;
			t->et.et_quality += 100;
		} else if (mp_ncpus >= 8)
			t->et.et_quality -= 100;
		if ((t->caps & HPET_TCAP_PER_INT) == 0)
			t->et.et_quality -= 10;
		t->et.et_frequency = sc->freq;
		t->et.et_min_period =
		    ((uint64_t)(HPET_MIN_CYCLES * 2) << 32) / sc->freq;
		t->et.et_max_period = (0xfffffffeLLU << 32) / sc->freq;
		t->et.et_start = hpet_start;
		t->et.et_stop = hpet_stop;
		t->et.et_priv = &sc->t[i];
		if (t->pcpu_master < 0 || t->pcpu_master == i) {
			et_register(&t->et);
			maxhpetet++;
		}
	}
	acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid);

	make_dev_args_init(&mda);
	mda.mda_devsw = &hpet_cdevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0644;
	mda.mda_si_drv1 = sc;
	error = make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev));
	if (error == 0) {
		sc->mmap_allow = 1;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow",
		    &sc->mmap_allow);
		sc->mmap_allow_write = 0;
		TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow_write",
		    &sc->mmap_allow_write);
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow",
		    CTLFLAG_RW, &sc->mmap_allow, 0,
		    "Allow userland to memory map HPET");
		SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
		    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		    OID_AUTO, "mmap_allow_write",
		    CTLFLAG_RW, &sc->mmap_allow_write, 0,
		    "Allow userland write to the HPET register space");
	} else {
		device_printf(dev, "could not create /dev/hpet%d, error %d\n",
		    device_get_unit(dev), error);
	}

	return (0);
}

static int
hpet_detach(device_t dev)
{
	ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);

	/* XXX Without a tc_remove() function, we can't detach. */
	return (EBUSY);
}

static int
hpet_suspend(device_t dev)
{
//	struct hpet_softc *sc;

	/*
	 * Disable the timer during suspend. The timer will not lose
	 * its state in S1 or S2, but we are required to disable
	 * it.
	 */
//	sc = device_get_softc(dev);
//	hpet_disable(sc);

	return (0);
}

static int
hpet_resume(device_t dev)
{
	struct hpet_softc *sc;
	struct hpet_timer *t;
	int i;

	/* Re-enable the timer after a resume to keep the clock advancing. */
	sc = device_get_softc(dev);
	hpet_enable(sc);
	/* Restart event timers that were running on suspend. */
	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
#ifdef DEV_APIC
		if (t->irq >= 0 && (sc->legacy_route == 0 || i >= 2)) {
			uint64_t addr;
			uint32_t data;

			if (PCIB_MAP_MSI(
			    device_get_parent(device_get_parent(dev)), dev,
			    t->irq, &addr, &data) == 0) {
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_ADDR(i), addr);
				bus_write_4(sc->mem_res,
				    HPET_TIMER_FSB_VAL(i), data);
			}
		}
#endif
		if (t->mode == TIMER_STOPPED)
			continue;
		t->next = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
		if (t->mode == TIMER_PERIODIC &&
		    (t->caps & HPET_TCAP_PER_INT) != 0) {
			t->caps |= HPET_TCNF_TYPE;
			t->next += t->div;
			bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num),
			    t->caps | HPET_TCNF_VAL_SET);
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
			bus_read_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num));
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->div);
		} else {
			t->next += sc->freq / 1024;
			bus_write_4(sc->mem_res, HPET_TIMER_COMPARATOR(t->num),
			    t->next);
		}
		bus_write_4(sc->mem_res, HPET_ISR, 1 << t->num);
		bus_write_4(sc->mem_res, HPET_TIMER_CAP_CNF(t->num), t->caps);
	}
	return (0);
}

/* Print some basic latency/rate information to assist in debugging. */
static void
hpet_test(struct hpet_softc *sc)
{
	int i;
	uint32_t u1, u2;
	struct bintime b0, b1, b2;
	struct timespec ts;

	binuptime(&b0);
	binuptime(&b0);
	binuptime(&b1);
	u1 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	for (i = 1; i < 1000; i++)
		u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);
	binuptime(&b2);
	u2 = bus_read_4(sc->mem_res, HPET_MAIN_COUNTER);

	bintime_sub(&b2, &b1);
	bintime_sub(&b1, &b0);
	bintime_sub(&b2, &b1);
	bintime2timespec(&b2, &ts);

	device_printf(sc->dev, "%ld.%09ld: %u ... %u = %u\n",
	    (long)ts.tv_sec, ts.tv_nsec, u1, u2, u2 - u1);

	device_printf(sc->dev, "time per call: %ld ns\n", ts.tv_nsec / 1000);
}

#ifdef DEV_APIC
static int
hpet_remap_intr(device_t dev, device_t child, u_int irq)
{
	struct hpet_softc *sc = device_get_softc(dev);
	struct hpet_timer *t;
	uint64_t addr;
	uint32_t data;
	int error, i;

	for (i = 0; i < sc->num_timers; i++) {
		t = &sc->t[i];
		if (t->irq != irq)
			continue;
		error = PCIB_MAP_MSI(
		    device_get_parent(device_get_parent(dev)), dev,
		    irq, &addr, &data);
		if (error)
			return (error);
		hpet_disable(sc); /* Stop timer to avoid interrupt loss. */
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_ADDR(i), addr);
		bus_write_4(sc->mem_res, HPET_TIMER_FSB_VAL(i), data);
		hpet_enable(sc);
		return (0);
	}
	return (ENOENT);
}
#endif

static device_method_t hpet_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify, hpet_identify),
	DEVMETHOD(device_probe, hpet_probe),
	DEVMETHOD(device_attach, hpet_attach),
	DEVMETHOD(device_detach, hpet_detach),
	DEVMETHOD(device_suspend, hpet_suspend),
	DEVMETHOD(device_resume, hpet_resume),

#ifdef DEV_APIC
	DEVMETHOD(bus_remap_intr, hpet_remap_intr),
#endif

	DEVMETHOD_END
};

static driver_t hpet_driver = {
	"hpet",
	hpet_methods,
	sizeof(struct hpet_softc),
};

DRIVER_MODULE(hpet, acpi, hpet_driver, 0, 0);
MODULE_DEPEND(hpet, acpi, 1, 1, 1);