GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/char/hpet.c
/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <asm/current.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
 */
#define HPET_USER_FREQ		(64)
#define HPET_DRIFT		(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/* WARNING -- don't get confused.  These macros are never used
 * to write the (single) counter, and rarely to read it.
 * They're badly named; to fix, someday.
 */
#if BITS_PER_LONG == 64
#define write_counter(V, MC)	writeq(V, MC)
#define read_counter(MC)	readq(MC)
#else
#define write_counter(V, MC)	writel(V, MC)
#define read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;

static cycle_t read_hpet(struct clocksource *cs)
{
        return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}

static struct clocksource clocksource_hpet = {
        .name		= "hpet",
        .rating		= 250,
        .read		= read_hpet,
        .mask		= CLOCKSOURCE_MASK(64),
        .flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define HPET_DEV_NAME	(7)

struct hpet_dev {
        struct hpets *hd_hpets;
        struct hpet __iomem *hd_hpet;
        struct hpet_timer __iomem *hd_timer;
        unsigned long hd_ireqfreq;
        unsigned long hd_irqdata;
        wait_queue_head_t hd_waitqueue;
        struct fasync_struct *hd_async_queue;
        unsigned int hd_flags;
        unsigned int hd_irq;
        unsigned int hd_hdwirq;
        char hd_name[HPET_DEV_NAME];
};

struct hpets {
        struct hpets *hp_next;
        struct hpet __iomem *hp_hpet;
        unsigned long hp_hpet_phys;
        struct clocksource *hp_clocksource;
        unsigned long long hp_tick_freq;
        unsigned long hp_delta;
        unsigned int hp_ntimer;
        unsigned int hp_which;
        struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define HPET_OPEN		0x0001
#define HPET_IE			0x0002	/* interrupt enabled */
#define HPET_PERIODIC		0x0004
#define HPET_SHARED_IRQ		0x0008


#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
        writel(v & 0xffffffff, addr);
        writel(v >> 32, addr + 4);
}
#endif
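
/*
 * Editorial note: these fallbacks, used where the architecture provides no
 * native 64-bit MMIO accessors, split each access into two 32-bit accesses
 * (the low word at addr, then the word at addr + 4), so they are not atomic
 * with respect to the hardware.  A 64-bit register that updates between the
 * two readl() calls can be observed torn; that is presumably one reason the
 * free-running main counter is accessed through the read_counter() /
 * write_counter() macros above rather than through readq()/writeq().
 */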

static irqreturn_t hpet_interrupt(int irq, void *data)
{
        struct hpet_dev *devp;
        unsigned long isr;

        devp = data;
        isr = 1 << (devp - devp->hd_hpets->hp_dev);

        if ((devp->hd_flags & HPET_SHARED_IRQ) &&
            !(isr & readl(&devp->hd_hpet->hpet_isr)))
                return IRQ_NONE;

        spin_lock(&hpet_lock);
        devp->hd_irqdata++;

        /*
         * For non-periodic timers, increment the accumulator.
         * This has the effect of treating non-periodic like periodic.
         */
        if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
                unsigned long m, t, mc, base, k;
                struct hpet __iomem *hpet = devp->hd_hpet;
                struct hpets *hpetp = devp->hd_hpets;

                t = devp->hd_ireqfreq;
                m = read_counter(&devp->hd_timer->hpet_compare);
                mc = read_counter(&hpet->hpet_mc);
                /* The time for the next interrupt would logically be t + m;
                 * however, if we are very unlucky and the interrupt is
                 * delayed for longer than t, then setting the comparator to
                 * t + m will completely miss the next interrupt and an
                 * application will hang.  Therefore we need to make a more
                 * complex computation, assuming that there exists a k for
                 * which the following is true:
                 * k * t + base < mc + delta
                 * (k + 1) * t + base > mc + delta
                 * where t is the interval in hpet ticks for the given freq,
                 * base is the theoretical start value 0 < base < t,
                 * mc is the main counter value at the time of the interrupt,
                 * delta is the time it takes to write a value to the
                 * comparator.
                 * k may then be computed as (mc - base + delta) / t .
                 */
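                /*
                 * Editorial example with invented numbers: if t = 1000,
                 * mc = 5690 and delta = 10, then base = 5690 % 1000 = 690,
                 * k = (5690 - 690 + 10) / 1000 = 5, and the comparator is
                 * armed at (5 + 1) * 1000 + 690 = 6690.  Had the previous
                 * comparator value been, say, m = 3690, the naive choice
                 * t + m = 4690 would already lie in the past and the timer
                 * would never fire again.
                 */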
                base = mc % t;
                k = (mc - base + hpetp->hp_delta) / t;
                write_counter(t * (k + 1) + base,
                              &devp->hd_timer->hpet_compare);
        }

        if (devp->hd_flags & HPET_SHARED_IRQ)
                writel(isr, &devp->hd_hpet->hpet_isr);
        spin_unlock(&hpet_lock);

        wake_up_interruptible(&devp->hd_waitqueue);

        kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

        return IRQ_HANDLED;
}

static void hpet_timer_set_irq(struct hpet_dev *devp)
{
        unsigned long v;
        int irq, gsi;
        struct hpet_timer __iomem *timer;

        spin_lock_irq(&hpet_lock);
        if (devp->hd_hdwirq) {
                spin_unlock_irq(&hpet_lock);
                return;
        }

        timer = devp->hd_timer;

        /* we prefer level triggered mode */
        v = readl(&timer->hpet_config);
        if (!(v & Tn_INT_TYPE_CNF_MASK)) {
                v |= Tn_INT_TYPE_CNF_MASK;
                writel(v, &timer->hpet_config);
        }
        spin_unlock_irq(&hpet_lock);

        v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
                                 Tn_INT_ROUTE_CAP_SHIFT;

        /*
         * In PIC mode, skip IRQs 0-4, 6-9 and 12-15 (mask 0xf3df), which are
         * always used by legacy devices.  In IO APIC mode, skip all of the
         * legacy IRQs.
         */
        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
                v &= ~0xf3df;
        else
                v &= ~0xffff;

        for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
                if (irq >= nr_irqs) {
                        irq = HPET_MAX_IRQ;
                        break;
                }

                gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
                                        ACPI_ACTIVE_LOW);
                if (gsi > 0)
                        break;

                /* FIXME: Setup interrupt source table */
        }

        if (irq < HPET_MAX_IRQ) {
                spin_lock_irq(&hpet_lock);
                v = readl(&timer->hpet_config);
                v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
                writel(v, &timer->hpet_config);
                devp->hd_hdwirq = gsi;
                spin_unlock_irq(&hpet_lock);
        }
        return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpets *hpetp;
        int i;

        if (file->f_mode & FMODE_WRITE)
                return -EINVAL;

        mutex_lock(&hpet_mutex);
        spin_lock_irq(&hpet_lock);

        for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
                for (i = 0; i < hpetp->hp_ntimer; i++)
                        if (hpetp->hp_dev[i].hd_flags & HPET_OPEN)
                                continue;
                        else {
                                devp = &hpetp->hp_dev[i];
                                break;
                        }

        if (!devp) {
                spin_unlock_irq(&hpet_lock);
                mutex_unlock(&hpet_mutex);
                return -EBUSY;
        }

        file->private_data = devp;
        devp->hd_irqdata = 0;
        devp->hd_flags |= HPET_OPEN;
        spin_unlock_irq(&hpet_lock);
        mutex_unlock(&hpet_mutex);

        hpet_timer_set_irq(devp);

        return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long data;
        ssize_t retval;
        struct hpet_dev *devp;

        devp = file->private_data;
        if (!devp->hd_ireqfreq)
                return -EIO;

        if (count < sizeof(unsigned long))
                return -EINVAL;

        add_wait_queue(&devp->hd_waitqueue, &wait);

        for ( ; ; ) {
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irq(&hpet_lock);
                data = devp->hd_irqdata;
                devp->hd_irqdata = 0;
                spin_unlock_irq(&hpet_lock);

                if (data)
                        break;
                else if (file->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        goto out;
                } else if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        goto out;
                }
                schedule();
        }

        retval = put_user(data, (unsigned long __user *)buf);
        if (!retval)
                retval = sizeof(unsigned long);
out:
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&devp->hd_waitqueue, &wait);

        return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table * wait)
{
        unsigned long v;
        struct hpet_dev *devp;

        devp = file->private_data;

        if (!devp->hd_ireqfreq)
                return 0;

        poll_wait(file, &devp->hd_waitqueue, wait);

        spin_lock_irq(&hpet_lock);
        v = devp->hd_irqdata;
        spin_unlock_irq(&hpet_lock);

        if (v != 0)
                return POLLIN | POLLRDNORM;

        return 0;
}

static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_HPET_MMAP
        struct hpet_dev *devp;
        unsigned long addr;

        if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
                return -EINVAL;

        devp = file->private_data;
        addr = devp->hd_hpets->hp_hpet_phys;

        if (addr & (PAGE_SIZE - 1))
                return -ENOSYS;

        vma->vm_flags |= VM_IO;
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
                               PAGE_SIZE, vma->vm_page_prot)) {
                printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
                       __func__);
                return -EAGAIN;
        }

        return 0;
#else
        return -ENOSYS;
#endif
}

static int hpet_fasync(int fd, struct file *file, int on)
{
        struct hpet_dev *devp;

        devp = file->private_data;

        if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
                return 0;
        else
                return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
        struct hpet_dev *devp;
        struct hpet_timer __iomem *timer;
        int irq = 0;

        devp = file->private_data;
        timer = devp->hd_timer;

        spin_lock_irq(&hpet_lock);

        writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
               &timer->hpet_config);

        irq = devp->hd_irq;
        devp->hd_irq = 0;

        devp->hd_ireqfreq = 0;

        if (devp->hd_flags & HPET_PERIODIC
            && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                unsigned long v;

                v = readq(&timer->hpet_config);
                v ^= Tn_TYPE_CNF_MASK;
                writeq(v, &timer->hpet_config);
        }

        devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
        spin_unlock_irq(&hpet_lock);

        if (irq)
                free_irq(irq, devp);

        file->private_data = NULL;
        return 0;
}

static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
        struct hpet_timer __iomem *timer;
        struct hpet __iomem *hpet;
        struct hpets *hpetp;
        int irq;
        unsigned long g, v, t, m;
        unsigned long flags, isr;

        timer = devp->hd_timer;
        hpet = devp->hd_hpet;
        hpetp = devp->hd_hpets;

        if (!devp->hd_ireqfreq)
                return -EIO;

        spin_lock_irq(&hpet_lock);

        if (devp->hd_flags & HPET_IE) {
                spin_unlock_irq(&hpet_lock);
                return -EBUSY;
        }

        devp->hd_flags |= HPET_IE;

        if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
                devp->hd_flags |= HPET_SHARED_IRQ;
        spin_unlock_irq(&hpet_lock);

        irq = devp->hd_hdwirq;

        if (irq) {
                unsigned long irq_flags;

                if (devp->hd_flags & HPET_SHARED_IRQ) {
                        /*
                         * To prevent the interrupt handler from seeing an
                         * unwanted interrupt status bit, program the timer
                         * so that it will not fire in the near future ...
                         */
                        writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
                               &timer->hpet_config);
                        write_counter(read_counter(&hpet->hpet_mc),
                                      &timer->hpet_compare);
                        /* ... and clear any left-over status. */
                        isr = 1 << (devp - devp->hd_hpets->hp_dev);
                        writel(isr, &hpet->hpet_isr);
                }

                sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
                irq_flags = devp->hd_flags & HPET_SHARED_IRQ
                                                ? IRQF_SHARED : IRQF_DISABLED;
                if (request_irq(irq, hpet_interrupt, irq_flags,
                                devp->hd_name, (void *)devp)) {
                        printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
                        irq = 0;
                }
        }

        if (irq == 0) {
                spin_lock_irq(&hpet_lock);
                devp->hd_flags ^= HPET_IE;
                spin_unlock_irq(&hpet_lock);
                return -EIO;
        }

        devp->hd_irq = irq;
        t = devp->hd_ireqfreq;
        v = readq(&timer->hpet_config);

        /* 64-bit comparators are not yet supported through the ioctls,
         * so force this into 32-bit mode if it supports both modes
         */
        g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

        if (devp->hd_flags & HPET_PERIODIC) {
                g |= Tn_TYPE_CNF_MASK;
                v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
                writeq(v, &timer->hpet_config);
                local_irq_save(flags);

                /*
                 * NOTE: First we modify the hidden accumulator
                 * register supported by periodic-capable comparators.
                 * We never want to modify the (single) counter; that
                 * would affect all the comparators. The value written
                 * is the counter value when the first interrupt is due.
                 */
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
                /*
                 * Then we modify the comparator, indicating the period
                 * for subsequent interrupts.
                 */
                write_counter(t, &timer->hpet_compare);
        } else {
                local_irq_save(flags);
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        }

        if (devp->hd_flags & HPET_SHARED_IRQ) {
                isr = 1 << (devp - devp->hd_hpets->hp_dev);
                writel(isr, &hpet->hpet_isr);
        }
        writeq(g, &timer->hpet_config);
        local_irq_restore(flags);

        return 0;
}

/* converts Hz to number of timer ticks */
static inline unsigned long hpet_time_div(struct hpets *hpets,
                                          unsigned long dis)
{
        unsigned long long m;

        m = hpets->hp_tick_freq + (dis >> 1);
        do_div(m, dis);
        return (unsigned long)m;
}
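
/*
 * Editorial example: with a common ~14.318 MHz HPET, hp_tick_freq is about
 * 14318180 ticks per second, so a requested frequency of HPET_USER_FREQ
 * (64 Hz) becomes (14318180 + 32) / 64, about 223722 main-counter ticks
 * between interrupts.  The "+ (dis >> 1)" above simply rounds the division
 * to the nearest tick.
 */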

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
                  struct hpet_info *info)
{
        struct hpet_timer __iomem *timer;
        struct hpet __iomem *hpet;
        struct hpets *hpetp;
        int err;
        unsigned long v;

        switch (cmd) {
        case HPET_IE_OFF:
        case HPET_INFO:
        case HPET_EPI:
        case HPET_DPI:
        case HPET_IRQFREQ:
                timer = devp->hd_timer;
                hpet = devp->hd_hpet;
                hpetp = devp->hd_hpets;
                break;
        case HPET_IE_ON:
                return hpet_ioctl_ieon(devp);
        default:
                return -EINVAL;
        }

        err = 0;

        switch (cmd) {
        case HPET_IE_OFF:
                if ((devp->hd_flags & HPET_IE) == 0)
                        break;
                v = readq(&timer->hpet_config);
                v &= ~Tn_INT_ENB_CNF_MASK;
                writeq(v, &timer->hpet_config);
                if (devp->hd_irq) {
                        free_irq(devp->hd_irq, devp);
                        devp->hd_irq = 0;
                }
                devp->hd_flags ^= HPET_IE;
                break;
        case HPET_INFO:
                {
                        memset(info, 0, sizeof(*info));
                        if (devp->hd_ireqfreq)
                                info->hi_ireqfreq =
                                        hpet_time_div(hpetp, devp->hd_ireqfreq);
                        info->hi_flags =
                            readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
                        info->hi_hpet = hpetp->hp_which;
                        info->hi_timer = devp - hpetp->hp_dev;
                        break;
                }
        case HPET_EPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                devp->hd_flags |= HPET_PERIODIC;
                break;
        case HPET_DPI:
                v = readq(&timer->hpet_config);
                if ((v & Tn_PER_INT_CAP_MASK) == 0) {
                        err = -ENXIO;
                        break;
                }
                if (devp->hd_flags & HPET_PERIODIC &&
                    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
                        v = readq(&timer->hpet_config);
                        v ^= Tn_TYPE_CNF_MASK;
                        writeq(v, &timer->hpet_config);
                }
                devp->hd_flags &= ~HPET_PERIODIC;
                break;
        case HPET_IRQFREQ:
                if ((arg > hpet_max_freq) &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EACCES;
                        break;
                }

                if (!arg) {
                        err = -EINVAL;
                        break;
                }

                devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
        }

        return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct hpet_info info;
        int err;

        mutex_lock(&hpet_mutex);
        err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
        mutex_unlock(&hpet_mutex);

        if ((cmd == HPET_INFO) && !err &&
            (copy_to_user((void __user *)arg, &info, sizeof(info))))
                err = -EFAULT;

        return err;
}

#ifdef CONFIG_COMPAT
struct compat_hpet_info {
        compat_ulong_t hi_ireqfreq;	/* Hz */
        compat_ulong_t hi_flags;	/* information */
        unsigned short hi_hpet;
        unsigned short hi_timer;
};

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct hpet_info info;
        int err;

        mutex_lock(&hpet_mutex);
        err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
        mutex_unlock(&hpet_mutex);

        if ((cmd == HPET_INFO) && !err) {
                struct compat_hpet_info __user *u = compat_ptr(arg);
                if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
                    put_user(info.hi_flags, &u->hi_flags) ||
                    put_user(info.hi_hpet, &u->hi_hpet) ||
                    put_user(info.hi_timer, &u->hi_timer))
                        err = -EFAULT;
        }

        return err;
}
#endif

static const struct file_operations hpet_fops = {
        .owner = THIS_MODULE,
        .llseek = no_llseek,
        .read = hpet_read,
        .poll = hpet_poll,
        .unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = hpet_compat_ioctl,
#endif
        .open = hpet_open,
        .release = hpet_release,
        .fasync = hpet_fasync,
        .mmap = hpet_mmap,
};
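
/*
 * Editorial sketch (not part of the original file): a minimal userspace
 * client of the interface exported by hpet_fops above.  The /dev/hpet node
 * and the HPET_* ioctl numbers come from this driver and <linux/hpet.h>;
 * error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/hpet.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/dev/hpet", O_RDONLY);	// hpet_open() rejects writers
 *		unsigned long freq = 64, hits;		// Hz, <= dev.hpet.max-user-freq
 *
 *		ioctl(fd, HPET_IRQFREQ, freq);		// sets hd_ireqfreq
 *		ioctl(fd, HPET_EPI, 0);			// periodic mode, if the comparator allows it
 *		ioctl(fd, HPET_IE_ON, 0);		// arm the timer and request the IRQ
 *		read(fd, &hits, sizeof(hits));		// blocks; interrupts since last read
 *		ioctl(fd, HPET_IE_OFF, 0);
 *		close(fd);				// hpet_release() frees the IRQ
 *		return 0;
 *	}
 */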

static int hpet_is_known(struct hpet_data *hdp)
{
        struct hpets *hpetp;

        for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
                if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
                        return 1;

        return 0;
}

static ctl_table hpet_table[] = {
        {
         .procname = "max-user-freq",
         .data = &hpet_max_freq,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = proc_dointvec,
         },
        {}
};

static ctl_table hpet_root[] = {
        {
         .procname = "hpet",
         .maxlen = 0,
         .mode = 0555,
         .child = hpet_table,
         },
        {}
};

static ctl_table dev_root[] = {
        {
         .procname = "dev",
         .maxlen = 0,
         .mode = 0555,
         .child = hpet_root,
         },
        {}
};

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define TICK_CALIBRATE	(1000UL)

static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
        struct hpet_timer __iomem *timer = NULL;
        unsigned long t, m, count, i, flags, start;
        struct hpet_dev *devp;
        int j;
        struct hpet __iomem *hpet;

        for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
                if ((devp->hd_flags & HPET_OPEN) == 0) {
                        timer = devp->hd_timer;
                        break;
                }

        if (!timer)
                return 0;

        hpet = hpetp->hp_hpet;
        t = read_counter(&timer->hpet_compare);

        i = 0;
        count = hpet_time_div(hpetp, TICK_CALIBRATE);

        local_irq_save(flags);

        start = read_counter(&hpet->hpet_mc);

        do {
                m = read_counter(&hpet->hpet_mc);
                write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
        } while (i++, (m - start) < count);

        local_irq_restore(flags);

        return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
        unsigned long ret = -1;
        unsigned long tmp;

        /*
         * Try to calibrate until the return value settles to a stable,
         * small value.  If an SMI occurs during the calibration loop, the
         * measured value will be much larger; repeating until it stops
         * shrinking avoids that impact.
         */
        for ( ; ; ) {
                tmp = __hpet_calibrate(hpetp);
                if (ret <= tmp)
                        break;
                ret = tmp;
        }

        return ret;
}

int hpet_alloc(struct hpet_data *hdp)
{
        u64 cap, mcfg;
        struct hpet_dev *devp;
        u32 i, ntimer;
        struct hpets *hpetp;
        size_t siz;
        struct hpet __iomem *hpet;
        static struct hpets *last;
        unsigned long period;
        unsigned long long temp;
        u32 remainder;

        /*
         * hpet_alloc can be called by platform dependent code.
         * If platform dependent code has allocated the hpet that
         * ACPI has also reported, then we catch it here.
         */
        if (hpet_is_known(hdp)) {
                printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
                       __func__);
                return 0;
        }

        siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
                                      sizeof(struct hpet_dev));

        hpetp = kzalloc(siz, GFP_KERNEL);

        if (!hpetp)
                return -ENOMEM;

        hpetp->hp_which = hpet_nhpet++;
        hpetp->hp_hpet = hdp->hd_address;
        hpetp->hp_hpet_phys = hdp->hd_phys_address;

        hpetp->hp_ntimer = hdp->hd_nirqs;

        for (i = 0; i < hdp->hd_nirqs; i++)
                hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

        hpet = hpetp->hp_hpet;

        cap = readq(&hpet->hpet_cap);

        ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

        if (hpetp->hp_ntimer != ntimer) {
                printk(KERN_WARNING "hpet: number irqs doesn't agree"
                       " with number of timers\n");
                kfree(hpetp);
                return -ENODEV;
        }

        if (last)
                last->hp_next = hpetp;
        else
                hpets = hpetp;

        last = hpetp;

        period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
                 HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
        temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
        temp += period >> 1; /* round */
        do_div(temp, period);
        hpetp->hp_tick_freq = temp; /* ticks per second */
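
        /*
         * Editorial example: many chipsets report a period of roughly
         * 69841279 fs (about 69.84 ns), which gives
         * hp_tick_freq = 10^15 / 69841279, roughly 14318180, i.e. the
         * familiar ~14.318 MHz HPET clock.
         */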

        printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
               hpetp->hp_which, hdp->hd_phys_address,
               hpetp->hp_ntimer > 1 ? "s" : "");
        for (i = 0; i < hpetp->hp_ntimer; i++)
                printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
        printk("\n");

        temp = hpetp->hp_tick_freq;
        remainder = do_div(temp, 1000000);
        printk(KERN_INFO
               "hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
               hpetp->hp_which, hpetp->hp_ntimer,
               cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
               (unsigned) temp, remainder);

        mcfg = readq(&hpet->hpet_config);
        if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
                write_counter(0L, &hpet->hpet_mc);
                mcfg |= HPET_ENABLE_CNF_MASK;
                writeq(mcfg, &hpet->hpet_config);
        }

        for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
                struct hpet_timer __iomem *timer;

                timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

                devp->hd_hpets = hpetp;
                devp->hd_hpet = hpet;
                devp->hd_timer = timer;

                /*
                 * If the timer was reserved by platform code,
                 * then make timer unavailable for opens.
                 */
                if (hdp->hd_state & (1 << i)) {
                        devp->hd_flags = HPET_OPEN;
                        continue;
                }

                init_waitqueue_head(&devp->hd_waitqueue);
        }

        hpetp->hp_delta = hpet_calibrate(hpetp);

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
        if (!hpet_clocksource) {
                hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
                CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
                clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
                hpetp->hp_clocksource = &clocksource_hpet;
                hpet_clocksource = &clocksource_hpet;
        }
#endif

        return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
        struct hpet_data *hdp;
        acpi_status status;
        struct acpi_resource_address64 addr;

        hdp = data;

        status = acpi_resource_to_address64(res, &addr);

        if (ACPI_SUCCESS(status)) {
                hdp->hd_phys_address = addr.minimum;
                hdp->hd_address = ioremap(addr.minimum, addr.address_length);

                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
                struct acpi_resource_fixed_memory32 *fixmem32;

                fixmem32 = &res->data.fixed_memory32;
                if (!fixmem32)
                        return AE_NO_MEMORY;

                hdp->hd_phys_address = fixmem32->address;
                hdp->hd_address = ioremap(fixmem32->address,
                                          HPET_RANGE_SIZE);

                if (hpet_is_known(hdp)) {
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
        } else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
                struct acpi_resource_extended_irq *irqp;
                int i, irq;

                irqp = &res->data.extended_irq;

                for (i = 0; i < irqp->interrupt_count; i++) {
                        irq = acpi_register_gsi(NULL, irqp->interrupts[i],
                                                irqp->triggering, irqp->polarity);
                        if (irq < 0)
                                return AE_ERROR;

                        hdp->hd_irq[hdp->hd_nirqs] = irq;
                        hdp->hd_nirqs++;
                }
        }

        return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
        acpi_status result;
        struct hpet_data data;

        memset(&data, 0, sizeof(data));

        result =
            acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                hpet_resources, &data);

        if (ACPI_FAILURE(result))
                return -ENODEV;

        if (!data.hd_address || !data.hd_nirqs) {
                if (data.hd_address)
                        iounmap(data.hd_address);
                printk("%s: no address or irqs in _CRS\n", __func__);
                return -ENODEV;
        }

        return hpet_alloc(&data);
}

static int hpet_acpi_remove(struct acpi_device *device, int type)
{
        /* XXX need to unregister clocksource, dealloc mem, etc */
        return -EINVAL;
}

static const struct acpi_device_id hpet_device_ids[] = {
        {"PNP0103", 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, hpet_device_ids);

static struct acpi_driver hpet_acpi_driver = {
        .name = "hpet",
        .ids = hpet_device_ids,
        .ops = {
                .add = hpet_acpi_add,
                .remove = hpet_acpi_remove,
                },
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
        int result;

        result = misc_register(&hpet_misc);
        if (result < 0)
                return -ENODEV;

        sysctl_header = register_sysctl_table(dev_root);

        result = acpi_bus_register_driver(&hpet_acpi_driver);
        if (result < 0) {
                if (sysctl_header)
                        unregister_sysctl_table(sysctl_header);
                misc_deregister(&hpet_misc);
                return result;
        }

        return 0;
}

static void __exit hpet_exit(void)
{
        acpi_bus_unregister_driver(&hpet_acpi_driver);

        if (sysctl_header)
                unregister_sysctl_table(sysctl_header);
        misc_deregister(&hpet_misc);

        return;
}

module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <[email protected]>");
MODULE_LICENSE("GPL");