GitHub Repository: awilliam/linux-vfio
Path: blob/master/arch/x86/lib/delay.c
/*
 * Precise Delay Loops for i386
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@ucw.cz>
 * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors. The additional
 * jump magic is needed to get the timing stable on all the CPU's
 * we have to worry about.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
        asm volatile(
                "       test %0,%0      \n"
                "       jz 3f           \n"
                "       jmp 1f          \n"

                ".align 16              \n"
                "1:     jmp 2f          \n"

                ".align 16              \n"
                "2:     dec %0          \n"
                "       jnz 2b          \n"
                "3:     dec %0          \n"

                : /* we don't need output */
                :"a" (loops)
        );
}
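
/*
 * How the jump magic above appears to work (a reading, not part of the
 * original comment): test/jz skips the spin entirely when loops == 0,
 * and the two ".align 16" directives force both jump targets onto
 * 16-byte boundaries, so the cost of each dec/jnz iteration does not
 * depend on where the linker happened to place this function.
 */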

/* TSC based delay: */
static void delay_tsc(unsigned long loops)
{
        unsigned long bclock, now;
        int cpu;

        preempt_disable();
        cpu = smp_processor_id();
        rdtsc_barrier();
        rdtscl(bclock);
        for (;;) {
                rdtsc_barrier();
                rdtscl(now);
                if ((now - bclock) >= loops)
                        break;

                /* Allow RT tasks to run */
                preempt_enable();
                rep_nop();
                preempt_disable();

                /*
                 * It is possible that we moved to another CPU, and
                 * since TSC's are per-cpu we need to calculate
                 * that. The delay must guarantee that we wait "at
                 * least" the amount of time. Being moved to another
                 * CPU could make the wait longer but we just need to
                 * make sure we waited long enough. Rebalance the
                 * counter for this CPU.
                 */
                if (unlikely(cpu != smp_processor_id())) {
                        loops -= (now - bclock);
                        cpu = smp_processor_id();
                        rdtsc_barrier();
                        rdtscl(bclock);
                }
        }
        preempt_enable();
}
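
/*
 * The rebalancing with illustrative numbers (hypothetical, not from
 * the source): if loops == 1000 and 400 ticks had elapsed on the old
 * CPU when the migration is noticed, loops drops to 600 and bclock is
 * re-read from the new CPU's TSC, so at least 600 more ticks are
 * waited there -- possibly longer in total, never shorter.
 */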

/*
 * Since we calibrate only once at boot, this
 * function should be set once at boot and not changed
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
        delay_fn = delay_tsc;
}

int __devinit read_current_timer(unsigned long *timer_val)
{
        if (delay_fn == delay_tsc) {
                rdtscll(*timer_val);
                return 0;
        }
        return -1;
}

void __delay(unsigned long loops)
{
        delay_fn(loops);
}
EXPORT_SYMBOL(__delay);

inline void __const_udelay(unsigned long xloops)
{
        int d0;

        xloops *= 4;
        asm("mull %%edx"
                :"=d" (xloops), "=&a" (d0)
                :"1" (xloops), "0"
                (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

        __delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
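
/*
 * The fixed-point math above, spelled out (an interpretation): callers
 * pass xloops pre-scaled by 2^32 (see __udelay/__ndelay below).
 * "mull %%edx" forms the 64-bit product xloops * 4 * lpj * (HZ/4) in
 * edx:eax, and keeping only %edx -- the high 32 bits -- divides by
 * 2^32 again, leaving roughly
 *
 *         loops = usecs * loops_per_jiffy * HZ / 10^6
 *
 * i.e. the calibrated delay loops per second, scaled to the request.
 * The *4 / (HZ/4) split keeps loops_per_jiffy * HZ within 32 bits,
 * and the ++xloops rounds the truncated quotient up.
 */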

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
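
/*
 * Context, not from this file: drivers normally reach this through the
 * udelay() macro in <asm/delay.h>, which for compile-time-constant
 * arguments folds the 0x10c7 multiply and calls __const_udelay()
 * directly; __udelay() here handles the non-constant case.
 */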

void __ndelay(unsigned long nsecs)
{
        __const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
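
/*
 * Worked example with hypothetical numbers: with loops_per_jiffy =
 * 4,000,000 and HZ = 1000 (4 * 10^9 delay loops per second),
 * __udelay(10) passes 10 * 0x10c7 = 42,950 to __const_udelay(), which
 * computes ((42950 * 4) * (4000000 * 250)) >> 32 = 40,000, then the
 * ++xloops makes it 40,001 -- just over the exact 40,000 loops that
 * 10 us represents, so the delay errs on the long side.
 */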