GitHub Repository: torvalds/linux
Path: blob/master/arch/sparc/vdso/vclock_gettime.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/unistd.h>
#include <asm/timex.h>
#include <asm/clocksource.h>
#include <asm/vvar.h>

#ifdef CONFIG_SPARC64
#define SYSCALL_STRING \
        "ta 0x6d;" \
        "bcs,a 1f;" \
        " sub %%g0, %%o0, %%o0;" \
        "1:"
#else
#define SYSCALL_STRING \
        "ta 0x10;" \
        "bcs,a 1f;" \
        " sub %%g0, %%o0, %%o0;" \
        "1:"
#endif
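
/*
 * SYSCALL_STRING issues the Linux syscall trap ("ta 0x6d" on sparc64,
 * "ta 0x10" on sparc32). On error the kernel returns with the carry flag
 * set, and the annulled branch executes the delay-slot "sub" only in that
 * case, negating %o0 so the caller sees the usual -errno convention.
 */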

#define SYSCALL_CLOBBERS \
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
        "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
        "f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \
        "f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \
        "cc", "memory"
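
/*
 * The clobber list above names the entire visible FP register file (the
 * upper half, %f32-%f62, only has even-numbered double-precision names)
 * plus the condition codes and memory, so the compiler assumes none of
 * that state survives the trap.
 */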

/*
 * Compute the vvar page's address in the process address space, and return it
 * as a pointer to the vvar_data.
 */
notrace static __always_inline struct vvar_data *get_vvar_data(void)
{
        unsigned long ret;

        /*
         * The vvar data page is mapped immediately before the first vDSO
         * text page, so grab the PC, round down to a page boundary, and
         * move back one page to reach the data page.
         */
        __asm__("rd %%pc, %0" : "=r" (ret));
        ret &= ~(8192 - 1);
        ret -= 8192;

        return (struct vvar_data *) ret;
}

notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts)
{
        register long num __asm__("g1") = __NR_clock_gettime;
        register long o0 __asm__("o0") = clock;
        register long o1 __asm__("o1") = (long) ts;

        __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
                             "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
        return o0;
}

notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        register long num __asm__("g1") = __NR_gettimeofday;
        register long o0 __asm__("o0") = (long) tv;
        register long o1 __asm__("o1") = (long) tz;

        __asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
                             "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
        return o0;
}
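
/*
 * The two fallbacks above perform the real system calls; they are used
 * whenever a request cannot be serviced in userspace (an unknown clock
 * ID, or vclock_mode == VCLOCK_NONE below).
 */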

#ifdef CONFIG_SPARC64
notrace static __always_inline u64 __shr64(u64 val, int amt)
{
        return val >> amt;
}

notrace static __always_inline u64 vread_tick(void)
{
        u64 ret;

        __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
        return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
        u64 ret;

        __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
        return ret;
}
#else
notrace static __always_inline u64 __shr64(u64 val, int amt)
{
        u64 ret;

        __asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
                             "srl %L1, 0, %L1\n\t"
                             "or %%g1, %L1, %%g1\n\t"
                             "srlx %%g1, %2, %L0\n\t"
                             "srlx %L0, 32, %H0"
                             : "=r" (ret)
                             : "r" (val), "r" (amt)
                             : "g1");
        return ret;
}

notrace static __always_inline u64 vread_tick(void)
{
        register unsigned long long ret asm("o4");

        __asm__ __volatile__("rd %%tick, %L0\n\t"
                             "srlx %L0, 32, %H0"
                             : "=r" (ret));
        return ret;
}

notrace static __always_inline u64 vread_tick_stick(void)
{
        register unsigned long long ret asm("o4");

        __asm__ __volatile__("rd %%asr24, %L0\n\t"
                             "srlx %L0, 32, %H0"
                             : "=r" (ret));
        return ret;
}
#endif
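
/*
 * %tick advances at the per-CPU clock rate, while %asr24 (%stick, the
 * "system tick") advances at a rate common to all CPUs; this is why the
 * *_stick readers and the separate *_stick vDSO entry points exist.
 *
 * In the 32-bit variants above, a u64 occupies a register pair and the
 * %H/%L operand modifiers select its high and low words; "rd" still
 * reads the full 64-bit register, so the srlx recovers the high half.
 */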

notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
{
        u64 v;
        u64 cycles;

        cycles = vread_tick();
        v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
        return v * vvar->clock.mult;
}
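
/*
 * vgetsns() and vgetsns_stick() are the standard clocksource conversion,
 *
 *      ns = ((cycles - cycle_last) & mask) * mult >> shift
 *
 * except that the product is returned still scaled by 2^shift; callers
 * add the similarly scaled base time first and apply __shr64() once.
 */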

notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
{
        u64 v;
        u64 cycles;

        cycles = vread_tick_stick();
        v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
        return v * vvar->clock.mult;
}

notrace static __always_inline int do_realtime(struct vvar_data *vvar,
                                               struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns(vvar);
                ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}
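
/*
 * The do/while loops in the do_* readers are the usual seqcount pattern:
 * vvar_read_begin() snapshots the sequence counter, the body reads the
 * time fields, and vvar_read_retry() restarts the read if the kernel
 * updated vvar_data concurrently, so a torn timestamp is never returned.
 */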

notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
                                                     struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_sec;
                ns = vvar->wall_time_snsec;
                ns += vgetsns_stick(vvar);
                ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
                                                struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns(vvar);
                ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
                                                      struct __kernel_old_timespec *ts)
{
        unsigned long seq;
        u64 ns;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_sec;
                ns = vvar->monotonic_time_snsec;
                ns += vgetsns_stick(vvar);
                ns = __shr64(ns, vvar->clock.shift);
        } while (unlikely(vvar_read_retry(vvar, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

notrace static int do_realtime_coarse(struct vvar_data *vvar,
                                      struct __kernel_old_timespec *ts)
{
        unsigned long seq;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->wall_time_coarse_sec;
                ts->tv_nsec = vvar->wall_time_coarse_nsec;
        } while (unlikely(vvar_read_retry(vvar, seq)));
        return 0;
}

notrace static int do_monotonic_coarse(struct vvar_data *vvar,
                                       struct __kernel_old_timespec *ts)
{
        unsigned long seq;

        do {
                seq = vvar_read_begin(vvar);
                ts->tv_sec = vvar->monotonic_time_coarse_sec;
                ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
        } while (unlikely(vvar_read_retry(vvar, seq)));

        return 0;
}
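
/*
 * The *_coarse readers never touch the cycle counter: they return the
 * timestamp recorded at the last timer tick, trading resolution for an
 * even cheaper read, which is why the dispatchers below use them without
 * checking vclock_mode.
 */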

notrace int
__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
{
        struct vvar_data *vvd = get_vvar_data();

        switch (clock) {
        case CLOCK_REALTIME:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_realtime(vvd, ts);
        case CLOCK_MONOTONIC:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_monotonic(vvd, ts);
        case CLOCK_REALTIME_COARSE:
                return do_realtime_coarse(vvd, ts);
        case CLOCK_MONOTONIC_COARSE:
                return do_monotonic_coarse(vvd, ts);
        }
        /*
         * Unknown clock ID? Fall back to the syscall.
         */
        return vdso_fallback_gettime(clock, ts);
}
int
clock_gettime(clockid_t, struct __kernel_old_timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
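
/*
 * Usage sketch (hypothetical userspace code, not part of this file): the
 * weak alias lets the dynamic linker bind an ordinary call directly to
 * the vDSO, so
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_MONOTONIC, &ts);
 *
 * normally completes without entering the kernel, taking the syscall
 * path only for unknown clock IDs or when vclock_mode is VCLOCK_NONE.
 */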

notrace int
__vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts)
{
        struct vvar_data *vvd = get_vvar_data();

        switch (clock) {
        case CLOCK_REALTIME:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_realtime_stick(vvd, ts);
        case CLOCK_MONOTONIC:
                if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
                        break;
                return do_monotonic_stick(vvd, ts);
        case CLOCK_REALTIME_COARSE:
                return do_realtime_coarse(vvd, ts);
        case CLOCK_MONOTONIC_COARSE:
                return do_monotonic_coarse(vvd, ts);
        }
        /*
         * Unknown clock ID? Fall back to the syscall.
         */
        return vdso_fallback_gettime(clock, ts);
}

notrace int
__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        struct vvar_data *vvd = get_vvar_data();

        if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
                if (likely(tv != NULL)) {
                        union tstv_t {
                                struct __kernel_old_timespec ts;
                                struct __kernel_old_timeval tv;
                        } *tstv = (union tstv_t *) tv;
                        do_realtime(vvd, &tstv->ts);
                        /*
                         * Assign before dividing to ensure that the division is
                         * done in the type of tv_usec, not tv_nsec.
                         *
                         * There cannot be > 1 billion nsec in a second:
                         * do_realtime() has already distributed such overflow
                         * into tv_sec. So we can assign it to an int safely.
                         */
                        tstv->tv.tv_usec = tstv->ts.tv_nsec;
                        tstv->tv.tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = vvd->tz_minuteswest;
                        tz->tz_dsttime = vvd->tz_dsttime;
                }
                return 0;
        }
        return vdso_fallback_gettimeofday(tv, tz);
}
int
gettimeofday(struct __kernel_old_timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));
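
/*
 * The tstv_t union punning in the gettimeofday paths works because
 * __kernel_old_timespec and __kernel_old_timeval share the tv_sec
 * layout: do_realtime() fills the buffer in place, and only the
 * nanoseconds field then needs converting to microseconds.
 */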

notrace int
__vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        struct vvar_data *vvd = get_vvar_data();

        if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
                if (likely(tv != NULL)) {
                        union tstv_t {
                                struct __kernel_old_timespec ts;
                                struct __kernel_old_timeval tv;
                        } *tstv = (union tstv_t *) tv;
                        do_realtime_stick(vvd, &tstv->ts);
                        /*
                         * Assign before dividing to ensure that the division is
                         * done in the type of tv_usec, not tv_nsec.
                         *
                         * There cannot be > 1 billion nsec in a second:
                         * do_realtime() has already distributed such overflow
                         * into tv_sec. So we can assign it to an int safely.
                         */
                        tstv->tv.tv_usec = tstv->ts.tv_nsec;
                        tstv->tv.tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = vvd->tz_minuteswest;
                        tz->tz_dsttime = vvd->tz_dsttime;
                }
                return 0;
        }
        return vdso_fallback_gettimeofday(tv, tz);
}