GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cddl/dev/dtrace/i386/dtrace_subr.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <cddl/dev/dtrace/dtrace_cddl.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <vm/pmap.h>

extern uintptr_t kernelbase;

extern void dtrace_getnanotime(struct timespec *tsp);
extern int (*dtrace_invop_jump_addr)(struct trapframe *);

int dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);
int dtrace_invop_start(struct trapframe *frame);
void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;

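/*
 * Invalid-opcode handler chain.  When a DTrace provider (fbt, for example)
 * patches a breakpoint over an instruction, the resulting trap is routed
 * through dtrace_invop_jump_addr to dtrace_invop_start and then here.  Each
 * registered handler is offered the faulting address in turn; the first one
 * that recognizes the address returns a nonzero emulation code and the walk
 * stops.  The trap frame is parked in td->t_dtrace_trapframe so the handler
 * can reach it from probe context.
 */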
int
dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
{
	struct thread *td;
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	rval = 0;
	td = curthread;
	td->t_dtrace_trapframe = frame;
	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
			break;
	td->t_dtrace_trapframe = NULL;
	return (rval);
}

void
dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, 0);
}

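/*
 * Illustrative sketch only (foo_invop is hypothetical, not part of this
 * file): a provider registers a handler at load time and removes it on
 * unload.  A handler returns 0 to decline an address it does not own:
 *
 *	static int
 *	foo_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
 *	{
 *		if (!foo_owns_probe(addr))
 *			return (0);	// let the next handler look
 *		// ... emulate the displaced instruction ...
 *		return (1);		// nonzero: trap was consumed
 *	}
 *
 *	dtrace_invop_add(foo_invop);	// at load
 *	dtrace_invop_remove(foo_invop);	// at unload
 */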
void
dtrace_invop_init(void)
{

	dtrace_invop_jump_addr = dtrace_invop_start;
}

void
dtrace_invop_uninit(void)
{

	dtrace_invop_jump_addr = NULL;
}

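/*
 * On i386 everything below kernelbase is user address space, so the range
 * [0, kernelbase) is reported as toxic: DTrace must not dereference such
 * addresses directly from probe context and uses the safe copy routines
 * instead.
 */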
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, kernelbase);
}

#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}

int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we have executed the original instruction, but we have performed
	 * neither the jmp back to t->t_dtrace_npc nor the clean up of any
	 * registers used to emulate %rip-relative instructions in 64-bit mode,
	 * we'll save ourselves some effort by doing that here and taking the
	 * signal right away. We detect this condition by seeing if the program
	 * counter is in the range [scrpc + isz, astpc).
	 */
	if (rp->r_pc >= t->t_dtrace_scrpc + isz &&
	    rp->r_pc < t->t_dtrace_astpc) {
#ifdef __amd64
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}
#endif
		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t tsc_skew[MAXCPU];
static uint64_t nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT 28

static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}

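/*
 * Roughly speaking, the skew of each remote CPU's TSC relative to the boot
 * CPU is measured by rendezvousing the two CPUs and having each read its
 * own TSC at (almost) the same instant; the difference is recorded in
 * tsc_skew[] below and subtracted in dtrace_gethrtime().  The measurement
 * ignores rendezvous latency, so it is an approximation rather than an
 * exact synchronization.
 */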
static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	uint64_t tsc_f;
	cpuset_t map;
	int i;

	/*
	 * Get the TSC frequency known at this moment.
	 * This should be constant if the TSC is invariant.
	 * Otherwise the tick->time conversion will be inaccurate, but
	 * it will preserve the monotonic property of the TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following line checks that nsec_scale calculated below
	 * doesn't overflow a 32-bit unsigned integer, so that it can multiply
	 * another 32-bit integer without overflowing 64 bits.
	 * Thus the minimum supported TSC frequency is 62.5MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)),
	    ("TSC frequency is too low"));

	/*
	 * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * The 2^28 factor was chosen from practical considerations:
	 * - it supports TSC frequencies as low as 62.5MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values.
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;

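	/*
	 * Worked example (derived from the code above, not from the
	 * original commentary): with tsc_f = 2GHz,
	 * nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, so a tick count
	 * converts as ticks * 2^27 >> 28 = ticks * 0.5, matching the
	 * 0.5ns tick period.  The KASSERT above enforces
	 * tsc_f > 10^9 * 2^28 / 2^32 = 62.5MHz, which is exactly the
	 * condition for nsec_scale to fit in 32 bits.
	 */
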
	if (vm_guest != VM_GUEST_NO)
		return;

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		pc = pcpu_find(i);
		CPU_SETOF(PCPU_GET(cpuid), &map);
		CPU_SET(pc->pc_cpuid, &map);

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendezvous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}
SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
    dtrace_gethrtime_init, NULL);

/*
 * DTrace needs a high resolution time function which can
 * be called from a probe context and is guaranteed not to be
 * instrumented with probes itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime(void)
{
	uint64_t tsc;
	uint32_t lo, hi;
	register_t eflags;

	/*
	 * We split the TSC value into lower and higher 32-bit halves and
	 * separately scale them with nsec_scale, then we scale them down
	 * by 2^28 (see the nsec_scale calculations) taking into account
	 * the 32-bit shift of the higher half, and finally add.
	 */
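	/*
	 * That is (a sketch of the arithmetic, not original commentary):
	 * tsc = hi * 2^32 + lo, so
	 * ns = tsc * nsec_scale / 2^28
	 *    = (lo * nsec_scale >> 28) + (hi * nsec_scale * 2^32 >> 28)
	 *    = (lo * nsec_scale >> 28) + (hi * nsec_scale << 4),
	 * with each 32x32->64-bit product guaranteed not to overflow
	 * because nsec_scale fits in 32 bits.
	 */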
	eflags = intr_disable();
	tsc = rdtsc() - tsc_skew[curcpu];
	intr_restore(eflags);

	lo = tsc;
	hi = tsc >> 32;
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}

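/*
 * Unlike dtrace_gethrtime() above, which counts nanoseconds since boot,
 * this returns wall-clock time: dtrace_getnanotime() samples the kernel's
 * notion of the current time of day, which is then flattened into a
 * single nanosecond count.
 */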
uint64_t
dtrace_gethrestime(void)
{
	struct timespec current_time;

	dtrace_getnanotime(&current_time);

	return (current_time.tv_sec * 1000000000ULL + current_time.tv_nsec);
}

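/*
 * A note on the recovery protocol below (summary, not original commentary):
 * probe code sets CPU_DTRACE_NOFAULT before touching memory on behalf of a
 * D script.  If the access faults, dtrace_trap() records the error in
 * cpuc_dtrace_flags (and the bad address in cpuc_dtrace_illval), skips the
 * faulting instruction, and resumes; the DIF emulator then notices the flag
 * and aborts the action with an error instead of panicking the kernel.
 */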
/* Function to handle DTrace traps during probes. See i386/i386/trap.c */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	uint16_t nofault;

	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check if DTrace has enabled 'no-fault' mode:
	 */
	sched_pin();
	nofault = cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT;
	sched_unpin();
	if (nofault) {
		KASSERT((read_eflags() & PSL_I) == 0, ("interrupts enabled"));

		/*
		 * There are only a couple of trap types that are expected.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((uint8_t *) frame->tf_eip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = rcr2();

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_eip += dtrace_instr_size((uint8_t *) frame->tf_eip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}