GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dtrace_impl.h>
#include <sys/kernel.h>
#include <sys/stack.h>
#include <sys/pcpu.h>

#include <machine/frame.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/atomic.h>
#include <machine/db_machdep.h>
#include <machine/md_var.h>
#include <machine/stack.h>
#include <ddb/db_sym.h>
#include <ddb/ddb.h>
#include <sys/kdb.h>

#include <cddl/dev/dtrace/dtrace_cddl.h>

#include "regset.h"

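/*
 * Upper bound on the number of frames followed when unwinding a user
 * stack; dtrace_getustack_common() uses it to bail out of circular
 * frame-pointer chains.
 */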
#define	MAX_USTACK_DEPTH	2048

uint8_t dtrace_fuword8_nocheck(void *);
uint16_t dtrace_fuword16_nocheck(void *);
uint32_t dtrace_fuword32_nocheck(void *);
uint64_t dtrace_fuword64_nocheck(void *);

void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *intrpc)
{
	struct unwind_state state;
	int scp_offset;
	int depth;

	depth = 0;

	if (intrpc != 0) {
		pcstack[depth++] = (pc_t) intrpc;
	}

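	/* Skip this function's own frame as well. */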
	aframes++;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getpcstack;

	while (depth < pcstack_limit) {
		if (!unwind_frame(curthread, &state))
			break;
		if (!INKERNEL(state.pc))
			break;

		/*
		 * NB: Unlike some other architectures, we don't need to
		 * explicitly insert cpu_dtrace_caller as it appears in the
		 * normal kernel stack trace rather than a special trap frame.
		 */
		if (aframes > 0) {
			aframes--;
		} else {
			pcstack[depth++] = state.pc;
		}

	}

	for (; depth < pcstack_limit; depth++) {
		pcstack[depth] = 0;
	}
}

static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int ret = 0;
	uintptr_t oldfp = fp;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		/*
		 * We limit the number of times we can go around this
		 * loop to account for a circular stack.
		 */
		if (ret++ >= MAX_USTACK_DEPTH) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

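		/*
		 * Follow the user frame-pointer chain. An AArch64 frame
		 * record is a {previous fp, saved lr} pair, so the saved
		 * return address sits one word above the saved frame
		 * pointer; offsetof(struct unwind_state, pc) is assumed
		 * to match that 8-byte offset.
		 */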
		pc = dtrace_fuword64((void *)(fp +
		    offsetof(struct unwind_state, pc)));
		fp = dtrace_fuword64((void *)fp);

		if (fp == oldfp) {
			*flags |= CPU_DTRACE_BADSTACK;
			cpu_core[curcpu].cpuc_dtrace_illval = fp;
			break;
		}

		/*
		 * ARM64TODO:
		 * This workaround might not be necessary. It needs to be
		 * revised and removed from all architectures if found
		 * unwanted. Leaving the original x86 comment for reference.
		 *
		 * This is totally bogus: if we faulted, we're going to clear
		 * the fault and break. This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}

		oldfp = fp;
	}

	return (ret);
}

void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

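	/*
	 * The first slot of the user stack record holds the pid of the
	 * traced process; the return addresses collected below follow it.
	 */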
	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->tf_elr;
	fp = tf->tf_x[29];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe. The frame pointer has not yet been
		 * pushed (that happens in the function prologue). The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which on
		 * arm64 is still held in the link register (lr) because
		 * the prologue has not yet saved it to the frame record.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->tf_lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}

int
dtrace_getustackdepth(void)
{

	printf("IMPLEMENT ME: %s\n", __func__);

	return (0);
}

void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{

	printf("IMPLEMENT ME: %s\n", __func__);
}

uint64_t
dtrace_getarg(int arg, int aframes __unused)
{
	struct trapframe *tf;

	/*
	 * We only handle invop providers here.
	 */
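	/*
	 * Under the AAPCS64 calling convention the first eight integer
	 * arguments arrive in registers x0-x7; arguments past the eighth
	 * are read from the interrupted thread's stack, relative to tf_sp.
	 */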
	if ((tf = curthread->t_dtrace_trapframe) == NULL) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	} else if (arg < 8) {
		return (tf->tf_x[arg]);
	} else {
		uintptr_t p;
		uint64_t val;

		p = (tf->tf_sp + (arg - 8) * sizeof(uint64_t));
		if ((p & 7) != 0) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADALIGN);
			cpu_core[curcpu].cpuc_dtrace_illval = p;
			return (0);
		}
		if (!kstack_contains(curthread, p, sizeof(uint64_t))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = p;
			return (0);
		}
		memcpy(&val, (void *)p, sizeof(uint64_t));
		return (val);
	}
}

int
dtrace_getstackdepth(int aframes)
{
	struct unwind_state state;
	int scp_offset;
	int depth;
	bool done;

	depth = 1;
	done = false;

	state.fp = (uintptr_t)__builtin_frame_address(0);
	state.pc = (uintptr_t)dtrace_getstackdepth;

	do {
		done = !unwind_frame(curthread, &state);
		if (!INKERNEL(state.pc) || !INKERNEL(state.fp))
			break;
		depth++;
	} while (!done);

	if (depth < aframes)
		return (0);
	else
		return (depth - aframes);
}

ulong_t
dtrace_getreg(struct trapframe *frame, uint_t reg)
{
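	/*
	 * The REG_* constants come from regset.h; REG_X0 through REG_X29
	 * are assumed to be numbered so that they index tf_x[] directly.
	 */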
	switch (reg) {
	case REG_X0 ... REG_X29:
		return (frame->tf_x[reg]);
	case REG_LR:
		return (frame->tf_lr);
	case REG_SP:
		return (frame->tf_sp);
	case REG_PC:
		return (frame->tf_elr);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	/* NOTREACHED */
}

static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

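	/*
	 * The second clause of the comparison catches uaddr + size
	 * wrapping past the top of the address space.
	 */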
	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(uaddr, kaddr, size);
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copy(kaddr, uaddr, size);
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(uaddr, kaddr, size, flags);
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{

	if (dtrace_copycheck(uaddr, kaddr, size))
		dtrace_copystr(kaddr, uaddr, size, flags);
}

uint8_t
dtrace_fuword8(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{

	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}

	return (dtrace_fuword64_nocheck(uaddr));
}