GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/arm64/spe/arm_spe_dev.c
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2024 Arm Ltd
 * Copyright (c) 2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Andrew Turner under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/event.h>
#include <sys/hwt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include <arm64/spe/arm_spe.h>
#include <arm64/spe/arm_spe_dev.h>

MALLOC_DEFINE(M_ARM_SPE, "armspe", "Arm SPE tracing");

/*
 * taskqueue(9) used for sleepable routines called from interrupt handlers
 */
TASKQUEUE_FAST_DEFINE_THREAD(arm_spe);

void arm_spe_send_buffer(void *, int);
static void arm_spe_error(void *, int);
static int arm_spe_intr(void *);
device_attach_t arm_spe_attach;

static device_method_t arm_spe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_attach, arm_spe_attach),

	DEVMETHOD_END,
};

DEFINE_CLASS_0(spe, arm_spe_driver, arm_spe_methods,
    sizeof(struct arm_spe_softc));

#define ARM_SPE_KVA_MAX_ALIGN UL(2048)

int
arm_spe_attach(device_t dev)
{
	struct arm_spe_softc *sc;
	int error, rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	sc->pmbidr = READ_SPECIALREG(PMBIDR_EL1_REG);
	sc->pmsidr = READ_SPECIALREG(PMSIDR_EL1_REG);
	device_printf(dev, "PMBIDR_EL1: %#lx\n", sc->pmbidr);
	device_printf(dev, "PMSIDR_EL1: %#lx\n", sc->pmsidr);
	if ((sc->pmbidr & PMBIDR_P) != 0) {
		device_printf(dev, "Profiling Buffer is owned by a higher Exception level\n");
		return (EPERM);
	}
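
	/*
	 * PMBIDR_EL1.Align is the log2 of the Profiling Buffer pointer
	 * alignment constraint; convert it to bytes and sanity check it.
	 */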
	sc->kva_align = 1 << ((sc->pmbidr & PMBIDR_Align_MASK) >> PMBIDR_Align_SHIFT);
	if (sc->kva_align > ARM_SPE_KVA_MAX_ALIGN) {
		device_printf(dev, "Invalid PMBIDR.Align value of %d\n", sc->kva_align);
		return (EINVAL);
	}

	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "Unable to allocate interrupt\n");
		return (ENXIO);
	}
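	/*
	 * arm_spe_intr is installed as an interrupt filter (no ithread
	 * handler), so it runs in interrupt context; sleepable work is
	 * deferred to the arm_spe taskqueue instead.
	 */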
	error = bus_setup_intr(dev, sc->sc_irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, arm_spe_intr, NULL, sc,
	    &sc->sc_irq_cookie);
	if (error != 0) {
		device_printf(dev, "Unable to set up interrupt\n");
		return (error);
	}

	mtx_init(&sc->sc_lock, "Arm SPE lock", NULL, MTX_SPIN);

	STAILQ_INIT(&sc->pending);
	sc->npending = 0;

	spe_register(dev);

	return (0);
}

/* Interrupt handler runs on the same core that triggered the exception */
static int
arm_spe_intr(void *arg)
{
	int cpu_id = PCPU_GET(cpuid);
	struct arm_spe_softc *sc = arg;
	uint64_t pmbsr;
	uint64_t base, limit;
	uint8_t ec;
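	/*
	 * The per-CPU trace buffer is used as a double buffer: 'buf' is the
	 * half SPE has been writing to, 'prev_buf' the half previously
	 * handed off to userspace for copying out.
	 */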
	struct arm_spe_info *info = sc->spe_info[cpu_id];
	uint8_t i = info->buf_idx;
	struct arm_spe_buf_info *buf = &info->buf_info[i];
	struct arm_spe_buf_info *prev_buf = &info->buf_info[!i];
	device_t dev = sc->dev;

	/* Make sure the profiling data is visible to the CPU */
	psb_csync();
	dsb(nsh);

	/* Make sure any HW update of PMBPTR_EL1 is visible to the CPU */
	isb();

	pmbsr = READ_SPECIALREG(PMBSR_EL1_REG);
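
	/*
	 * PMBSR_EL1.S (Service) is set when a buffer management event is
	 * pending; if it is clear, this interrupt was not for us.
	 */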
	if (!(pmbsr & PMBSR_S))
		return (FILTER_STRAY);

	/* Event Class */
	ec = PMBSR_EC_VAL(pmbsr);
	switch (ec) {
	case PMBSR_EC_OTHER_BUF_MGMT:	/* Other buffer management event */
		break;
	case PMBSR_EC_GRAN_PROT_CHK:	/* Granule Protection Check fault */
		device_printf(dev, "PMBSR_EC_GRAN_PROT_CHK\n");
		break;
	case PMBSR_EC_STAGE1_DA:	/* Stage 1 Data Abort */
		device_printf(dev, "PMBSR_EC_STAGE1_DA\n");
		break;
	case PMBSR_EC_STAGE2_DA:	/* Stage 2 Data Abort */
		device_printf(dev, "PMBSR_EC_STAGE2_DA\n");
		break;
	default:
		/* Unknown EC */
		device_printf(dev, "unknown PMBSR_EC: %#x\n", ec);
		arm_spe_disable(NULL);
		TASK_INIT(&sc->task, 0, (task_fn_t *)arm_spe_error, sc->ctx);
		taskqueue_enqueue(taskqueue_arm_spe, &sc->task);
		return (FILTER_HANDLED);
	}

	switch (ec) {
	case PMBSR_EC_OTHER_BUF_MGMT:
		/* Buffer Status Code = buffer filled */
		if ((pmbsr & PMBSR_MSS_BSC_MASK) == PMBSR_MSS_BSC_BUFFER_FILLED) {
			dprintf("%s SPE buffer full event (cpu:%d)\n",
			    __func__, cpu_id);
			break;
		}
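		/* Any other buffer management status code is unexpected */
		/* FALLTHROUGH */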
	case PMBSR_EC_GRAN_PROT_CHK:
	case PMBSR_EC_STAGE1_DA:
	case PMBSR_EC_STAGE2_DA:
		/*
		 * If we have one of these, we've messed up the
		 * programming somehow (e.g. passed invalid memory to
		 * SPE) and can't recover
		 */
		arm_spe_disable(NULL);
		TASK_INIT(&sc->task, 0, (task_fn_t *)arm_spe_error, sc->ctx);
		taskqueue_enqueue(taskqueue_arm_spe, &sc->task);
		/* PMBPTR_EL1 is the fault address if PMBSR_DL is 1 */
		device_printf(dev, "CPU:%d PMBSR_EL1:%#lx\n", cpu_id, pmbsr);
		device_printf(dev, "PMBPTR_EL1:%#lx PMBLIMITR_EL1:%#lx\n",
		    READ_SPECIALREG(PMBPTR_EL1_REG),
		    READ_SPECIALREG(PMBLIMITR_EL1_REG));
		return (FILTER_HANDLED);
	}

	mtx_lock_spin(&info->lock);

	/*
	 * Data Loss bit - pmbptr might not be pointing to the end of the
	 * last complete record
	 */
	if ((pmbsr & PMBSR_DL) == PMBSR_DL)
		buf->partial_rec = 1;
	buf->pmbptr = READ_SPECIALREG(PMBPTR_EL1_REG);
	buf->buf_svc = true;

	/* Set up regs ready to start writing to the other half of the buffer */
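	/*
	 * PMBLIMITR_EL1.E enables the Profiling Buffer; the LIMIT field
	 * holds bits [63:12] of the address one byte past the end of the
	 * buffer, so the limit is always page aligned.
	 */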
	info->buf_idx = !info->buf_idx;
	base = buf_start_addr(info->buf_idx, info);
	limit = base + (info->buf_size / 2);
	limit &= PMBLIMITR_LIMIT_MASK;
	limit |= PMBLIMITR_E;
	WRITE_SPECIALREG(PMBPTR_EL1_REG, base);
	WRITE_SPECIALREG(PMBLIMITR_EL1_REG, limit);
	isb();

	/*
	 * Notify userspace via kqueue that the buffer is full and needs
	 * copying out - since kqueue can sleep, don't do this in the
	 * interrupt handler; add it to a taskqueue to be scheduled later
	 * instead.
	 */
	TASK_INIT(&info->task[i], 0, (task_fn_t *)arm_spe_send_buffer, buf);
	taskqueue_enqueue(taskqueue_arm_spe, &info->task[i]);

	/*
	 * It's possible userspace hasn't yet notified us that it has copied
	 * out the other half of the buffer.
	 *
	 * This might be because:
	 * a) The kernel hasn't yet scheduled the task via taskqueue to
	 *    notify userspace to copy out the data
	 * b) Userspace is still copying the buffer, or hasn't notified us
	 *    back via the HWT_IOC_SVC_BUF ioctl
	 *
	 * Either way, we need to avoid overwriting uncopied data in the
	 * buffer, so disable profiling until we receive that SVC_BUF ioctl.
	 *
	 * Using a larger buffer size should help to minimise these events
	 * and the loss of profiling data while profiling is disabled.
	 */
	if (prev_buf->buf_svc) {
		device_printf(sc->dev, "cpu%d: buffer full interrupt, but other"
		    " half of buffer has not been copied out - consider"
		    " increasing buffer size to minimise loss of profiling data\n",
		    cpu_id);
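		/* Writing zero to PMSCR_EL1 disables profiling */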
		WRITE_SPECIALREG(PMSCR_EL1_REG, 0x0);
		prev_buf->buf_wait = true;
	}

	mtx_unlock_spin(&info->lock);

	/* Clear Profiling Buffer Status Register */
	WRITE_SPECIALREG(PMBSR_EL1_REG, 0);

	isb();

	return (FILTER_HANDLED);
}

/* Note: scheduled and run via taskqueue, so this can run on any CPU at any time */
void
arm_spe_send_buffer(void *arg, int pending __unused)
{
	struct arm_spe_buf_info *buf = (struct arm_spe_buf_info *)arg;
	struct arm_spe_info *info = buf->info;
	struct arm_spe_queue *queue;
	struct kevent kev;
	int ret;
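
	/*
	 * We were deferred to this taskqueue precisely so that we may
	 * sleep: both malloc(M_WAITOK) and kqfd_register() can block.
	 */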
	queue = malloc(sizeof(struct arm_spe_queue), M_ARM_SPE,
	    M_WAITOK | M_ZERO);

	mtx_lock_spin(&info->lock);

	/* Add to the queue for userspace to pick up */
	queue->ident = info->ident;
	queue->offset = buf->pmbptr - buf_start_addr(buf->buf_idx, info);
	queue->buf_idx = buf->buf_idx;
	queue->final_buf = !info->enabled;
	queue->partial_rec = buf->partial_rec;
	mtx_unlock_spin(&info->lock);

	mtx_lock_spin(&info->sc->sc_lock);
	STAILQ_INSERT_TAIL(&info->sc->pending, queue, next);
	info->sc->npending++;
	EV_SET(&kev, ARM_SPE_KQ_BUF, EVFILT_USER, 0, NOTE_TRIGGER,
	    info->sc->npending, NULL);
	mtx_unlock_spin(&info->sc->sc_lock);

	/* Notify userspace */
	ret = kqfd_register(info->sc->kqueue_fd, &kev, info->sc->hwt_td,
	    M_WAITOK);
	if (ret) {
		dprintf("%s kqfd_register ret:%d\n", __func__, ret);
		arm_spe_error(info->sc->ctx, 0);
	}
}

static void
arm_spe_error(void *arg, int pending __unused)
{
	struct hwt_context *ctx = arg;
	struct kevent kev;
	int ret;
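
	/*
	 * Stop profiling on every CPU in this trace context, then tell
	 * userspace to shut down via an ARM_SPE_KQ_SHUTDOWN kevent.
	 */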
	if (!CPU_EMPTY(&ctx->cpu_map))
		smp_rendezvous_cpus(ctx->cpu_map, smp_no_rendezvous_barrier,
		    arm_spe_disable, smp_no_rendezvous_barrier, NULL);

	EV_SET(&kev, ARM_SPE_KQ_SHUTDOWN, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
	ret = kqfd_register(ctx->kqueue_fd, &kev, ctx->hwt_td, M_WAITOK);
	if (ret)
		dprintf("%s kqfd_register ret:%d\n", __func__, ret);
}

MODULE_DEPEND(spe, hwt, 1, 1, 1);
MODULE_VERSION(spe, 1);