GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_mailbox.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

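/* Logging helpers that prefix each message with the channel's MSI-X IRQ number. */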
#define MB_ERR(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		(_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
	typeof(chann) _chann = chann; \
	dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
		      (_chann)->msix_irq, ##args); \
})

#define MAGIC_VAL 0x1D000000U
#define MAGIC_VAL_MASK 0xFF000000
#define MAX_MSG_ID_ENTRIES 256
#define MSG_RX_TIMER 200 /* milliseconds */
#define MAILBOX_NAME "xdna_mailbox"

enum channel_res_type {
	CHAN_RES_X2I,
	CHAN_RES_I2X,
	CHAN_RES_NUM
};

struct mailbox {
	struct device *dev;
	struct xdna_mailbox_res res;
};

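/*
 * Per-channel state. chan_xa tracks messages that have been sent but not yet
 * answered, keyed by message ID.
 */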
struct mailbox_channel {
	struct mailbox *mb;
	struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
	int msix_irq;
	u32 iohub_int_addr;
	struct xarray chan_xa;
	u32 next_msgid;
	u32 x2i_tail;

	/* Fields related to received messages */
	struct workqueue_struct *work_q;
	struct work_struct rx_work;
	u32 i2x_head;
	bool bad_state;
};

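/*
 * Each message placed in a ring buffer starts with a 16-byte header. sz_ver
 * packs the body size and the protocol version.
 */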
#define MSG_BODY_SZ GENMASK(10, 0)
#define MSG_PROTO_VER GENMASK(23, 16)
struct xdna_msg_header {
	__u32 total_size;
	__u32 sz_ver;
	__u32 id;
	__u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

struct mailbox_pkg {
	struct xdna_msg_header header;
	__u32 payload[];
};

/* The protocol version. */
#define MSG_PROTOCOL_VERSION 0x1
/* The tombstone value. */
#define TOMBSTONE 0xDEADFACE

struct mailbox_msg {
	void *handle;
	int (*notify_cb)(void *handle, void __iomem *data, size_t size);
	size_t pkg_size; /* package size in bytes */
	struct mailbox_pkg pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
	struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
	void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

	return readl(ringbuf_addr);
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
	mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
	mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
	mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
	return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
	return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

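/*
 * Allocate a message ID from the xarray (cyclic, at most MAX_MSG_ID_ENTRIES
 * in flight) and tag it with MAGIC_VAL so responses can be validated.
 */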
static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	u32 msg_id;
	int ret;

	ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
				  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
				  &mb_chann->next_msgid, GFP_NOWAIT);
	if (ret < 0)
		return ret;

	/*
	 * Add MAGIC_VAL to the higher bits.
	 */
	msg_id |= MAGIC_VAL;
	return msg_id;
}

static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
	msg_id &= ~MAGIC_VAL_MASK;
	xa_erase_irq(&mb_chann->chan_xa, msg_id);
}

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
				struct mailbox_msg *mb_msg)
{
	MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
	       mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
	if (mb_msg->notify_cb)
		mb_msg->notify_cb(mb_msg->handle, NULL, 0);
	kfree(mb_msg);
}

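/*
 * Copy the packaged message into the x2i ring buffer. If the message does not
 * fit before the end of the ring, write TOMBSTONE at the current tail and wrap
 * to offset 0. If the writer would run into the reader's head pointer, poll
 * the head pointer until enough space is available.
 */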
static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
	void __iomem *write_addr;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	u32 tmp_tail;
	int ret;

	head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
	tail = mb_chann->x2i_tail;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I) - sizeof(u32);
	start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
	tmp_tail = tail + mb_msg->pkg_size;

check_again:
	if (tail >= head && tmp_tail > ringbuf_size) {
		write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
		writel(TOMBSTONE, write_addr);

		/* Tombstone is set. Write from the start of the ring buffer. */
		tail = 0;
		tmp_tail = tail + mb_msg->pkg_size;
	}

	if (tail < head && tmp_tail >= head) {
		ret = read_poll_timeout(mailbox_get_headptr, head,
					tmp_tail < head || tail >= head,
					1, 100, false, mb_chann, CHAN_RES_X2I);
		if (ret)
			return ret;

		if (tail >= head)
			goto check_again;
	}

	write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
	memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
	mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

	trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
			    mb_msg->pkg.header.opcode,
			    mb_msg->pkg.header.id);

	return 0;
}

static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
		 void __iomem *data)
{
	struct mailbox_msg *mb_msg;
	int msg_id;
	int ret = 0;

	msg_id = header->id;
	if (!mailbox_validate_msgid(msg_id)) {
		MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
		return -EINVAL;
	}

	msg_id &= ~MAGIC_VAL_MASK;
	mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
	if (!mb_msg) {
		MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
		return -EINVAL;
	}

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);
	if (mb_msg->notify_cb) {
		ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
		if (unlikely(ret))
			MB_ERR(mb_chann, "Message callback ret %d", ret);
	}

	kfree(mb_msg);
	return ret;
}

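/*
 * Consume one message from the i2x ring buffer: peek total_size first, handle
 * the TOMBSTONE wrap marker, then read the rest of the header and hand the
 * payload to mailbox_get_resp(). Returns -ENOENT when the ring is empty.
 */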
static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
	struct xdna_msg_header header;
	void __iomem *read_addr;
	u32 msg_size, rest;
	u32 ringbuf_size;
	u32 head, tail;
	u32 start_addr;
	int ret;

	tail = mailbox_get_tailptr(mb_chann, CHAN_RES_I2X);
	head = mb_chann->i2x_head;
	ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
	start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

	if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
		return -EINVAL;
	}

	/* Ring buffer is empty */
	if (head == tail)
		return -ENOENT;

	if (head == ringbuf_size)
		head = 0;

	/* Peek the size of the message, or TOMBSTONE */
	read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
	header.total_size = readl(read_addr);
	/* The size is TOMBSTONE; the next read starts from 0 */
	if (header.total_size == TOMBSTONE) {
		if (head < tail) {
			MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
				     head, tail);
			return -EINVAL;
		}
		mailbox_set_headptr(mb_chann, 0);
		return 0;
	}

	if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
		MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
		return -EINVAL;
	}
	msg_size = sizeof(header) + header.total_size;

	if (msg_size > ringbuf_size - head || msg_size > tail - head) {
		MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
			     msg_size, tail, head);
		return -EINVAL;
	}

	rest = sizeof(header) - sizeof(u32);
	read_addr += sizeof(u32);
	memcpy_fromio((u32 *)&header + 1, read_addr, rest);
	read_addr += rest;

	ret = mailbox_get_resp(mb_chann, &header, read_addr);

	mailbox_set_headptr(mb_chann, head + msg_size);
	/* After updating head, it can be equal to ringbuf_size. This is expected. */
	trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
			    header.opcode, header.id);

	return ret;
}

static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
	struct mailbox_channel *mb_chann = p;

	trace_mbox_irq_handle(MAILBOX_NAME, irq);
	/* Schedule rx_work to call the callback functions */
	queue_work(mb_chann->work_q, &mb_chann->rx_work);

	return IRQ_HANDLED;
}

static void mailbox_rx_worker(struct work_struct *rx_work)
{
	struct mailbox_channel *mb_chann;
	int ret;

	mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state, work aborted");
		return;
	}

again:
	mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

	while (1) {
		/*
		 * If the return value is 0, keep consuming the next message
		 * until there are no more messages or an error occurs.
		 */
		ret = mailbox_get_msg(mb_chann);
		if (ret == -ENOENT)
			break;

		/* Any other error means the device doesn't look good; disable the IRQ. */
		if (unlikely(ret)) {
			MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
			WRITE_ONCE(mb_chann->bad_state, true);
			return;
		}
	}

	/*
	 * The hardware will not generate an interrupt if the firmware creates
	 * a new response right after the driver clears the interrupt register.
	 * Check the interrupt register to make sure there is no new response
	 * before exiting.
	 */
	if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
		goto again;
}

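/*
 * Package the header and payload, allocate a message ID, and place the message
 * on the x2i ring buffer. msg->notify_cb is invoked from the RX worker when a
 * response arrives, or with NULL data if the channel is destroyed first.
 */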
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
			  const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
	struct xdna_msg_header *header;
	struct mailbox_msg *mb_msg;
	size_t pkg_size;
	int ret;

	pkg_size = sizeof(*header) + msg->send_size;
	if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
		MB_ERR(mb_chann, "Message size larger than ringbuf size");
		return -EINVAL;
	}

	if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
		MB_ERR(mb_chann, "Message must be 4-byte aligned");
		return -EINVAL;
	}

	/* The first word in the payload can NOT be TOMBSTONE */
	if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
		MB_ERR(mb_chann, "Tombstone in data");
		return -EINVAL;
	}

	if (READ_ONCE(mb_chann->bad_state)) {
		MB_ERR(mb_chann, "Channel in bad state");
		return -EPIPE;
	}

	mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
	if (!mb_msg)
		return -ENOMEM;

	mb_msg->handle = msg->handle;
	mb_msg->notify_cb = msg->notify_cb;
	mb_msg->pkg_size = pkg_size;

	header = &mb_msg->pkg.header;
	/*
	 * Hardware uses total_size and size to split huge messages.
	 * We do not support that here, thus the two values are the same.
	 */
	header->total_size = msg->send_size;
	header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
			 FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
	header->opcode = msg->opcode;
	memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

	ret = mailbox_acquire_msgid(mb_chann, mb_msg);
	if (unlikely(ret < 0)) {
		MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
		goto msg_id_failed;
	}
	header->id = ret;

	MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
	       header->opcode, header->total_size, header->id);

	ret = mailbox_send_msg(mb_chann, mb_msg);
	if (ret) {
		MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
		goto release_id;
	}

	return 0;

release_id:
	mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
	kfree(mb_msg);
	return ret;
}

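/*
 * Create a channel: copy the x2i/i2x ring buffer resources, initialize the
 * in-flight message xarray, read the initial head/tail pointers from the
 * hardware registers, then set up the RX workqueue and the IRQ handler.
 */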
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
			    const struct xdna_mailbox_chann_res *x2i,
			    const struct xdna_mailbox_chann_res *i2x,
			    u32 iohub_int_addr,
			    int mb_irq)
{
	struct mailbox_channel *mb_chann;
	int ret;

	if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
		pr_err("Ring buffer size must be a power of 2");
		return NULL;
	}

	mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
	if (!mb_chann)
		return NULL;

	mb_chann->mb = mb;
	mb_chann->msix_irq = mb_irq;
	mb_chann->iohub_int_addr = iohub_int_addr;
	memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
	memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

	xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
	mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

	INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
	mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
	if (!mb_chann->work_q) {
		MB_ERR(mb_chann, "Create workqueue failed");
		goto free_and_out;
	}

	/* Everything looks good. Time to enable the IRQ handler. */
	ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
	if (ret) {
		MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
		goto destroy_wq;
	}

	mb_chann->bad_state = false;
	mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

	MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
	return mb_chann;

destroy_wq:
	destroy_workqueue(mb_chann->work_q);
free_and_out:
	kfree(mb_chann);
	return NULL;
}

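/*
 * Destroy a channel: free the IRQ, destroy the workqueue, and release any
 * in-flight messages still tracked in the xarray (their notify_cb is called
 * with NULL data).
 */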
int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
	struct mailbox_msg *mb_msg;
	unsigned long msg_id;

	MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
	free_irq(mb_chann->msix_irq, mb_chann);
	destroy_workqueue(mb_chann->work_q);
	/* We can clean up and release resources */

	xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
		mailbox_release_msg(mb_chann, mb_msg);

	xa_destroy(&mb_chann->chan_xa);

	MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
	kfree(mb_chann);
	return 0;
}

void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
	/* Disable the IRQ and wait. This might sleep. */
	disable_irq(mb_chann->msix_irq);

	/* Cancel RX work and wait for it to finish */
	cancel_work_sync(&mb_chann->rx_work);
	MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}

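/*
 * The mailbox object is allocated with drmm_kzalloc(), so its lifetime is tied
 * to the DRM device and no explicit free is needed.
 */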
struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
				     const struct xdna_mailbox_res *res)
{
	struct mailbox *mb;

	mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
	if (!mb)
		return NULL;
	mb->dev = ddev->dev;

	/* Mailbox and ring buffer base and size information */
	memcpy(&mb->res, res, sizeof(*res));

	return mb;
}