GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/amdxdna_mailbox.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

#define MB_ERR(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                      (_chann)->msix_irq, ##args); \
})

#define MAGIC_VAL		0x1D000000U
#define MAGIC_VAL_MASK		0xFF000000
#define MAX_MSG_ID_ENTRIES	256
#define MSG_RX_TIMER		200 /* milliseconds */
#define MAILBOX_NAME		"xdna_mailbox"

enum channel_res_type {
        CHAN_RES_X2I,
        CHAN_RES_I2X,
        CHAN_RES_NUM
};

struct mailbox {
        struct device		*dev;
        struct xdna_mailbox_res	res;
};

struct mailbox_channel {
        struct mailbox			*mb;
        struct xdna_mailbox_chann_res	res[CHAN_RES_NUM];
        int				msix_irq;
        u32				iohub_int_addr;
        struct xarray			chan_xa;
        u32				next_msgid;
        u32				x2i_tail;

        /* Received msg related fields */
        struct workqueue_struct		*work_q;
        struct work_struct		rx_work;
        u32				i2x_head;
        bool				bad_state;
};

#define MSG_BODY_SZ		GENMASK(10, 0)
#define MSG_PROTO_VER		GENMASK(23, 16)
struct xdna_msg_header {
        __u32 total_size;
        __u32 sz_ver;
        __u32 id;
        __u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

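/*
 * Worked example (illustrative, not from the original source): sz_ver packs
 * the body size into bits 10:0 and the protocol version into bits 23:16.
 * For a 32-byte body with protocol version 1:
 *
 *   FIELD_PREP(MSG_BODY_SZ, 32)  -> 0x00000020
 *   FIELD_PREP(MSG_PROTO_VER, 1) -> 0x00010000
 *   sz_ver                       -> 0x00010020
 */
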
struct mailbox_pkg {
        struct xdna_msg_header	header;
        __u32			payload[];
};

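/*
 * Illustrative layout of one package in the ring buffer (u32 words, the
 * header followed directly by the payload):
 *
 *   offset 0x00: header.total_size  (payload bytes, or TOMBSTONE on wrap)
 *   offset 0x04: header.sz_ver
 *   offset 0x08: header.id
 *   offset 0x0c: header.opcode
 *   offset 0x10: payload[0] ...
 */
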
/* The protocol version. */
#define MSG_PROTOCOL_VERSION	0x1
/* The tombstone value, written at the old tail to mark a ring buffer wrap. */
#define TOMBSTONE		0xDEADFACE

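/*
 * Wrap-around sketch (illustrative): a sender that cannot fit a message
 * between the current tail and the end of the ring writes TOMBSTONE at the
 * tail and restarts at offset 0; the receiver, on reading TOMBSTONE where a
 * size word is expected, wraps its head pointer to 0 as well. This is why a
 * payload may never begin with the TOMBSTONE value.
 */
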
struct mailbox_msg {
        void		*handle;
        int		(*notify_cb)(void *handle, void __iomem *data, size_t size);
        size_t		pkg_size; /* package size in bytes */
        struct mailbox_pkg pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        return readl(ringbuf_addr);
}

static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
        int ret, value;

        /* Poll until the register reads back non-zero */
        ret = readx_poll_timeout(readl, ringbuf_addr, value,
                                 value, 1 /* us */, 100);
        if (ret < 0)
                return ret;

        *val = value;
        return 0;
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
        mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
        mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
        return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        u32 msg_id;
        int ret;

        ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
                                  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
                                  &mb_chann->next_msgid, GFP_NOWAIT);
        if (ret < 0)
                return ret;

        /* Tag the allocated ID with MAGIC_VAL in the upper bits */
        msg_id |= MAGIC_VAL;
        return msg_id;
}

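/*
 * Worked example (illustrative): with MAX_MSG_ID_ENTRIES == 256, the xarray
 * hands out indices 0x00..0xFF. Tagging index 0x2A gives
 *
 *   0x0000002A | MAGIC_VAL (0x1D000000) == 0x1D00002A
 *
 * mailbox_validate_msgid() later checks the top byte against MAGIC_VAL,
 * and masking with ~MAGIC_VAL_MASK recovers the xarray index.
 */
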
static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
        msg_id &= ~MAGIC_VAL_MASK;
        xa_erase_irq(&mb_chann->chan_xa, msg_id);
}

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
                                struct mailbox_msg *mb_msg)
{
        MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
               mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
        mb_msg->notify_cb(mb_msg->handle, NULL, 0);
        kfree(mb_msg);
}

static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        void __iomem *write_addr;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        u32 tmp_tail;

        head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
        tail = mb_chann->x2i_tail;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
        start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
        tmp_tail = tail + mb_msg->pkg_size;

        if (tail < head && tmp_tail >= head)
                goto no_space;

        if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
                             mb_msg->pkg_size >= head))
                goto no_space;

        if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
                write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
                writel(TOMBSTONE, write_addr);

                /* The tombstone is set; write from the start of the ring buffer */
                tail = 0;
        }

        write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
        memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
        mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

        trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
                            mb_msg->pkg.header.opcode,
                            mb_msg->pkg.header.id);

        return 0;

no_space:
        return -ENOSPC;
}

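/*
 * Space-check walkthrough (illustrative numbers, not from the original
 * source): suppose ringbuf_size = 0x400, head = 0x80, tail = 0x3c0 and
 * pkg_size = 0x60, so tmp_tail = 0x420.
 *
 *   - tail >= head and tmp_tail > 0x400 - 4: the message cannot end before
 *     the reserved last word, so a wrap is needed.
 *   - pkg_size (0x60) < head (0x80): the message fits at offset 0 without
 *     overtaking the reader, so TOMBSTONE is written at 0x3c0, the package
 *     is copied at offset 0, and the new tail is 0x60.
 *
 * Had head been 0x40 instead, pkg_size >= head would hold and the function
 * would return -ENOSPC.
 */
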
static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
                 void __iomem *data)
{
        struct mailbox_msg *mb_msg;
        int msg_id;
        int ret;

        msg_id = header->id;
        if (!mailbox_validate_msgid(msg_id)) {
                MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
                return -EINVAL;
        }

        msg_id &= ~MAGIC_VAL_MASK;
        mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
        if (!mb_msg) {
                MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
                return -EINVAL;
        }

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);
        ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
        if (unlikely(ret))
                MB_ERR(mb_chann, "Message callback ret %d", ret);

        kfree(mb_msg);
        return ret;
}

static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
        struct xdna_msg_header header;
        void __iomem *read_addr;
        u32 msg_size, rest;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        int ret;

        if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
                return -EINVAL;
        head = mb_chann->i2x_head;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
        start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

        if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
                return -EINVAL;
        }

        /* Ring buffer empty */
        if (head == tail)
                return -ENOENT;

        if (head == ringbuf_size)
                head = 0;

        /* Peek the size of the message, which may be the TOMBSTONE marker */
        read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
        header.total_size = readl(read_addr);
        /* The size is TOMBSTONE: read the next message from offset 0 */
        if (header.total_size == TOMBSTONE) {
                if (head < tail) {
                        MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
                                     head, tail);
                        return -EINVAL;
                }
                mailbox_set_headptr(mb_chann, 0);
                return 0;
        }

        if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
                return -EINVAL;
        }
        msg_size = sizeof(header) + header.total_size;

        if (msg_size > ringbuf_size - head || msg_size > tail - head) {
                MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
                             msg_size, tail, head);
                return -EINVAL;
        }

        rest = sizeof(header) - sizeof(u32);
        read_addr += sizeof(u32);
        memcpy_fromio((u32 *)&header + 1, read_addr, rest);
        read_addr += rest;

        ret = mailbox_get_resp(mb_chann, &header, read_addr);

        mailbox_set_headptr(mb_chann, head + msg_size);
        /* After the update, head may equal ringbuf_size; this is expected. */
        trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
                            header.opcode, header.id);

        return ret;
}

static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
        struct mailbox_channel *mb_chann = p;

        trace_mbox_irq_handle(MAILBOX_NAME, irq);
        /* Schedule rx_work to invoke the notify callbacks */
        queue_work(mb_chann->work_q, &mb_chann->rx_work);

        return IRQ_HANDLED;
}

static void mailbox_rx_worker(struct work_struct *rx_work)
{
        struct mailbox_channel *mb_chann;
        int ret;

        mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state, work aborted");
                return;
        }

again:
        mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

        while (1) {
                /*
                 * While the return value is 0, keep consuming the next
                 * message until there are no messages left or an error
                 * occurs.
                 */
                ret = mailbox_get_msg(mb_chann);
                if (ret == -ENOENT)
                        break;

                /* Any other error means the device is unhealthy; disable the IRQ. */
                if (unlikely(ret)) {
                        MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
                        WRITE_ONCE(mb_chann->bad_state, true);
                        return;
                }
        }

        /*
         * The hardware does not generate an interrupt if the firmware posts
         * a new response right after the driver clears the interrupt
         * register. Check the interrupt register again to make sure no new
         * response arrived before exiting.
         */
        if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
                goto again;
}

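/*
 * Timeline of the race the re-check above closes (illustrative):
 *
 *   driver                            firmware
 *   ------                            --------
 *   clear IOHUB interrupt register
 *   drain ring until -ENOENT
 *                                     post response, set IOHUB register
 *                                     (no new MSI-X is generated)
 *   read IOHUB register: non-zero
 *   goto again (re-clear, re-drain)
 */
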
int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
                          const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
        struct xdna_msg_header *header;
        struct mailbox_msg *mb_msg;
        size_t pkg_size;
        int ret;

        pkg_size = sizeof(*header) + msg->send_size;
        if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
                MB_ERR(mb_chann, "Message size larger than ringbuf size");
                return -EINVAL;
        }

        if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
                MB_ERR(mb_chann, "Message size must be 4-byte aligned");
                return -EINVAL;
        }

        /* The first word of the payload must NOT be TOMBSTONE */
        if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
                MB_ERR(mb_chann, "Tombstone in data");
                return -EINVAL;
        }

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state");
                return -EPIPE;
        }

        mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
        if (!mb_msg)
                return -ENOMEM;

        mb_msg->handle = msg->handle;
        mb_msg->notify_cb = msg->notify_cb;
        mb_msg->pkg_size = pkg_size;

        header = &mb_msg->pkg.header;
        /*
         * Hardware uses total_size and the size field to split huge
         * messages. That is not supported here, so the two values are
         * the same.
         */
        header->total_size = msg->send_size;
        header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
                         FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
        header->opcode = msg->opcode;
        memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

        ret = mailbox_acquire_msgid(mb_chann, mb_msg);
        if (unlikely(ret < 0)) {
                MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
                goto msg_id_failed;
        }
        header->id = ret;

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);

        ret = mailbox_send_msg(mb_chann, mb_msg);
        if (ret) {
                MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
                goto release_id;
        }

        return 0;

release_id:
        mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
        kfree(mb_msg);
        return ret;
}

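/*
 * Usage sketch (illustrative only; my_ctx, my_notify_cb, the opcode value
 * and TX_TIMEOUT are hypothetical caller-side names, not part of this
 * driver):
 *
 *   static int my_notify_cb(void *handle, void __iomem *data, size_t size)
 *   {
 *           struct my_ctx *ctx = handle;
 *
 *           if (!data)
 *                   return 0;  // message dropped at channel destroy
 *           // copy the response out of device memory before returning
 *           memcpy_fromio(ctx->resp, data, min(size, sizeof(ctx->resp)));
 *           complete(&ctx->done);
 *           return 0;
 *   }
 *
 *   struct xdna_mailbox_msg msg = {
 *           .opcode    = 0x42,
 *           .handle    = ctx,
 *           .notify_cb = my_notify_cb,
 *           .send_data = (u8 *)&req,  // payload, 4-byte aligned size
 *           .send_size = sizeof(req),
 *   };
 *   ret = xdna_mailbox_send_msg(mb_chann, &msg, TX_TIMEOUT);
 */
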
struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
                            const struct xdna_mailbox_chann_res *x2i,
                            const struct xdna_mailbox_chann_res *i2x,
                            u32 iohub_int_addr,
                            int mb_irq)
{
        struct mailbox_channel *mb_chann;
        int ret;

        if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
                pr_err("Ring buf size must be a power of 2");
                return NULL;
        }

        mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
        if (!mb_chann)
                return NULL;

        mb_chann->mb = mb;
        mb_chann->msix_irq = mb_irq;
        mb_chann->iohub_int_addr = iohub_int_addr;
        memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
        memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

        xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
        mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

        INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
        mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
        if (!mb_chann->work_q) {
                MB_ERR(mb_chann, "Create workqueue failed");
                goto free_and_out;
        }

        /* Everything looks good; time to enable the IRQ handler */
        ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
        if (ret) {
                MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
                goto destroy_wq;
        }

        mb_chann->bad_state = false;

        MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
        return mb_chann;

destroy_wq:
        destroy_workqueue(mb_chann->work_q);
free_and_out:
        kfree(mb_chann);
        return NULL;
}

int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
        struct mailbox_msg *mb_msg;
        unsigned long msg_id;

        MB_DBG(mb_chann, "Freeing IRQ and destroying workqueue");
        free_irq(mb_chann->msix_irq, mb_chann);
        destroy_workqueue(mb_chann->work_q);
        /* Now it is safe to clean up and release resources */

        xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
                mailbox_release_msg(mb_chann, mb_msg);

        xa_destroy(&mb_chann->chan_xa);

        MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
        kfree(mb_chann);
        return 0;
}

void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
        /* Disable the IRQ and wait for any running handler to finish. This might sleep. */
        disable_irq(mb_chann->msix_irq);

        /* Cancel RX work and wait for it to finish */
        cancel_work_sync(&mb_chann->rx_work);
        MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}

struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
                                     const struct xdna_mailbox_res *res)
{
        struct mailbox *mb;

        mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
        if (!mb)
                return NULL;
        mb->dev = ddev->dev;

        /* Mailbox and ring buffer base and size information */
        memcpy(&mb->res, res, sizeof(*res));

        return mb;
}
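
/*
 * Lifecycle sketch (illustrative; mbox_res, x2i_res, i2x_res and TX_TIMEOUT
 * are hypothetical caller-supplied values):
 *
 *   struct mailbox *mb;
 *   struct mailbox_channel *chann;
 *
 *   mb = xdnam_mailbox_create(ddev, &mbox_res);
 *   if (!mb)
 *           return -ENOMEM;
 *
 *   chann = xdna_mailbox_create_channel(mb, &x2i_res, &i2x_res,
 *                                       iohub_int_addr, msix_irq);
 *   if (!chann)
 *           return -EINVAL;
 *
 *   ... xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT) ...
 *
 *   xdna_mailbox_stop_channel(chann);     // quiesce IRQ and RX work
 *   xdna_mailbox_destroy_channel(chann);  // drop pending msgs, free channel
 *
 * The struct mailbox itself is drmm-managed and is released with the DRM
 * device.
 */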