Path: blob/master/drivers/accel/amdxdna/amdxdna_mailbox.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2024, Advanced Micro Devices, Inc.
 */

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#define CREATE_TRACE_POINTS
#include <trace/events/amdxdna.h>

#include "amdxdna_mailbox.h"

#define MB_ERR(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_err((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_DBG(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_dbg((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                (_chann)->msix_irq, ##args); \
})
#define MB_WARN_ONCE(chann, fmt, args...) \
({ \
        typeof(chann) _chann = chann; \
        dev_warn_once((_chann)->mb->dev, "xdna_mailbox.%d: "fmt, \
                      (_chann)->msix_irq, ##args); \
})

#define MAGIC_VAL 0x1D000000U
#define MAGIC_VAL_MASK 0xFF000000
#define MAX_MSG_ID_ENTRIES 256
#define MSG_RX_TIMER 200 /* milliseconds */
#define MAILBOX_NAME "xdna_mailbox"

enum channel_res_type {
        CHAN_RES_X2I,
        CHAN_RES_I2X,
        CHAN_RES_NUM
};

struct mailbox {
        struct device *dev;
        struct xdna_mailbox_res res;
};

struct mailbox_channel {
        struct mailbox *mb;
        struct xdna_mailbox_chann_res res[CHAN_RES_NUM];
        int msix_irq;
        u32 iohub_int_addr;
        struct xarray chan_xa;
        u32 next_msgid;
        u32 x2i_tail;

        /* Received msg related fields */
        struct workqueue_struct *work_q;
        struct work_struct rx_work;
        u32 i2x_head;
        bool bad_state;
};

#define MSG_BODY_SZ GENMASK(10, 0)
#define MSG_PROTO_VER GENMASK(23, 16)
struct xdna_msg_header {
        __u32 total_size;
        __u32 sz_ver;
        __u32 id;
        __u32 opcode;
} __packed;

static_assert(sizeof(struct xdna_msg_header) == 16);

struct mailbox_pkg {
        struct xdna_msg_header header;
        __u32 payload[];
};
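
/*
 * Editorial note on the header layout above: sz_ver packs the body size
 * into bits 10:0 and the protocol version into bits 23:16. For example,
 * a 16-byte body on protocol version 0x1 is encoded as
 * FIELD_PREP(MSG_BODY_SZ, 16) | FIELD_PREP(MSG_PROTO_VER, 0x1), i.e.
 * sz_ver == 0x10010.
 */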

/* The protocol version. */
#define MSG_PROTOCOL_VERSION 0x1
/* The tombstone value. */
#define TOMBSTONE 0xDEADFACE

struct mailbox_msg {
        void *handle;
        int (*notify_cb)(void *handle, void __iomem *data, size_t size);
        size_t pkg_size; /* package size in bytes */
        struct mailbox_pkg pkg;
};

static void mailbox_reg_write(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 data)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        writel(data, ringbuf_addr);
}

static u32 mailbox_reg_read(struct mailbox_channel *mb_chann, u32 mbox_reg)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;

        return readl(ringbuf_addr);
}

static int mailbox_reg_read_non_zero(struct mailbox_channel *mb_chann, u32 mbox_reg, u32 *val)
{
        struct xdna_mailbox_res *mb_res = &mb_chann->mb->res;
        void __iomem *ringbuf_addr = mb_res->mbox_base + mbox_reg;
        int ret, value;

        /* Poll till value is not zero */
        ret = readx_poll_timeout(readl, ringbuf_addr, value,
                                 value, 1 /* us */, 100);
        if (ret < 0)
                return ret;

        *val = value;
        return 0;
}

static inline void
mailbox_set_headptr(struct mailbox_channel *mb_chann, u32 headptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_head_ptr_reg, headptr_val);
        mb_chann->i2x_head = headptr_val;
}

static inline void
mailbox_set_tailptr(struct mailbox_channel *mb_chann, u32 tailptr_val)
{
        mailbox_reg_write(mb_chann, mb_chann->res[CHAN_RES_X2I].mb_tail_ptr_reg, tailptr_val);
        mb_chann->x2i_tail = tailptr_val;
}

static inline u32
mailbox_get_headptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_head_ptr_reg);
}

static inline u32
mailbox_get_tailptr(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mailbox_reg_read(mb_chann, mb_chann->res[type].mb_tail_ptr_reg);
}

static inline u32
mailbox_get_ringbuf_size(struct mailbox_channel *mb_chann, enum channel_res_type type)
{
        return mb_chann->res[type].rb_size;
}

static inline int mailbox_validate_msgid(int msg_id)
{
        return (msg_id & MAGIC_VAL_MASK) == MAGIC_VAL;
}

static int mailbox_acquire_msgid(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        u32 msg_id;
        int ret;

        ret = xa_alloc_cyclic_irq(&mb_chann->chan_xa, &msg_id, mb_msg,
                                  XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1),
                                  &mb_chann->next_msgid, GFP_NOWAIT);
        if (ret < 0)
                return ret;

        /*
         * Add MAGIC_VAL to the higher bits.
         */
        msg_id |= MAGIC_VAL;
        return msg_id;
}

static void mailbox_release_msgid(struct mailbox_channel *mb_chann, int msg_id)
{
        msg_id &= ~MAGIC_VAL_MASK;
        xa_erase_irq(&mb_chann->chan_xa, msg_id);
}
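
/*
 * Editorial note on the ID scheme above: the xarray index stays inside
 * XA_LIMIT(0, MAX_MSG_ID_ENTRIES - 1), i.e. the low byte, and MAGIC_VAL
 * tags the top byte, so an allocated index of 5 travels on the wire as
 * 0x1D000005. mailbox_validate_msgid() can thus reject a response ID
 * that was never produced by mailbox_acquire_msgid() before touching
 * the xarray.
 */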

static void mailbox_release_msg(struct mailbox_channel *mb_chann,
                                struct mailbox_msg *mb_msg)
{
        MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
               mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
        mb_msg->notify_cb(mb_msg->handle, NULL, 0);
        kfree(mb_msg);
}

static int
mailbox_send_msg(struct mailbox_channel *mb_chann, struct mailbox_msg *mb_msg)
{
        void __iomem *write_addr;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        u32 tmp_tail;

        head = mailbox_get_headptr(mb_chann, CHAN_RES_X2I);
        tail = mb_chann->x2i_tail;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I);
        start_addr = mb_chann->res[CHAN_RES_X2I].rb_start_addr;
        tmp_tail = tail + mb_msg->pkg_size;

        if (tail < head && tmp_tail >= head)
                goto no_space;

        if (tail >= head && (tmp_tail > ringbuf_size - sizeof(u32) &&
                             mb_msg->pkg_size >= head))
                goto no_space;

        if (tail >= head && tmp_tail > ringbuf_size - sizeof(u32)) {
                write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
                writel(TOMBSTONE, write_addr);

                /* tombstone is set. Write from the start of the ringbuf */
                tail = 0;
        }

        write_addr = mb_chann->mb->res.ringbuf_base + start_addr + tail;
        memcpy_toio(write_addr, &mb_msg->pkg, mb_msg->pkg_size);
        mailbox_set_tailptr(mb_chann, tail + mb_msg->pkg_size);

        trace_mbox_set_tail(MAILBOX_NAME, mb_chann->msix_irq,
                            mb_msg->pkg.header.opcode,
                            mb_msg->pkg.header.id);

        return 0;

no_space:
        return -ENOSPC;
}
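
/*
 * Editorial walk-through of the wrap logic above, with assumed example
 * values: for rb_size 0x400, head 0x100 and tail 0x3f8, a 0x20-byte
 * package does not fit before the buffer end (0x3f8 + 0x20 > 0x400 - 4)
 * but is not rejected, since 0x20 < head leaves room at the front. A
 * TOMBSTONE is written at offset 0x3f8, the package lands at offset 0
 * and the tail pointer becomes 0x20. The consumer in mailbox_get_msg()
 * sees the tombstone and wraps its head pointer back to 0.
 */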

static int
mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *header,
                 void __iomem *data)
{
        struct mailbox_msg *mb_msg;
        int msg_id;
        int ret;

        msg_id = header->id;
        if (!mailbox_validate_msgid(msg_id)) {
                MB_ERR(mb_chann, "Bad message ID 0x%x", msg_id);
                return -EINVAL;
        }

        msg_id &= ~MAGIC_VAL_MASK;
        mb_msg = xa_erase_irq(&mb_chann->chan_xa, msg_id);
        if (!mb_msg) {
                MB_ERR(mb_chann, "Cannot find msg 0x%x", msg_id);
                return -EINVAL;
        }

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);
        ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
        if (unlikely(ret))
                MB_ERR(mb_chann, "Message callback ret %d", ret);

        kfree(mb_msg);
        return ret;
}

static int mailbox_get_msg(struct mailbox_channel *mb_chann)
{
        struct xdna_msg_header header;
        void __iomem *read_addr;
        u32 msg_size, rest;
        u32 ringbuf_size;
        u32 head, tail;
        u32 start_addr;
        int ret;

        if (mailbox_reg_read_non_zero(mb_chann, mb_chann->res[CHAN_RES_I2X].mb_tail_ptr_reg, &tail))
                return -EINVAL;
        head = mb_chann->i2x_head;
        ringbuf_size = mailbox_get_ringbuf_size(mb_chann, CHAN_RES_I2X);
        start_addr = mb_chann->res[CHAN_RES_I2X].rb_start_addr;

        if (unlikely(tail > ringbuf_size || !IS_ALIGNED(tail, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid tail 0x%x", tail);
                return -EINVAL;
        }

        /* ringbuf empty */
        if (head == tail)
                return -ENOENT;

        if (head == ringbuf_size)
                head = 0;

        /* Peek size of the message or TOMBSTONE */
        read_addr = mb_chann->mb->res.ringbuf_base + start_addr + head;
        header.total_size = readl(read_addr);
        /* size is TOMBSTONE, set next read from 0 */
        if (header.total_size == TOMBSTONE) {
                if (head < tail) {
                        MB_WARN_ONCE(mb_chann, "Tombstone, head 0x%x tail 0x%x",
                                     head, tail);
                        return -EINVAL;
                }
                mailbox_set_headptr(mb_chann, 0);
                return 0;
        }

        if (unlikely(!header.total_size || !IS_ALIGNED(header.total_size, 4))) {
                MB_WARN_ONCE(mb_chann, "Invalid total size 0x%x", header.total_size);
                return -EINVAL;
        }
        msg_size = sizeof(header) + header.total_size;

        if (msg_size > ringbuf_size - head || msg_size > tail - head) {
                MB_WARN_ONCE(mb_chann, "Invalid message size %d, tail %d, head %d",
                             msg_size, tail, head);
                return -EINVAL;
        }

        rest = sizeof(header) - sizeof(u32);
        read_addr += sizeof(u32);
        memcpy_fromio((u32 *)&header + 1, read_addr, rest);
        read_addr += rest;

        ret = mailbox_get_resp(mb_chann, &header, read_addr);

        mailbox_set_headptr(mb_chann, head + msg_size);
        /* After updating head, it can equal ringbuf_size. This is expected. */
        trace_mbox_set_head(MAILBOX_NAME, mb_chann->msix_irq,
                            header.opcode, header.id);

        return ret;
}
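
/*
 * Editorial note on the read path above: only the 16-byte header is
 * copied out of io memory (one readl() to peek the size, then a
 * 12-byte memcpy_fromio() for the rest); the payload stays in the ring
 * buffer and reaches the notify_cb as a __iomem pointer, so a callback
 * that reads it presumably needs io accessors such as memcpy_fromio().
 */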

static irqreturn_t mailbox_irq_handler(int irq, void *p)
{
        struct mailbox_channel *mb_chann = p;

        trace_mbox_irq_handle(MAILBOX_NAME, irq);
        /* Schedule rx_work to call the callback functions */
        queue_work(mb_chann->work_q, &mb_chann->rx_work);

        return IRQ_HANDLED;
}

static void mailbox_rx_worker(struct work_struct *rx_work)
{
        struct mailbox_channel *mb_chann;
        int ret;

        mb_chann = container_of(rx_work, struct mailbox_channel, rx_work);

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state, work aborted");
                return;
        }

again:
        mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);

        while (1) {
                /*
                 * If return is 0, keep consuming the next message until
                 * there are no more messages or an error happens.
                 */
                ret = mailbox_get_msg(mb_chann);
                if (ret == -ENOENT)
                        break;

                /* Other errors mean the device doesn't look good, disable irq. */
                if (unlikely(ret)) {
                        MB_ERR(mb_chann, "Unexpected ret %d, disable irq", ret);
                        WRITE_ONCE(mb_chann->bad_state, true);
                        return;
                }
        }

        /*
         * The hardware will not generate an interrupt if the firmware
         * creates a new response right after the driver clears the
         * interrupt register. Check the interrupt register to make sure
         * there is no new response before exiting.
         */
        if (mailbox_reg_read(mb_chann, mb_chann->iohub_int_addr))
                goto again;
}

int xdna_mailbox_send_msg(struct mailbox_channel *mb_chann,
                          const struct xdna_mailbox_msg *msg, u64 tx_timeout)
{
        struct xdna_msg_header *header;
        struct mailbox_msg *mb_msg;
        size_t pkg_size;
        int ret;

        pkg_size = sizeof(*header) + msg->send_size;
        if (pkg_size > mailbox_get_ringbuf_size(mb_chann, CHAN_RES_X2I)) {
                MB_ERR(mb_chann, "Message size larger than ringbuf size");
                return -EINVAL;
        }

        if (unlikely(!IS_ALIGNED(msg->send_size, 4))) {
                MB_ERR(mb_chann, "Message must be 4 bytes aligned");
                return -EINVAL;
        }

        /* The first word in the payload can NOT be TOMBSTONE */
        if (unlikely(((u32 *)msg->send_data)[0] == TOMBSTONE)) {
                MB_ERR(mb_chann, "Tombstone in data");
                return -EINVAL;
        }

        if (READ_ONCE(mb_chann->bad_state)) {
                MB_ERR(mb_chann, "Channel in bad state");
                return -EPIPE;
        }

        mb_msg = kzalloc(sizeof(*mb_msg) + pkg_size, GFP_KERNEL);
        if (!mb_msg)
                return -ENOMEM;

        mb_msg->handle = msg->handle;
        mb_msg->notify_cb = msg->notify_cb;
        mb_msg->pkg_size = pkg_size;

        header = &mb_msg->pkg.header;
        /*
         * Hardware uses total_size and size to split huge messages.
         * We do not support that here, thus the values are the same.
         */
        header->total_size = msg->send_size;
        header->sz_ver = FIELD_PREP(MSG_BODY_SZ, msg->send_size) |
                         FIELD_PREP(MSG_PROTO_VER, MSG_PROTOCOL_VERSION);
        header->opcode = msg->opcode;
        memcpy(mb_msg->pkg.payload, msg->send_data, msg->send_size);

        ret = mailbox_acquire_msgid(mb_chann, mb_msg);
        if (unlikely(ret < 0)) {
                MB_ERR(mb_chann, "mailbox_acquire_msgid failed");
                goto msg_id_failed;
        }
        header->id = ret;

        MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
               header->opcode, header->total_size, header->id);

        ret = mailbox_send_msg(mb_chann, mb_msg);
        if (ret) {
                MB_DBG(mb_chann, "Error in mailbox send msg, ret %d", ret);
                goto release_id;
        }

        return 0;

release_id:
        mailbox_release_msgid(mb_chann, header->id);
msg_id_failed:
        kfree(mb_msg);
        return ret;
}
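
/*
 * Editorial usage sketch (hypothetical caller, not part of this file):
 * posting a request through xdna_mailbox_send_msg(), based on the
 * struct xdna_mailbox_msg fields this file reads. The request struct,
 * callback, context and timeout names are placeholders.
 */
#if 0   /* illustrative only */
        struct xdna_mailbox_msg msg = {
                .opcode    = 0x1,               /* placeholder opcode */
                .send_data = (void *)&req,      /* first word must not be TOMBSTONE */
                .send_size = sizeof(req),       /* must be a multiple of 4 */
                .handle    = &resp_ctx,         /* handed back to notify_cb */
                .notify_cb = my_notify_cb,      /* called from the rx worker */
        };

        ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
#endif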

struct mailbox_channel *
xdna_mailbox_create_channel(struct mailbox *mb,
                            const struct xdna_mailbox_chann_res *x2i,
                            const struct xdna_mailbox_chann_res *i2x,
                            u32 iohub_int_addr,
                            int mb_irq)
{
        struct mailbox_channel *mb_chann;
        int ret;

        if (!is_power_of_2(x2i->rb_size) || !is_power_of_2(i2x->rb_size)) {
                pr_err("Ring buf size must be power of 2");
                return NULL;
        }

        mb_chann = kzalloc(sizeof(*mb_chann), GFP_KERNEL);
        if (!mb_chann)
                return NULL;

        mb_chann->mb = mb;
        mb_chann->msix_irq = mb_irq;
        mb_chann->iohub_int_addr = iohub_int_addr;
        memcpy(&mb_chann->res[CHAN_RES_X2I], x2i, sizeof(*x2i));
        memcpy(&mb_chann->res[CHAN_RES_I2X], i2x, sizeof(*i2x));

        xa_init_flags(&mb_chann->chan_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        mb_chann->x2i_tail = mailbox_get_tailptr(mb_chann, CHAN_RES_X2I);
        mb_chann->i2x_head = mailbox_get_headptr(mb_chann, CHAN_RES_I2X);

        INIT_WORK(&mb_chann->rx_work, mailbox_rx_worker);
        mb_chann->work_q = create_singlethread_workqueue(MAILBOX_NAME);
        if (!mb_chann->work_q) {
                MB_ERR(mb_chann, "Create workqueue failed");
                goto free_and_out;
        }

        /* Everything looks good. Time to enable the irq handler */
        ret = request_irq(mb_irq, mailbox_irq_handler, 0, MAILBOX_NAME, mb_chann);
        if (ret) {
                MB_ERR(mb_chann, "Failed to request irq %d ret %d", mb_irq, ret);
                goto destroy_wq;
        }

        mb_chann->bad_state = false;

        MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
        return mb_chann;

destroy_wq:
        destroy_workqueue(mb_chann->work_q);
free_and_out:
        kfree(mb_chann);
        return NULL;
}

int xdna_mailbox_destroy_channel(struct mailbox_channel *mb_chann)
{
        struct mailbox_msg *mb_msg;
        unsigned long msg_id;

        MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
        free_irq(mb_chann->msix_irq, mb_chann);
        destroy_workqueue(mb_chann->work_q);
        /* We can clean up and release resources */

        xa_for_each(&mb_chann->chan_xa, msg_id, mb_msg)
                mailbox_release_msg(mb_chann, mb_msg);

        xa_destroy(&mb_chann->chan_xa);

        MB_DBG(mb_chann, "Mailbox channel destroyed, irq: %d", mb_chann->msix_irq);
        kfree(mb_chann);
        return 0;
}

void xdna_mailbox_stop_channel(struct mailbox_channel *mb_chann)
{
        /* Disable the irq and wait. This might sleep. */
        disable_irq(mb_chann->msix_irq);

        /* Cancel RX work and wait for it to finish */
        cancel_work_sync(&mb_chann->rx_work);
        MB_DBG(mb_chann, "IRQ disabled and RX work cancelled");
}

struct mailbox *xdnam_mailbox_create(struct drm_device *ddev,
                                     const struct xdna_mailbox_res *res)
{
        struct mailbox *mb;

        mb = drmm_kzalloc(ddev, sizeof(*mb), GFP_KERNEL);
        if (!mb)
                return NULL;
        mb->dev = ddev->dev;

        /* mailbox and ring buf base and size information */
        memcpy(&mb->res, res, sizeof(*res));

        return mb;
}
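
/*
 * Editorial lifecycle sketch (hypothetical caller, not part of this
 * file), in the order the API above implies: create the mailbox, then
 * the channel; on teardown, stop the channel to quiesce the irq and rx
 * work, then destroy it, which also releases outstanding messages via
 * their notify_cb. The resource variables are placeholders.
 */
#if 0   /* illustrative only */
        struct mailbox *mb = xdnam_mailbox_create(ddev, &mb_res);
        struct mailbox_channel *chann;

        chann = xdna_mailbox_create_channel(mb, &x2i_res, &i2x_res,
                                            iohub_addr, msix_irq);
        if (!chann)
                return -EINVAL;

        /* ... xdna_mailbox_send_msg() while the device is live ... */

        xdna_mailbox_stop_channel(chann);       /* disable irq, flush rx work */
        xdna_mailbox_destroy_channel(chann);    /* free irq, wq, pending msgs */
#endif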