Path: blob/master/drivers/firmware/arm_scmi/transports/mailbox.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019-2024 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "../common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox uni/bi-directional channel
 * @chan_receiver: Optional Receiver mailbox unidirectional channel
 * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 * @chan_lock: Lock that prevents multiple xfers from being queued
 * @io_ops: Transport specific I/O operations
 */
struct scmi_mailbox {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct mbox_chan *chan_receiver;
	struct mbox_chan *chan_platform_receiver;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
	struct mutex chan_lock;
	struct scmi_shmem_io_ops *io_ops;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

static struct scmi_transport_core_operations *core;

static void tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	core->shmem->tx_prepare(smbox->shmem, m, smbox->cinfo,
				smbox->io_ops->toio);
}

static void rx_callback(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	/*
	 * An A2P IRQ is NOT valid when received while the platform still has
	 * the ownership of the channel, because the platform at first releases
	 * the SMT channel and then sends the completion interrupt.
	 *
	 * This addresses a possible race condition in which a spurious IRQ from
	 * a previous timed-out reply which arrived late could be wrongly
	 * associated with the next pending transaction.
	 */
	if (cl->knows_txdone &&
	    !core->shmem->channel_free(smbox->shmem)) {
		dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ !\n");
		core->bad_message_trace(smbox->cinfo,
					core->shmem->read_header(smbox->shmem),
					MSG_MBOX_SPURIOUS);
		return;
	}

	core->rx_callback(smbox->cinfo,
			  core->shmem->read_header(smbox->shmem), NULL);
}

static bool mailbox_chan_available(struct device_node *of_node, int idx)
{
	int num_mb;

	/*
	 * Just check if bidirectional channels are involved, and check the
	 * index accordingly; proper full validation will be made later
	 * in mailbox_chan_setup().
	 */
	num_mb = of_count_phandle_with_args(of_node, "mboxes", "#mbox-cells");
	if (num_mb == 3 && idx == 1)
		idx = 2;

	return !of_parse_phandle_with_args(of_node, "mboxes",
					   "#mbox-cells", idx, NULL);
}
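
/*
 * For quick reference, a summary derived from the checks and the switch in
 * mailbox_chan_validate() below; the arm,scmi devicetree binding remains the
 * authoritative description of the accepted 'mboxes'/'shmem' layouts:
 *
 *   1 mbox / 1 shmem  => A2P TX channel only (no completion, no P2A)
 *   2 mbox / 1 shmem  => A2P TX + A2P reply (completion) channel
 *   2 mbox / 2 shmem  => A2P TX + P2A RX channel
 *   3 mbox / 2 shmem  => A2P TX + A2P reply + P2A RX channels
 *   4 mbox / 2 shmem  => A2P TX + A2P reply + P2A RX + P2A completion channels
 */
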
/**
 * mailbox_chan_validate - Validate transport configuration and map channels
 *
 * @cdev: Reference to the underlying transport device carrying the
 *	  of_node descriptor to analyze.
 * @a2p_rx_chan: A reference to an optional unidirectional channel to use
 *		 for replies on the a2p channel. Set to zero if not present.
 * @p2a_chan: A reference to the optional p2a channel.
 *	      Set to zero if not present.
 * @p2a_rx_chan: A reference to the optional p2a completion channel.
 *		 Set to zero if not present.
 *
 * At first, validate the transport configuration as described in terms of
 * 'mboxes' and 'shmem', then determine which mailbox channel indexes are
 * appropriate to use in the current configuration.
 *
 * Return: 0 on success or a negative error value
 */
static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
				 int *p2a_chan, int *p2a_rx_chan)
{
	int num_mb, num_sh, ret = 0;
	struct device_node *np = cdev->of_node;

	num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
	num_sh = of_count_phandle_with_args(np, "shmem", NULL);
	dev_dbg(cdev, "Found %d mboxes and %d shmems !\n", num_mb, num_sh);

	/* Bail out if mboxes and shmem descriptors are inconsistent */
	if (num_mb <= 0 || num_sh <= 0 || num_sh > 2 || num_mb > 4 ||
	    (num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
	    (num_mb == 4 && num_sh != 2)) {
		dev_warn(cdev,
			 "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
			 of_node_full_name(np), num_mb, num_sh);
		return -EINVAL;
	}

	/* Bail out if provided shmem descriptors do not refer to distinct areas */
	if (num_sh > 1) {
		struct device_node *np_tx __free(device_node) =
					of_parse_phandle(np, "shmem", 0);
		struct device_node *np_rx __free(device_node) =
					of_parse_phandle(np, "shmem", 1);

		if (!np_tx || !np_rx || np_tx == np_rx) {
			dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
				 of_node_full_name(np));
			ret = -EINVAL;
		}
	}

	/* Calculate channel IDs to use depending on mboxes/shmem layout */
	if (!ret) {
		switch (num_mb) {
		case 1:
			*a2p_rx_chan = 0;
			*p2a_chan = 0;
			*p2a_rx_chan = 0;
			break;
		case 2:
			if (num_sh == 2) {
				*a2p_rx_chan = 0;
				*p2a_chan = 1;
			} else {
				*a2p_rx_chan = 1;
				*p2a_chan = 0;
			}
			*p2a_rx_chan = 0;
			break;
		case 3:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 0;
			break;
		case 4:
			*a2p_rx_chan = 1;
			*p2a_chan = 2;
			*p2a_rx_chan = 3;
			break;
		}
	}

	return ret;
}

static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			      bool tx)
{
	const char *desc = tx ? "Tx" : "Rx";
	struct device *cdev = cinfo->dev;
	struct scmi_mailbox *smbox;
	int ret, a2p_rx_chan, p2a_chan, p2a_rx_chan;
	struct mbox_client *cl;

	ret = mailbox_chan_validate(cdev, &a2p_rx_chan, &p2a_chan, &p2a_rx_chan);
	if (ret)
		return ret;

	if (!tx && !p2a_chan)
		return -ENODEV;

	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
	if (!smbox)
		return -ENOMEM;

	smbox->shmem = core->shmem->setup_iomap(cinfo, dev, tx, NULL,
						&smbox->io_ops);
	if (IS_ERR(smbox->shmem))
		return PTR_ERR(smbox->shmem);

	cl = &smbox->cl;
	cl->dev = cdev;
	cl->tx_prepare = tx ? tx_prepare : NULL;
	cl->rx_callback = rx_callback;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	smbox->chan = mbox_request_channel(cl, tx ? 0 : p2a_chan);
	if (IS_ERR(smbox->chan)) {
		ret = PTR_ERR(smbox->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(cdev,
				"failed to request SCMI %s mailbox\n", desc);
		return ret;
	}

	/* Additional unidirectional channel for TX if needed */
	if (tx && a2p_rx_chan) {
		smbox->chan_receiver = mbox_request_channel(cl, a2p_rx_chan);
		if (IS_ERR(smbox->chan_receiver)) {
			ret = PTR_ERR(smbox->chan_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI Tx Receiver mailbox\n");
			return ret;
		}
	}

	if (!tx && p2a_rx_chan) {
		smbox->chan_platform_receiver = mbox_request_channel(cl, p2a_rx_chan);
		if (IS_ERR(smbox->chan_platform_receiver)) {
			ret = PTR_ERR(smbox->chan_platform_receiver);
			if (ret != -EPROBE_DEFER)
				dev_err(cdev, "failed to request SCMI P2A Receiver mailbox\n");
			return ret;
		}
	}

	cinfo->transport_info = smbox;
	smbox->cinfo = cinfo;
	mutex_init(&smbox->chan_lock);

	return 0;
}

static int mailbox_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_mailbox *smbox = cinfo->transport_info;

	if (smbox && !IS_ERR(smbox->chan)) {
		mbox_free_channel(smbox->chan);
		mbox_free_channel(smbox->chan_receiver);
		mbox_free_channel(smbox->chan_platform_receiver);
		cinfo->transport_info = NULL;
		smbox->chan = NULL;
		smbox->chan_receiver = NULL;
		smbox->chan_platform_receiver = NULL;
		smbox->cinfo = NULL;
	}

	return 0;
}

static int mailbox_send_message(struct scmi_chan_info *cinfo,
				struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	int ret;

	/*
	 * The mailbox layer has its own queue. However the mailbox queue
	 * confuses the per message SCMI timeouts since the clock starts when
	 * the message is submitted into the mailbox queue.
	 * So when multiple messages are queued up the clock starts on all
	 * messages instead of only the one inflight.
	 */
	mutex_lock(&smbox->chan_lock);

	ret = mbox_send_message(smbox->chan, xfer);
	/* mbox_send_message returns non-negative value on success */
	if (ret < 0) {
		mutex_unlock(&smbox->chan_lock);
		return ret;
	}

	return 0;
}

static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
				struct scmi_xfer *__unused)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	mbox_client_txdone(smbox->chan, ret);

	/* Release channel */
	mutex_unlock(&smbox->chan_lock);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_response(smbox->shmem, xfer, smbox->io_ops->fromio);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
				       size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	core->shmem->fetch_notification(smbox->shmem, max_len, xfer,
					smbox->io_ops->fromio);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	struct mbox_chan *intr_chan;
	int ret;

	core->shmem->clear_channel(smbox->shmem);

	if (!core->shmem->channel_intr_enabled(smbox->shmem))
		return;

	if (smbox->chan_platform_receiver)
		intr_chan = smbox->chan_platform_receiver;
	else if (smbox->chan)
		intr_chan = smbox->chan;
	else
		return;

	ret = mbox_send_message(intr_chan, NULL);
	/* mbox_send_message returns non-negative value on success, so reset */
	if (ret > 0)
		ret = 0;

	mbox_client_txdone(intr_chan, ret);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	return core->shmem->poll_done(smbox->shmem, xfer);
}

static const struct scmi_transport_ops scmi_mailbox_ops = {
	.chan_available = mailbox_chan_available,
	.chan_setup = mailbox_chan_setup,
	.chan_free = mailbox_chan_free,
	.send_message = mailbox_send_message,
	.mark_txdone = mailbox_mark_txdone,
	.fetch_response = mailbox_fetch_response,
	.fetch_notification = mailbox_fetch_notification,
	.clear_channel = mailbox_clear_channel,
	.poll_done = mailbox_poll_done,
};

static struct scmi_desc scmi_mailbox_desc = {
	.ops = &scmi_mailbox_ops,
	.max_rx_timeout_ms = 30, /* We may increase this if required */
	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = SCMI_SHMEM_MAX_PAYLOAD_SIZE,
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi" },
	{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, scmi_of_match);

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_mailbox, scmi_mailbox_driver,
			     scmi_mailbox_desc, scmi_of_match, core);
module_platform_driver(scmi_mailbox_driver);

MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("SCMI Mailbox Transport driver");
MODULE_LICENSE("GPL");
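
/*
 * Purely illustrative: a hypothetical devicetree node using the full
 * 4 mboxes / 2 shmem layout accepted by mailbox_chan_validate() above,
 * i.e. A2P transmit, A2P reply, P2A notification and P2A completion
 * channels in that order. The phandle names (&mhu_b, &scmi_tx_shmem,
 * &scmi_rx_shmem) and the mailbox specifier cells are made up for this
 * sketch; see the arm,scmi devicetree binding for the real definitions.
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		mboxes = <&mhu_b 0>, <&mhu_b 1>, <&mhu_b 2>, <&mhu_b 3>;
 *		shmem = <&scmi_tx_shmem>, <&scmi_rx_shmem>;
 *	};
 */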