Path: drivers/firmware/arm_scmi/transports/virtio.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio Transport driver for Arm System Control and Management Interface
 * (SCMI).
 *
 * Copyright (C) 2020-2022 OpenSynergy.
 * Copyright (C) 2021-2024 ARM Ltd.
 */

/**
 * DOC: Theory of Operation
 *
 * The scmi-virtio transport implements a driver for the virtio SCMI device.
 *
 * There is one Tx channel (virtio cmdq, A2P channel) and at most one Rx
 * channel (virtio eventq, P2A channel). Each channel is implemented through a
 * virtqueue. Access to each virtqueue is protected by spinlocks.
 */

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

#include <uapi/linux/virtio_ids.h>
#include <uapi/linux/virtio_scmi.h>

#include "../common.h"

#define VIRTIO_MAX_RX_TIMEOUT_MS	60000
#define VIRTIO_SCMI_MAX_MSG_SIZE 128 /* Value may be increased. */
#define VIRTIO_SCMI_MAX_PDU_SIZE(ci) \
	((ci)->max_msg_size + SCMI_MSG_MAX_PROT_OVERHEAD)
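/*
 * Each Tx message is queued to the cmdq as two descriptors: one out-buffer
 * carrying the request SDU and one in-buffer receiving the response SDU
 * (see virtio_send_message()).
 */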
#define DESCRIPTORS_PER_TX_MSG 2

/**
 * struct scmi_vio_channel - Transport channel information
 *
 * @vqueue: Associated virtqueue
 * @cinfo: SCMI Tx or Rx channel
 * @free_lock: Protects access to the @free_list.
 * @free_list: List of unused scmi_vio_msg, maintained for Tx channels only
 * @deferred_tx_work: Worker for TX deferred replies processing
 * @deferred_tx_wq: Workqueue for TX deferred replies
 * @pending_lock: Protects access to the @pending_cmds_list.
 * @pending_cmds_list: List of pre-fetched commands queued for later processing
 * @is_rx: Whether channel is an Rx channel
 * @max_msg: Maximum number of pending messages for this channel.
 * @lock: Protects access to all members except users, free_list and
 *	  pending_cmds_list.
 * @shutdown_done: A reference to a completion used when freeing this channel.
 * @users: A reference count to currently active users of this channel.
 */
struct scmi_vio_channel {
	struct virtqueue *vqueue;
	struct scmi_chan_info *cinfo;
	/* lock to protect access to the free list. */
	spinlock_t free_lock;
	struct list_head free_list;
	/* lock to protect access to the pending list. */
	spinlock_t pending_lock;
	struct list_head pending_cmds_list;
	struct work_struct deferred_tx_work;
	struct workqueue_struct *deferred_tx_wq;
	bool is_rx;
	unsigned int max_msg;
	/*
	 * Lock to protect access to all members except users, free_list and
	 * pending_cmds_list
	 */
	spinlock_t lock;
	struct completion *shutdown_done;
	refcount_t users;
};

enum poll_states {
	VIO_MSG_NOT_POLLED,
	VIO_MSG_POLL_TIMEOUT,
	VIO_MSG_POLLING,
	VIO_MSG_POLL_DONE,
};

/**
 * struct scmi_vio_msg - Transport PDU information
 *
 * @request: SDU used for commands
 * @input: SDU used for (delayed) responses and notifications
 * @list: List which scmi_vio_msg may be part of
 * @rx_len: Input SDU size in bytes, once input has been received
 * @max_len: Maximum allowed SDU size in bytes
 * @poll_idx: Last used index registered for polling purposes if this message
 *	      transaction reply was configured for polling.
 * @poll_status: Polling state for this message.
 * @poll_lock: A lock to protect @poll_status
 * @users: A reference count to track this message users and avoid premature
 *	   freeing (and reuse) when polling and IRQ execution paths interleave.
 */
struct scmi_vio_msg {
	struct scmi_msg_payld *request;
	struct scmi_msg_payld *input;
	struct list_head list;
	unsigned int rx_len;
	unsigned int max_len;
	unsigned int poll_idx;
	enum poll_states poll_status;
	/* Lock to protect access to poll_status */
	spinlock_t poll_lock;
	refcount_t users;
};

static struct scmi_transport_core_operations *core;

/* Only one SCMI VirtIO device can possibly exist */
static struct virtio_device *scmi_vdev;

static void scmi_vio_channel_ready(struct scmi_vio_channel *vioch,
				   struct scmi_chan_info *cinfo)
{
	unsigned long flags;

	spin_lock_irqsave(&vioch->lock, flags);
	cinfo->transport_info = vioch;
	/* Indirectly setting channel not available any more */
	vioch->cinfo = cinfo;
	spin_unlock_irqrestore(&vioch->lock, flags);

	refcount_set(&vioch->users, 1);
}

static inline bool scmi_vio_channel_acquire(struct scmi_vio_channel *vioch)
{
	return refcount_inc_not_zero(&vioch->users);
}

static inline void scmi_vio_channel_release(struct scmi_vio_channel *vioch)
{
	if (refcount_dec_and_test(&vioch->users)) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->lock, flags);
		if (vioch->shutdown_done) {
			vioch->cinfo = NULL;
			complete(vioch->shutdown_done);
		}
		spin_unlock_irqrestore(&vioch->lock, flags);
	}
}

static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done);

	/*
	 * Prepare to wait for the last release if not already released
	 * or in progress.
	 */
	spin_lock_irqsave(&vioch->lock, flags);
	if (!vioch->cinfo || vioch->shutdown_done) {
		spin_unlock_irqrestore(&vioch->lock, flags);
		return;
	}

	vioch->shutdown_done = &vioch_shutdown_done;
	if (!vioch->is_rx && vioch->deferred_tx_wq)
		/* Cannot be kicked anymore after this... */
		vioch->deferred_tx_wq = NULL;
	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	/* Let any possibly concurrent RX path release the channel */
	wait_for_completion(vioch->shutdown_done);
}

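/*
 * Message lifecycle on a Tx channel: buffers are pre-allocated at channel
 * setup and parked on the free_list; scmi_virtio_get_free_msg() pulls one out
 * holding a single user reference, while scmi_vio_msg_release() puts it back
 * on the free_list once the last reference is dropped.
 */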
/* Assumes to be called with vio channel acquired already */
static struct scmi_vio_msg *
scmi_virtio_get_free_msg(struct scmi_vio_channel *vioch)
{
	unsigned long flags;
	struct scmi_vio_msg *msg;

	spin_lock_irqsave(&vioch->free_lock, flags);
	if (list_empty(&vioch->free_list)) {
		spin_unlock_irqrestore(&vioch->free_lock, flags);
		return NULL;
	}

	msg = list_first_entry(&vioch->free_list, typeof(*msg), list);
	list_del_init(&msg->list);
	spin_unlock_irqrestore(&vioch->free_lock, flags);

	/* Still no users, no need to acquire poll_lock */
	msg->poll_status = VIO_MSG_NOT_POLLED;
	refcount_set(&msg->users, 1);

	return msg;
}

static inline bool scmi_vio_msg_acquire(struct scmi_vio_msg *msg)
{
	return refcount_inc_not_zero(&msg->users);
}

/* Assumes to be called with vio channel acquired already */
static inline bool scmi_vio_msg_release(struct scmi_vio_channel *vioch,
					struct scmi_vio_msg *msg)
{
	bool ret;

	ret = refcount_dec_and_test(&msg->users);
	if (ret) {
		unsigned long flags;

		spin_lock_irqsave(&vioch->free_lock, flags);
		list_add_tail(&msg->list, &vioch->free_list);
		spin_unlock_irqrestore(&vioch->free_lock, flags);
	}

	return ret;
}

static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
{
	return virtio_has_feature(vdev, VIRTIO_SCMI_F_P2A_CHANNELS);
}

static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
			       struct scmi_vio_msg *msg)
{
	struct scatterlist sg_in;
	int rc;
	unsigned long flags;
	struct device *dev = &vioch->vqueue->vdev->dev;

	sg_init_one(&sg_in, msg->input, msg->max_len);

	spin_lock_irqsave(&vioch->lock, flags);

	rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(dev, "failed to add to RX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	return rc;
}

/*
 * Assume to be called with channel already acquired or not ready at all;
 * vioch->lock MUST NOT have been already acquired.
 */
static void scmi_finalize_message(struct scmi_vio_channel *vioch,
				  struct scmi_vio_msg *msg)
{
	if (vioch->is_rx)
		scmi_vio_feed_vq_rx(vioch, msg);
	else
		scmi_vio_msg_release(vioch, msg);
}

static void scmi_vio_complete_cb(struct virtqueue *vqueue)
{
	unsigned long flags;
	unsigned int length;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg;
	bool cb_enabled = true;

	if (WARN_ON_ONCE(!vqueue->vdev->priv))
		return;
	vioch = &((struct scmi_vio_channel *)vqueue->vdev->priv)[vqueue->index];

	for (;;) {
		if (!scmi_vio_channel_acquire(vioch))
			return;

		spin_lock_irqsave(&vioch->lock, flags);
		if (cb_enabled) {
			virtqueue_disable_cb(vqueue);
			cb_enabled = false;
		}

		msg = virtqueue_get_buf(vqueue, &length);
		if (!msg) {
			if (virtqueue_enable_cb(vqueue)) {
				spin_unlock_irqrestore(&vioch->lock, flags);
				scmi_vio_channel_release(vioch);
				return;
			}
			cb_enabled = true;
		}
		spin_unlock_irqrestore(&vioch->lock, flags);

		if (msg) {
			msg->rx_len = length;
			core->rx_callback(vioch->cinfo,
					  core->msg->read_header(msg->input),
					  msg);

			scmi_finalize_message(vioch, msg);
		}

		/*
		 * Release vio channel between loop iterations to allow
		 * virtio_chan_free() to eventually fully release it when
		 * shutting down; in such a case, any outstanding message will
		 * be ignored since this loop will bail out at the next
		 * iteration.
		 */
		scmi_vio_channel_release(vioch);
	}
}

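/*
 * Deferred Tx worker: kicked by virtio_poll_done() whenever the polling loop
 * has pre-fetched buffers belonging to other transactions, or has left used
 * buffers in the vqueue; it drains the pending_cmds_list and then lets
 * scmi_vio_complete_cb() process anything still queued.
 */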
static void scmi_vio_deferred_tx_worker(struct work_struct *work)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch;
	struct scmi_vio_msg *msg, *tmp;

	vioch = container_of(work, struct scmi_vio_channel, deferred_tx_work);

	if (!scmi_vio_channel_acquire(vioch))
		return;

	/*
	 * Process pre-fetched messages: these could be non-polled messages or
	 * late timed-out replies to polled messages dequeued by chance while
	 * polling for some other messages: this worker is in charge to process
	 * the valid non-expired messages and anyway finally free all of them.
	 */
	spin_lock_irqsave(&vioch->pending_lock, flags);

	/* Scan the list of possibly pre-fetched messages during polling. */
	list_for_each_entry_safe(msg, tmp, &vioch->pending_cmds_list, list) {
		list_del(&msg->list);

		/*
		 * Channel is acquired here (cannot vanish) and this message
		 * is no more processed elsewhere so no poll_lock needed.
		 */
		if (msg->poll_status == VIO_MSG_NOT_POLLED)
			core->rx_callback(vioch->cinfo,
					  core->msg->read_header(msg->input),
					  msg);

		/* Free the processed message once done */
		scmi_vio_msg_release(vioch, msg);
	}

	spin_unlock_irqrestore(&vioch->pending_lock, flags);

	/* Process possibly still pending messages */
	scmi_vio_complete_cb(vioch->vqueue);

	scmi_vio_channel_release(vioch);
}

static struct virtqueue_info scmi_vio_vqs_info[] = {
	{ "tx", scmi_vio_complete_cb },
	{ "rx", scmi_vio_complete_cb },
};

static unsigned int virtio_get_max_msg(struct scmi_chan_info *base_cinfo)
{
	struct scmi_vio_channel *vioch = base_cinfo->transport_info;

	return vioch->max_msg;
}

static bool virtio_chan_available(struct device_node *of_node, int idx)
{
	struct scmi_vio_channel *channels, *vioch = NULL;

	if (WARN_ON_ONCE(!scmi_vdev))
		return false;

	channels = (struct scmi_vio_channel *)scmi_vdev->priv;

	switch (idx) {
	case VIRTIO_SCMI_VQ_TX:
		vioch = &channels[VIRTIO_SCMI_VQ_TX];
		break;
	case VIRTIO_SCMI_VQ_RX:
		if (scmi_vio_have_vq_rx(scmi_vdev))
			vioch = &channels[VIRTIO_SCMI_VQ_RX];
		break;
	default:
		return false;
	}

	return vioch && !vioch->cinfo;
}

static void scmi_destroy_tx_workqueue(void *deferred_tx_wq)
{
	destroy_workqueue(deferred_tx_wq);
}

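/*
 * Channel setup: allocate the deferred Tx workqueue (Tx channels only), then
 * pre-allocate max_msg message buffers; Tx messages end up on the free_list
 * while Rx buffers are immediately queued on the eventq, before the channel
 * is finally marked as ready.
 */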
static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			     bool tx)
{
	struct scmi_vio_channel *vioch;
	int index = tx ? VIRTIO_SCMI_VQ_TX : VIRTIO_SCMI_VQ_RX;
	int i;

	if (!scmi_vdev)
		return -EPROBE_DEFER;

	vioch = &((struct scmi_vio_channel *)scmi_vdev->priv)[index];

	/* Setup a deferred worker for polling. */
	if (tx && !vioch->deferred_tx_wq) {
		int ret;

		vioch->deferred_tx_wq =
			alloc_workqueue(dev_name(&scmi_vdev->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
		if (!vioch->deferred_tx_wq)
			return -ENOMEM;

		ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue,
					       vioch->deferred_tx_wq);
		if (ret)
			return ret;

		INIT_WORK(&vioch->deferred_tx_work,
			  scmi_vio_deferred_tx_worker);
	}

	for (i = 0; i < vioch->max_msg; i++) {
		struct scmi_vio_msg *msg;

		msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->max_len = VIRTIO_SCMI_MAX_PDU_SIZE(cinfo);
		if (tx) {
			msg->request = devm_kzalloc(dev, msg->max_len,
						    GFP_KERNEL);
			if (!msg->request)
				return -ENOMEM;
			spin_lock_init(&msg->poll_lock);
			refcount_set(&msg->users, 1);
		}

		msg->input = devm_kzalloc(dev, msg->max_len, GFP_KERNEL);
		if (!msg->input)
			return -ENOMEM;

		scmi_finalize_message(vioch, msg);
	}

	scmi_vio_channel_ready(vioch, cinfo);

	return 0;
}

static int virtio_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	/*
	 * Break device to inhibit further traffic flowing while shutting down
	 * the channels: doing it later holding vioch->lock creates unsafe
	 * locking dependency chains as reported by LOCKDEP.
	 */
	virtio_break_device(vioch->vqueue->vdev);
	scmi_vio_channel_cleanup_sync(vioch);

	return 0;
}

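/*
 * Tx path: acquire the channel, pick a free message, prepare the request and
 * response SDUs and queue them as one out plus one in descriptor; when polling
 * was requested the message is additionally bound to the xfer via .priv and an
 * extra reference is taken for the poll path.
 */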
static int virtio_send_message(struct scmi_chan_info *cinfo,
			       struct scmi_xfer *xfer)
{
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scatterlist sg_out;
	struct scatterlist sg_in;
	struct scatterlist *sgs[DESCRIPTORS_PER_TX_MSG] = { &sg_out, &sg_in };
	unsigned long flags;
	int rc;
	struct scmi_vio_msg *msg;

	if (!scmi_vio_channel_acquire(vioch))
		return -EINVAL;

	msg = scmi_virtio_get_free_msg(vioch);
	if (!msg) {
		scmi_vio_channel_release(vioch);
		return -EBUSY;
	}

	core->msg->tx_prepare(msg->request, xfer);

	sg_init_one(&sg_out, msg->request, core->msg->command_size(xfer));
	sg_init_one(&sg_in, msg->input, core->msg->response_size(xfer));

	spin_lock_irqsave(&vioch->lock, flags);

	/*
	 * If polling was requested for this transaction:
	 *  - retrieve last used index (will be used as polling reference)
	 *  - bind the polled message to the xfer via .priv
	 *  - grab an additional msg refcount for the poll-path
	 */
	if (xfer->hdr.poll_completion) {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		/* Still no users, no need to acquire poll_lock */
		msg->poll_status = VIO_MSG_POLLING;
		scmi_vio_msg_acquire(msg);
		/* Ensure initialized msg is visibly bound to xfer */
		smp_store_mb(xfer->priv, msg);
	}

	rc = virtqueue_add_sgs(vioch->vqueue, sgs, 1, 1, msg, GFP_ATOMIC);
	if (rc)
		dev_err(vioch->cinfo->dev,
			"failed to add to TX virtqueue (%d)\n", rc);
	else
		virtqueue_kick(vioch->vqueue);

	spin_unlock_irqrestore(&vioch->lock, flags);

	if (rc) {
		/* Ensure order between xfer->priv clear and vq feeding */
		smp_store_mb(xfer->priv, NULL);
		if (xfer->hdr.poll_completion)
			scmi_vio_msg_release(vioch, msg);
		scmi_vio_msg_release(vioch, msg);
	}

	scmi_vio_channel_release(vioch);

	return rc;
}

static void virtio_fetch_response(struct scmi_chan_info *cinfo,
				  struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		core->msg->fetch_response(msg->input, msg->rx_len, xfer);
}

static void virtio_fetch_notification(struct scmi_chan_info *cinfo,
				      size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_vio_msg *msg = xfer->priv;

	if (msg)
		core->msg->fetch_notification(msg->input, msg->rx_len,
					      max_len, xfer);
}

/**
 * virtio_mark_txdone - Mark transmission done
 *
 * Free only completed polling transfer messages.
 *
 * Note that in the SCMI VirtIO transport we never explicitly release still
 * outstanding but timed-out messages by forcibly re-adding them to the
 * free-list inside the TX code path; we instead let IRQ/RX callbacks, or the
 * TX deferred worker, eventually clean up such messages once, finally, a late
 * reply is received and discarded (if ever).
 *
 * This approach was deemed preferable since those pending timed-out buffers are
 * still effectively owned by the SCMI platform VirtIO device even after timeout
 * expiration: forcibly freeing and reusing them before they had been returned
 * explicitly by the SCMI platform could lead to subtle bugs due to message
 * corruption.
 * An SCMI platform VirtIO device which never returns message buffers is
 * anyway broken and it will quickly lead to exhaustion of available messages.
 *
 * For this same reason, here, we take care to free only the polled messages
 * that had been somehow replied (only if not by chance already processed on the
 * IRQ path - the initial scmi_vio_msg_release() takes care of this) and also
 * any timed-out polled message if that indeed appears to have been at least
 * dequeued from the virtqueues (VIO_MSG_POLL_DONE): this is needed since such
 * messages won't be freed elsewhere. Any other polled message is marked as
 * VIO_MSG_POLL_TIMEOUT.
 *
 * Possible late replies to timed-out polled messages will be eventually freed
 * by RX callbacks if delivered on the IRQ path or by the deferred TX worker if
 * dequeued on some other polling path.
 *
 * @cinfo: SCMI channel info
 * @ret: Transmission return code
 * @xfer: Transfer descriptor
 */
static void virtio_mark_txdone(struct scmi_chan_info *cinfo, int ret,
			       struct scmi_xfer *xfer)
{
	unsigned long flags;
	struct scmi_vio_channel *vioch = cinfo->transport_info;
	struct scmi_vio_msg *msg = xfer->priv;

	if (!msg || !scmi_vio_channel_acquire(vioch))
		return;

	/* Ensure msg is unbound from xfer anyway at this point */
	smp_store_mb(xfer->priv, NULL);

	/* Must be a polled xfer and not already freed on the IRQ path */
	if (!xfer->hdr.poll_completion || scmi_vio_msg_release(vioch, msg)) {
		scmi_vio_channel_release(vioch);
		return;
	}

	spin_lock_irqsave(&msg->poll_lock, flags);
	/* Do not free timed-out polled messages only if still inflight */
	if (ret != -ETIMEDOUT || msg->poll_status == VIO_MSG_POLL_DONE)
		scmi_vio_msg_release(vioch, msg);
	else if (msg->poll_status == VIO_MSG_POLLING)
		msg->poll_status = VIO_MSG_POLL_TIMEOUT;
	spin_unlock_irqrestore(&msg->poll_lock, flags);

	scmi_vio_channel_release(vioch);
}

/**
 * virtio_poll_done - Provide polling support for VirtIO transport
 *
 * @cinfo: SCMI channel info
 * @xfer: Reference to the transfer being polled for.
 *
 * VirtIO core provides a polling mechanism based only on last used indexes:
 * this means that it is possible to poll the virtqueues waiting for something
 * new to arrive from the host side, but the only way to check if the freshly
 * arrived buffer was indeed what we were waiting for is to compare the newly
 * arrived message descriptor with the one we are polling on.
 *
 * As a consequence it can happen to dequeue something different from the buffer
 * we were poll-waiting for: if that is the case such early fetched buffers are
 * then added to the @pending_cmds_list list for later processing by a
 * dedicated deferred worker.
 *
 * So, basically, once something new is spotted we proceed to de-queue all the
 * freshly received used buffers until we find the one we were polling on, or,
 * we have 'seemingly' emptied the virtqueue; if some buffers are still pending
 * in the vqueue at the end of the polling loop (possible due to inherent races
 * in virtqueues handling mechanisms), we similarly kick the deferred worker
 * and let it process those, to avoid indefinitely looping in the .poll_done
 * busy-waiting helper.
 *
 * Finally, we delegate to the deferred worker also the final free of any timed
 * out reply to a polled message that we should dequeue.
 *
 * Note that, since we do NOT have a per-message suppress notification mechanism,
 * the message we are polling for could be alternatively delivered via usual
 * IRQs callbacks on another core which happened to have IRQs enabled while we
 * are actively polling for it here: in such a case it will be handled as such
 * by rx_callback() and the polling loop in the SCMI Core TX path will be
 * transparently terminated anyway.
 *
 * Return: True once polling has successfully completed.
 */
static bool virtio_poll_done(struct scmi_chan_info *cinfo,
			     struct scmi_xfer *xfer)
{
	bool pending, found = false;
	unsigned int length, any_prefetched = 0;
	unsigned long flags;
	struct scmi_vio_msg *next_msg, *msg = xfer->priv;
	struct scmi_vio_channel *vioch = cinfo->transport_info;

	if (!msg)
		return true;

	/*
	 * Processed already by other polling loop on another CPU ?
	 *
	 * Note that this message is acquired on the poll path so cannot vanish
	 * while inside this loop iteration even if concurrently processed on
	 * the IRQ path.
	 *
	 * Avoid acquiring poll_lock since poll_status can be changed
	 * in a relevant manner only later in this same thread of execution:
	 * any other possible changes made concurrently by other polling loops
	 * or by a reply delivered on the IRQ path have no meaningful impact on
	 * this loop iteration: in other words it is harmless to allow this
	 * possible race but let us avoid spinlocking with irqs off in this
	 * initial part of the polling loop.
	 */
	if (msg->poll_status == VIO_MSG_POLL_DONE)
		return true;

	if (!scmi_vio_channel_acquire(vioch))
		return true;

	/* Has cmdq index moved at all ? */
	pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	if (!pending) {
		scmi_vio_channel_release(vioch);
		return false;
	}

	spin_lock_irqsave(&vioch->lock, flags);
	virtqueue_disable_cb(vioch->vqueue);

	/*
	 * Process all new messages till the polled-for message is found OR
	 * the vqueue is empty.
	 */
	while ((next_msg = virtqueue_get_buf(vioch->vqueue, &length))) {
		bool next_msg_done = false;

		/*
		 * Mark any dequeued buffer message as VIO_MSG_POLL_DONE so
		 * that can be properly freed even on timeout in mark_txdone.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_POLLING) {
			next_msg->poll_status = VIO_MSG_POLL_DONE;
			next_msg_done = true;
		}
		spin_unlock(&next_msg->poll_lock);

		next_msg->rx_len = length;
		/* Is the message we were polling for ? */
		if (next_msg == msg) {
			found = true;
			break;
		} else if (next_msg_done) {
			/* Skip the rest if this was another polled msg */
			continue;
		}

		/*
		 * Enqueue for later processing any non-polled message and any
		 * timed-out polled one that we happen to have dequeued.
		 */
		spin_lock(&next_msg->poll_lock);
		if (next_msg->poll_status == VIO_MSG_NOT_POLLED ||
		    next_msg->poll_status == VIO_MSG_POLL_TIMEOUT) {
			spin_unlock(&next_msg->poll_lock);

			any_prefetched++;
			spin_lock(&vioch->pending_lock);
			list_add_tail(&next_msg->list,
				      &vioch->pending_cmds_list);
			spin_unlock(&vioch->pending_lock);
		} else {
			spin_unlock(&next_msg->poll_lock);
		}
	}

	/*
	 * When the polling loop has successfully terminated if something
	 * else was queued in the meantime, it will be served by a deferred
	 * worker OR by the normal IRQ/callback OR by other poll loops.
	 *
	 * If we are still looking for the polled reply, the polling index has
	 * to be updated to the current vqueue last used index.
	 */
	if (found) {
		pending = !virtqueue_enable_cb(vioch->vqueue);
	} else {
		msg->poll_idx = virtqueue_enable_cb_prepare(vioch->vqueue);
		pending = virtqueue_poll(vioch->vqueue, msg->poll_idx);
	}

	if (vioch->deferred_tx_wq && (any_prefetched || pending))
		queue_work(vioch->deferred_tx_wq, &vioch->deferred_tx_work);

	spin_unlock_irqrestore(&vioch->lock, flags);

	scmi_vio_channel_release(vioch);

	return found;
}

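/*
 * Transport operations exposed to the SCMI core; .mark_txdone and .poll_done
 * implement the polling support described above.
 */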
static const struct scmi_transport_ops scmi_virtio_ops = {
	.chan_available = virtio_chan_available,
	.chan_setup = virtio_chan_setup,
	.chan_free = virtio_chan_free,
	.get_max_msg = virtio_get_max_msg,
	.send_message = virtio_send_message,
	.fetch_response = virtio_fetch_response,
	.fetch_notification = virtio_fetch_notification,
	.mark_txdone = virtio_mark_txdone,
	.poll_done = virtio_poll_done,
};

static struct scmi_desc scmi_virtio_desc = {
	.ops = &scmi_virtio_ops,
	/* for non-realtime virtio devices */
	.max_rx_timeout_ms = VIRTIO_MAX_RX_TIMEOUT_MS,
	.max_msg = 0, /* overridden by virtio_get_max_msg() */
	.max_msg_size = VIRTIO_SCMI_MAX_MSG_SIZE,
	.atomic_enabled = IS_ENABLED(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE),
};

static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi-virtio" },
	{ /* Sentinel */ },
};

DEFINE_SCMI_TRANSPORT_DRIVER(scmi_virtio, scmi_virtio_driver, scmi_virtio_desc,
			     scmi_of_match, core);

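/*
 * Probe: find the Tx (and optional Rx) virtqueues, derive each channel's
 * max_msg from the vring size (divided by DESCRIPTORS_PER_TX_MSG for Tx and
 * capped at MSG_TOKEN_MAX), then publish the device via scmi_vdev and
 * register the SCMI platform driver.
 */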
static int scmi_vio_probe(struct virtio_device *vdev)
{
	struct device *dev = &vdev->dev;
	struct scmi_vio_channel *channels;
	bool have_vq_rx;
	int vq_cnt;
	int i;
	int ret;
	struct virtqueue *vqs[VIRTIO_SCMI_VQ_MAX_CNT];

	/* Only one SCMI VirtIO device allowed */
	if (scmi_vdev) {
		dev_err(dev,
			"One SCMI Virtio device was already initialized: only one allowed.\n");
		return -EBUSY;
	}

	have_vq_rx = scmi_vio_have_vq_rx(vdev);
	vq_cnt = have_vq_rx ? VIRTIO_SCMI_VQ_MAX_CNT : 1;

	channels = devm_kcalloc(dev, vq_cnt, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	if (have_vq_rx)
		channels[VIRTIO_SCMI_VQ_RX].is_rx = true;

	ret = virtio_find_vqs(vdev, vq_cnt, vqs, scmi_vio_vqs_info, NULL);
	if (ret) {
		dev_err(dev, "Failed to get %d virtqueue(s)\n", vq_cnt);
		return ret;
	}

	for (i = 0; i < vq_cnt; i++) {
		unsigned int sz;

		spin_lock_init(&channels[i].lock);
		spin_lock_init(&channels[i].free_lock);
		INIT_LIST_HEAD(&channels[i].free_list);
		spin_lock_init(&channels[i].pending_lock);
		INIT_LIST_HEAD(&channels[i].pending_cmds_list);
		channels[i].vqueue = vqs[i];

		sz = virtqueue_get_vring_size(channels[i].vqueue);
		/* Tx messages need multiple descriptors. */
		if (!channels[i].is_rx)
			sz /= DESCRIPTORS_PER_TX_MSG;

		if (sz > MSG_TOKEN_MAX) {
			dev_info(dev,
				 "%s virtqueue could hold %d messages. Only %ld allowed to be pending.\n",
				 channels[i].is_rx ? "rx" : "tx",
				 sz, MSG_TOKEN_MAX);
			sz = MSG_TOKEN_MAX;
		}
		channels[i].max_msg = sz;
	}

	vdev->priv = channels;

	/* Ensure initialized scmi_vdev is visible */
	smp_store_mb(scmi_vdev, vdev);

	ret = platform_driver_register(&scmi_virtio_driver);
	if (ret) {
		vdev->priv = NULL;
		vdev->config->del_vqs(vdev);
		/* Ensure NULLified scmi_vdev is visible */
		smp_store_mb(scmi_vdev, NULL);

		return ret;
	}

	return 0;
}

static void scmi_vio_remove(struct virtio_device *vdev)
{
	platform_driver_unregister(&scmi_virtio_driver);

	/*
	 * Once we get here, virtio_chan_free() will have already been called by
	 * the SCMI core for any existing channel and, as a consequence, all the
	 * virtio channels will have been already marked NOT ready, causing any
	 * outstanding message on any vqueue to be ignored by complete_cb: now
	 * we can just stop processing buffers and destroy the vqueues.
	 */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
	/* Ensure scmi_vdev is visible as NULL */
	smp_store_mb(scmi_vdev, NULL);
}

static int scmi_vio_validate(struct virtio_device *vdev)
{
#ifdef CONFIG_ARM_SCMI_TRANSPORT_VIRTIO_VERSION1_COMPLIANCE
	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev,
			"device does not comply with spec version 1.x\n");
		return -EINVAL;
	}
#endif
	return 0;
}

static unsigned int features[] = {
	VIRTIO_SCMI_F_P2A_CHANNELS,
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_SCMI, VIRTIO_DEV_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_scmi_driver = {
	.driver.name = "scmi-virtio",
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = scmi_vio_probe,
	.remove = scmi_vio_remove,
	.validate = scmi_vio_validate,
};

module_virtio_driver(virtio_scmi_driver);

MODULE_AUTHOR("Igor Skalkin <[email protected]>");
MODULE_AUTHOR("Peter Hilber <[email protected]>");
MODULE_AUTHOR("Cristian Marussi <[email protected]>");
MODULE_DESCRIPTION("SCMI VirtIO Transport driver");
MODULE_LICENSE("GPL");