// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Notification support
 *
 * Copyright (C) 2020-2021 ARM Ltd.
 */
/**
 * DOC: Theory of operation
 *
 * SCMI Protocol specification allows the platform to signal events to
 * interested agents via notification messages: this is an implementation
 * of the dispatch and delivery of such notifications to the interested users
 * inside the Linux kernel.
 *
 * An SCMI Notification core instance is initialized for each active platform
 * instance identified by the means of the usual &struct scmi_handle.
 *
 * Each SCMI Protocol implementation, during its initialization, registers with
 * this core its set of supported events using scmi_register_protocol_events():
 * all the needed descriptors are stored in the &struct registered_protocols and
 * &struct registered_events arrays.
 *
 * Kernel users interested in some specific event can register their callbacks
 * providing the usual notifier_block descriptor, since this core implements
 * events' delivery using the standard Kernel notification chains machinery.
 *
 * Given the number of possible events defined by SCMI and the extensibility
 * of the SCMI Protocol itself, the underlying notification chains are created
 * and destroyed dynamically on demand depending on the number of users
 * effectively registered for an event, so that no support structures or chains
 * are allocated until at least one user has registered a notifier_block for
 * such event. Similarly, events' generation itself is enabled at the platform
 * level only after at least one user has registered, and it is shut down after
 * the last user for that event has gone.
 *
 * All user-provided callbacks and allocated notification-chains are stored in
 * the @registered_events_handlers hashtable. Callbacks' registration requests
 * for still to be registered events are instead kept in the dedicated common
 * hashtable @pending_events_handlers.
 *
 * An event is identified uniquely by the tuple (proto_id, evt_id, src_id)
 * and is served by its own dedicated notification chain; information contained
 * in such tuples is used, in a few different ways, to generate the needed
 * hash-keys.
 *
 * Here proto_id and evt_id are simply the protocol_id and message_id numbers
 * as described in the SCMI Protocol specification, while src_id represents an
 * optional, protocol dependent, source identifier (like domain_id, perf_id
 * or sensor_id and so forth).
 *
 * Upon reception of a notification message from the platform the SCMI RX ISR
 * passes the received message payload and some ancillary information (including
 * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
 * pushes the event-data itself on a protocol-dedicated kfifo queue for further
 * deferred processing as specified in @scmi_events_dispatcher().
 *
 * Each protocol has its own dedicated work_struct and worker which, once kicked
 * by the ISR, takes care to empty its own dedicated queue, delivering the
 * queued items into the proper notification-chain: notifications processing can
 * proceed concurrently on distinct workers only between events belonging to
 * different protocols while delivery of events within the same protocol is
 * still strictly sequentially ordered by time of arrival.
 *
 * Events' information is then extracted from the SCMI Notification messages and
 * conveyed, converted into a custom per-event report struct, as the void *data
 * param to the user callback provided by the registered notifier_block, so that
 * from the user perspective the callback will be invoked like:
 *
 *  int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
 *
 */

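/*
 * Illustrative usage sketch (not part of this file's API): how an SCMI driver
 * user could register a callback for a protocol event, assuming the sensor
 * protocol's trip-point event and its report layout as declared in
 * include/linux/scmi_protocol.h; names like my_sensor_trip_cb and my_nb are
 * hypothetical placeholders.
 *
 *	static int my_sensor_trip_cb(struct notifier_block *nb,
 *				     unsigned long event_id, void *data)
 *	{
 *		struct scmi_sensor_trip_point_report *r = data;
 *
 *		pr_info("sensor %u crossed trip point %u\n",
 *			r->sensor_id, r->trip_point_desc);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_sensor_trip_cb };
 *
 *	// src_id == NULL means "any source"; pass &sensor_id to filter one source
 *	handle->notify_ops->event_notifier_register(handle, SCMI_PROTOCOL_SENSOR,
 *						    SCMI_EVENT_SENSOR_TRIP_POINT_EVENT,
 *						    NULL, &my_nb);
 */
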
#define dev_fmt(fmt) "SCMI Notifications - " fmt
#define pr_fmt(fmt) "SCMI Notifications - " fmt

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "common.h"
#include "notify.h"

#define SCMI_MAX_PROTO		256

#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)
#define NOTIF_UNSUPP		-1

/*
 * Builds an unsigned 32bit key from the given input tuple to be used
 * as a key in hashtables.
 */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EVT_ID_MASK, (e)) |		\
	 FIELD_PREP(SRC_ID_MASK, (s)))

#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)

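/*
 * For instance (values purely illustrative): MAKE_HASH_KEY(0x15, 0x0, 2) packs
 * proto_id 0x15, evt_id 0x0 and src_id 2 into the key 0x15000002, while the
 * catch-all MAKE_ALL_SRCS_KEY(0x15, 0x0) yields 0x1500FFFF, i.e. the same
 * tuple with the src_id field saturated to SRC_ID_MASK.
 */
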
/*
 * Assumes that the stored obj includes its own hash-key in a field named 'key':
 * with this simplification this macro can be equally used for all the objects'
 * types hashed by this implementation.
 *
 * @__ht: The hashtable name
 * @__obj: A pointer to the object type to be retrieved from the hashtable;
 *	   it will be used as a cursor while scanning the hashtable and it will
 *	   be possibly left as NULL when @__k is not found
 * @__k: The key to search for
 */
#define KEY_FIND(__ht, __obj, __k)				\
({								\
	typeof(__k) k_ = __k;					\
	typeof(__obj) obj_;					\
								\
	hash_for_each_possible((__ht), obj_, hash, k_)		\
		if (obj_->key == k_)				\
			break;					\
	__obj = obj_;						\
})

#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))

/*
 * A set of macros used to access safely @registered_protocols and
 * @registered_events arrays; these are fixed in size and each entry is possibly
 * populated at protocols' registration time and then only read but NEVER
 * modified or removed.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})

/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)			\
({									\
	typeof(revt) r = revt;						\
	r->proto->ops->set_notify_enabled(r->proto->ph,			\
					  (eid), (sid), (state));	\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)				\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)				\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

#define REVT_FILL_REPORT(revt, ...)					\
({									\
	typeof(revt) r = revt;						\
	r->proto->ops->fill_custom_report(r->proto->ph,			\
					  __VA_ARGS__);			\
})

#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6

struct scmi_registered_events_desc;

/**
 * struct scmi_notify_instance - Represents an instance of the notification
 * core
 * @gid: GroupID used for devres
 * @handle: A reference to the platform instance
 * @init_work: A work item to perform final initializations of pending handlers
 * @notify_wq: A reference to the allocated Kernel cmwq
 * @pending_mtx: A mutex to protect @pending_events_handlers
 * @registered_protocols: A statically allocated array containing pointers to
 *			  all the registered protocol-level specific information
 *			  related to events' handling
 * @pending_events_handlers: A hashtable containing all pending events'
 *			     handlers descriptors
 *
 * Each platform instance, represented by a handle, has its own instance of
 * the notification subsystem represented by this structure.
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;
	/* lock to protect pending_events_handlers */
	struct mutex		pending_mtx;
	struct scmi_registered_events_desc	**registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};

/**
 * struct events_queue - Describes a queue and its associated worker
 * @sz: Size in bytes of the related kfifo
 * @kfifo: A dedicated Kernel kfifo descriptor
 * @notify_work: A custom work item bound to this queue
 * @wq: A reference to the associated workqueue
 *
 * Each protocol has its own dedicated events_queue descriptor.
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};

/**
 * struct scmi_event_header - A utility header
 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
 *	       to this event as soon as it entered the SCMI RX ISR
 * @payld_sz: Effective size of the embedded message payload which follows
 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
 * @payld: A reference to the embedded event payload
 *
 * This header is prepended to each received event message payload before
 * queueing it on the related &struct events_queue.
 */
struct scmi_event_header {
	ktime_t		timestamp;
	size_t		payld_sz;
	unsigned char	evt_id;
	unsigned char	payld[];
};

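/*
 * Note on the queued record layout (illustrative, exact sizes are arch
 * dependent): each event enqueued by scmi_notify() occupies
 * sizeof(struct scmi_event_header) bytes of header immediately followed by
 * payld_sz bytes of raw message payload, so an event carrying, say, a 12-byte
 * payload consumes sizeof(struct scmi_event_header) + 12 bytes of the
 * per-protocol kfifo; the dispatcher then dequeues the two parts with two
 * matching kfifo_out() calls.
 */
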
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc - Protocol Specific information
 * @id: Protocol ID
 * @ops: Protocol specific and event-related operations
 * @equeue: The embedded per-protocol events_queue
 * @ni: A reference to the initialized instance descriptor
 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
 *	deferred worker when fetching data from the kfifo
 * @eh_sz: Size of the pre-allocated buffer @eh
 * @in_flight: A reference to an in flight &struct scmi_registered_event
 * @num_events: Number of events in @registered_events
 * @registered_events: A dynamically allocated array holding all the registered
 *		       events' descriptors, whose fixed-size is determined at
 *		       compile time.
 * @registered_mtx: A mutex to protect @registered_events_handlers
 * @ph: SCMI protocol handle reference
 * @registered_events_handlers: A hashtable containing all events' handlers
 *				descriptors registered for this protocol
 *
 * All protocols that register at least one event have their protocol-specific
 * information stored here, together with the embedded allocated events_queue.
 * These descriptors are stored in the @registered_protocols array at protocol
 * registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that, once
 * we safely grab a NON-NULL reference from the array we can keep it and use it.
 */
struct scmi_registered_events_desc {
	u8				id;
	const struct scmi_event_ops	*ops;
	struct events_queue		equeue;
	struct scmi_notify_instance	*ni;
	struct scmi_event_header	*eh;
	size_t				eh_sz;
	void				*in_flight;
	int				num_events;
	struct scmi_registered_event	**registered_events;
	/* mutex to protect registered_events_handlers */
	struct mutex			registered_mtx;
	const struct scmi_protocol_handle	*ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};

/**
 * struct scmi_registered_event - Event Specific Information
 * @proto: A reference to the associated protocol descriptor
 * @evt: A reference to the associated event descriptor (as provided at
 *	 registration time)
 * @report: A pre-allocated buffer used by the deferred worker to fill a
 *	    customized event report
 * @num_sources: The number of possible sources for this event as stated at
 *		 events' registration time
 * @not_supported_by_platform: A flag to indicate that not even one source was
 *			       found to be supported by the platform for this
 *			       event
 * @sources: A reference to a dynamically allocated array used to refcount the
 *	     events' enable requests for all the existing sources
 * @sources_mtx: A mutex to serialize the access to @sources
 *
 * All registered events are represented by one of these structures that are
 * stored in the @registered_events array at protocol registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that once we
 * safely grab a NON-NULL reference from the table we can keep it and use it.
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event	*evt;
	void		*report;
	u32		num_sources;
	bool		not_supported_by_platform;
	refcount_t	*sources;
	/* locking to serialize the access to sources */
	struct mutex	sources_mtx;
};

/**
 * struct scmi_event_handler - Event handler information
 * @key: The used hashkey
 * @users: A reference count for number of active users for this handler
 * @r_evt: A reference to the associated registered event; when this is NULL
 *	   this handler is pending, which means that it identifies a set of
 *	   callbacks intended to be attached to an event which is still not
 *	   known nor registered by any protocol at that point in time
 * @chain: The notification chain dedicated to this specific event tuple
 * @hash: The hlist_node used for collision handling
 * @enabled: A boolean which records if event's generation has been already
 *	     enabled for this handler as a whole
 *
 * This structure collects all the information needed to process a received
 * event identified by the tuple (proto_id, evt_id, src_id).
 * These descriptors are stored in a per-protocol @registered_events_handlers
 * table using as a key a value derived from that tuple.
 */
struct scmi_event_handler {
	u32				key;
	refcount_t			users;
	struct scmi_registered_event	*r_evt;
	struct blocking_notifier_head	chain;
	struct hlist_node		hash;
	bool				enabled;
};

#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)

static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl);

/**
 * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
 * @ni: A reference to the notification instance to use
 * @evt_key: The key to use to lookup the related notification chain
 * @report: The customized event-specific report to pass down to the callbacks
 *	    as their *data parameter.
 */
static inline void
scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
				 u32 evt_key, void *report)
{
	int ret;
	struct scmi_event_handler *hndl;

	/*
	 * Here ensure the event handler cannot vanish while using it.
	 * It is legitimate, though, for a handler not to be found at all here,
	 * e.g. when it has been unregistered by the user after some events had
	 * already been queued.
	 */
	hndl = scmi_get_active_handler(ni, evt_key);
	if (!hndl)
		return;

	ret = blocking_notifier_call_chain(&hndl->chain,
					   KEY_XTRACT_EVT_ID(evt_key),
					   report);
	/* Notifiers are NOT supposed to cut the chain ... */
	WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);

	scmi_put_active_handler(ni, hndl);
}

/**
 * scmi_process_event_header() - Dequeue and process an event header
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 *
 * Read an event header from the protocol queue into the dedicated scratch
 * buffer and look for a matching registered event; in case an anomalously
 * sized read is detected just flush the queue.
 *
 * Return:
 * * a reference to the matching registered event when found
 * * ERR_PTR(-EINVAL) when NO registered event could be found
 * * NULL when the queue is empty
 */
static inline struct scmi_registered_event *
scmi_process_event_header(struct events_queue *eq,
			  struct scmi_registered_events_desc *pd)
{
	unsigned int outs;
	struct scmi_registered_event *r_evt;

	outs = kfifo_out(&eq->kfifo, pd->eh,
			 sizeof(struct scmi_event_header));
	if (!outs)
		return NULL;
	if (outs != sizeof(struct scmi_event_header)) {
		dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return NULL;
	}

	r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
	if (!r_evt)
		r_evt = ERR_PTR(-EINVAL);

	return r_evt;
}

/**
 * scmi_process_event_payload() - Dequeue and process an event payload
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 * @r_evt: The registered event descriptor to use
 *
 * Read an event payload from the protocol queue into the dedicated scratch
 * buffer, fill a custom report and then look for matching event handlers and
 * call them; skip any unknown event (as marked by scmi_process_event_header())
 * and in case an anomalously sized read is detected just flush the queue.
 *
 * Return: False when the queue is empty
 */
static inline bool
scmi_process_event_payload(struct events_queue *eq,
			   struct scmi_registered_events_desc *pd,
			   struct scmi_registered_event *r_evt)
{
	u32 src_id, key;
	unsigned int outs;
	void *report = NULL;

	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
	if (!outs)
		return false;

	/* Any in-flight event has now been officially processed */
	pd->in_flight = NULL;

	if (outs != pd->eh->payld_sz) {
		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return false;
	}

	if (IS_ERR(r_evt)) {
		dev_warn(pd->ni->handle->dev,
			 "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
			 pd->id, pd->eh->evt_id);
		return true;
	}

	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
				  pd->eh->payld, pd->eh->payld_sz,
				  r_evt->report, &src_id);
	if (!report) {
		dev_err(pd->ni->handle->dev,
			"report not available - proto:%X evt:%d\n",
			pd->id, pd->eh->evt_id);
		return true;
	}

	/* At first search for a generic ALL src_ids handler... */
	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	/* ...then search for any specific src_id */
	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	return true;
}

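/*
 * For example (values purely illustrative): a sensor trip-point event
 * (proto_id 0x15, evt_id 0x0) reported for source src_id 2 is delivered twice,
 * first on the catch-all chain keyed 0x1500FFFF (users registered with a NULL
 * src_id) and then on the source-specific chain keyed 0x15000002, each time
 * passing the very same report buffer to the registered callbacks.
 */
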
/**
 * scmi_events_dispatcher() - Common worker logic for all work items.
 * @work: The work item to use, which is associated to a dedicated events_queue
 *
 * Logic:
 *  1. dequeue one pending RX notification (queued in SCMI RX ISR context)
 *  2. generate a custom event report from the received event message
 *  3. lookup for any registered ALL_SRC_IDs handler:
 *     - > call the related notification chain passing in the report
 *  4. lookup for any registered specific SRC_ID handler:
 *     - > call the related notification chain passing in the report
 *
 * Note that:
 * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
 *   flood of events cannot saturate other protocols' queues.
 * * each per-protocol queue is associated to a distinct work_item, which
 *   means, in turn, that:
 *   + all protocols can process their dedicated queues concurrently
 *     (since notify_wq:max_active != 1)
 *   + anyway at most one worker instance is allowed to run on the same queue
 *     concurrently: this ensures that we can have only one concurrent
 *     reader/writer on the associated kfifo, so that we can use it lock-less
 *
 * Context: Process context.
 */
static void scmi_events_dispatcher(struct work_struct *work)
{
	struct events_queue *eq;
	struct scmi_registered_events_desc *pd;
	struct scmi_registered_event *r_evt;

	eq = container_of(work, struct events_queue, notify_work);
	pd = container_of(eq, struct scmi_registered_events_desc, equeue);
	/*
	 * In order to keep the queue lock-less and the number of memcopies
	 * to the bare minimum needed, the dispatcher accounts for the
	 * possibility of per-protocol in-flight events: i.e. an event whose
	 * reception could end up being split across two subsequent runs of this
	 * worker, first the header, then the payload.
	 */
	do {
		if (!pd->in_flight) {
			r_evt = scmi_process_event_header(eq, pd);
			if (!r_evt)
				break;
			pd->in_flight = r_evt;
		} else {
			r_evt = pd->in_flight;
		}
	} while (scmi_process_event_payload(eq, pd, r_evt));
}

/**
 * scmi_notify() - Queues a notification for further deferred processing
 * @handle: The handle identifying the platform instance from which the
 *	    dispatched event is generated
 * @proto_id: Protocol ID
 * @evt_id: Event ID (msgID)
 * @buf: Event Message Payload (without the header)
 * @len: Event Message Payload size
 * @ts: RX Timestamp in nanoseconds (boottime)
 *
 * Context: Called in interrupt context to queue a received event for
 * deferred processing.
 *
 * Return: 0 on Success
 */
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
		const void *buf, size_t len, ktime_t ts)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_header eh;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return 0;

	r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
	if (!r_evt)
		return -EINVAL;

	if (len > r_evt->evt->max_payld_sz) {
		dev_err(handle->dev, "discard badly sized message\n");
		return -EINVAL;
	}
	if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
		dev_warn(handle->dev,
			 "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
			 proto_id, evt_id, ktime_to_ns(ts));
		return -ENOMEM;
	}

	eh.timestamp = ts;
	eh.evt_id = evt_id;
	eh.payld_sz = len;
	/*
	 * Header and payload are enqueued with two distinct kfifo_in() (so non
	 * atomic), but this situation is handled properly on the consumer side
	 * with in-flight events tracking.
	 */
	kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
	kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
	/*
	 * Don't care about return value here since we just want to ensure that
	 * a work is queued every time some items have been pushed on the kfifo:
	 *  - if work was already queued it will simply fail to queue a new one
	 *    since it is not needed
	 *  - if work was not queued already it will be now, even in case work
	 *    was in fact already running: this behavior avoids any possible
	 *    race when this function pushes new items onto the kfifos after the
	 *    related executing worker had already determined the kfifo to be
	 *    empty and it was terminating.
	 */
	queue_work(r_evt->proto->equeue.wq,
		   &r_evt->proto->equeue.notify_work);

	return 0;
}

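/*
 * Illustrative sketch (not part of this file): how a transport RX path could
 * feed the core, assuming it has already validated the message and decoded
 * proto_id and evt_id from the message header; scmi_decode_hdr(), msg_hdr and
 * xfer->rx are hypothetical placeholders.
 *
 *	ktime_t ts = ktime_get_boottime();
 *	u8 proto_id, evt_id;
 *
 *	scmi_decode_hdr(msg_hdr, &proto_id, &evt_id);
 *	scmi_notify(handle, proto_id, evt_id, xfer->rx.buf, xfer->rx.len, ts);
 */
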
/**
 * scmi_kfifo_free() - Devres action helper to free the kfifo
 * @kfifo: The kfifo to free
 */
static void scmi_kfifo_free(void *kfifo)
{
	kfifo_free((struct kfifo *)kfifo);
}

/**
 * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
 * @ni: A reference to the notification instance to use
 * @equeue: The events_queue to initialize
 * @sz: Size of the kfifo buffer to allocate
 *
 * Allocate a buffer for the kfifo and initialize it.
 *
 * Return: 0 on Success
 */
static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
					struct events_queue *equeue, size_t sz)
{
	int ret;

	if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
		return -ENOMEM;
	/* Size could have been roundup to power-of-two */
	equeue->sz = kfifo_size(&equeue->kfifo);

	ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
				       &equeue->kfifo);
	if (ret)
		return ret;

	INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
	equeue->wq = ni->notify_wq;

	return ret;
}

/**
 * scmi_allocate_registered_events_desc() - Allocate a registered events'
 * descriptor
 * @ni: A reference to the &struct scmi_notify_instance notification instance
 *	to use
 * @proto_id: Protocol ID
 * @queue_sz: Size of the associated queue to allocate
 * @eh_sz: Size of the event header scratch area to pre-allocate
 * @num_events: Number of events to support (size of @registered_events)
 * @ops: Pointer to a struct holding references to protocol specific helpers
 *	 needed during events handling
 *
 * It is supposed to be called only once for each protocol at protocol
 * initialization time, so it warns if the requested protocol is found already
 * registered.
 *
 * Return: The allocated and registered descriptor on Success
 */
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
				     u8 proto_id, size_t queue_sz, size_t eh_sz,
				     int num_events,
				     const struct scmi_event_ops *ops)
{
	int ret;
	struct scmi_registered_events_desc *pd;

	/* Ensure protocols are up to date */
	smp_rmb();
	if (WARN_ON(ni->registered_protocols[proto_id]))
		return ERR_PTR(-EINVAL);

	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->id = proto_id;
	pd->ops = ops;
	pd->ni = ni;

	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
	if (ret)
		return ERR_PTR(ret);

	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
	if (!pd->eh)
		return ERR_PTR(-ENOMEM);
	pd->eh_sz = eh_sz;

	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
					     sizeof(char *), GFP_KERNEL);
	if (!pd->registered_events)
		return ERR_PTR(-ENOMEM);
	pd->num_events = num_events;

	/* Initialize per protocol handlers table */
	mutex_init(&pd->registered_mtx);
	hash_init(pd->registered_events_handlers);

	return pd;
}

/**
 * scmi_register_protocol_events() - Register Protocol Events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 * @ph: SCMI protocol handle.
 * @ee: A structure describing the events supported by this protocol.
 *
 * Used by SCMI Protocols initialization code to register with the notification
 * core the list of supported events and their descriptors: takes care to
 * pre-allocate and store all needed descriptors, scratch buffers and event
 * queues.
 *
 * Return: 0 on Success
 */
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
				  const struct scmi_protocol_handle *ph,
				  const struct scmi_protocol_events *ee)
{
	int i;
	unsigned int num_sources;
	size_t payld_sz = 0;
	struct scmi_registered_events_desc *pd;
	struct scmi_notify_instance *ni;
	const struct scmi_event *evt;

	if (!ee || !ee->ops || !ee->evts || !ph ||
	    (!ee->num_sources && !ee->ops->get_num_sources))
		return -EINVAL;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENOMEM;

	/* num_sources cannot be <= 0 */
	if (ee->num_sources) {
		num_sources = ee->num_sources;
	} else {
		int nsrc = ee->ops->get_num_sources(ph);

		if (nsrc <= 0)
			return -EINVAL;
		num_sources = nsrc;
	}

	evt = ee->evts;
	for (i = 0; i < ee->num_events; i++)
		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
	payld_sz += sizeof(struct scmi_event_header);

	pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
						  payld_sz, ee->num_events,
						  ee->ops);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	pd->ph = ph;
	for (i = 0; i < ee->num_events; i++, evt++) {
		int id;
		struct scmi_registered_event *r_evt;

		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
				     GFP_KERNEL);
		if (!r_evt)
			return -ENOMEM;
		r_evt->proto = pd;
		r_evt->evt = evt;

		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
					      sizeof(refcount_t), GFP_KERNEL);
		if (!r_evt->sources)
			return -ENOMEM;
		r_evt->num_sources = num_sources;
		mutex_init(&r_evt->sources_mtx);

		r_evt->report = devm_kzalloc(ni->handle->dev,
					     evt->max_report_sz, GFP_KERNEL);
		if (!r_evt->report)
			return -ENOMEM;

		if (ee->ops->is_notify_supported) {
			int supported = 0;

			for (id = 0; id < r_evt->num_sources; id++) {
				if (!ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
					refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);
				else
					supported++;
			}

			/* Not even one source has been found to be supported */
			r_evt->not_supported_by_platform = !supported;
		}

		pd->registered_events[i] = r_evt;
		/* Ensure events are updated */
		smp_wmb();
		dev_dbg(handle->dev, "registered event - %lX\n",
			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
	}

	/* Register protocol and events...it will never be removed */
	ni->registered_protocols[proto_id] = pd;
	/* Ensure protocols are updated */
	smp_wmb();

	/*
	 * Finalize any pending events' handler which could have been waiting
	 * for this protocol's events registration.
	 */
	schedule_work(&ni->init_work);

	return 0;
}

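/*
 * Illustrative sketch (hypothetical protocol code, using only the descriptor
 * fields accessed above and declared in notify.h): a protocol advertising a
 * single event could describe it along these lines, with MY_PROTO_ID,
 * MY_EVT_ID, my_event_ops and the payload/report structs being placeholders.
 *
 *	static const struct scmi_event my_events[] = {
 *		{
 *			.id = MY_EVT_ID,
 *			.max_payld_sz = sizeof(struct my_evt_payload),
 *			.max_report_sz = sizeof(struct my_evt_report),
 *		},
 *	};
 *
 *	static const struct scmi_protocol_events my_protocol_events = {
 *		.queue_sz = 16 * sizeof(struct my_evt_payload),
 *		.ops = &my_event_ops,
 *		.evts = my_events,
 *		.num_events = ARRAY_SIZE(my_events),
 *		.num_sources = MY_NUM_DOMAINS,
 *	};
 *
 * and the registration call then takes the shape:
 *
 *	scmi_register_protocol_events(handle, MY_PROTO_ID, ph, &my_protocol_events);
 *
 * With a single event the scratch header buffer ends up sized as
 * sizeof(struct scmi_event_header) + sizeof(struct my_evt_payload), as
 * computed by the payld_sz loop above.
 */
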
/**
 * scmi_deregister_protocol_events  - Deregister protocol events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 */
void scmi_deregister_protocol_events(const struct scmi_handle *handle,
				     u8 proto_id)
{
	struct scmi_notify_instance *ni;
	struct scmi_registered_events_desc *pd;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;

	pd = ni->registered_protocols[proto_id];
	if (!pd)
		return;

	ni->registered_protocols[proto_id] = NULL;
	/* Ensure protocols are updated */
	smp_wmb();

	cancel_work_sync(&pd->equeue.notify_work);
}

/**
 * scmi_allocate_event_handler() - Allocate Event handler
 * @ni: A reference to the notification instance to use
 * @evt_key: 32bit key uniquely bound to the event identified by the tuple
 *	     (proto_id, evt_id, src_id)
 *
 * Allocate an event handler and related notification chain associated with
 * the provided event handler key.
 * Note that, at this point, a related registered_event is still to be
 * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
 * is initialized as pending.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 * Return: the freshly allocated structure on Success
 */
static struct scmi_event_handler *
scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_event_handler *hndl;

	hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
	if (!hndl)
		return NULL;
	hndl->key = evt_key;
	BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
	refcount_set(&hndl->users, 1);
	/* New handlers are created pending */
	hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);

	return hndl;
}

/**
 * scmi_free_event_handler() - Free the provided Event handler
 * @hndl: The event handler structure to free
 *
 * Context: Assumes to be called with proper locking acquired depending
 *	    on the situation.
 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	hash_del(&hndl->hash);
	kfree(hndl);
}

/**
 * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to bind
 *
 * If an associated registered event is found, move the handler from the pending
 * into the registered table.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 *
 * Return: 0 on Success
 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/*
	 * Remove from pending and insert into registered while getting hold
	 * of protocol instance.
	 */
	hash_del(&hndl->hash);

	/* Bailout if event is not supported at all */
	if (r_evt->not_supported_by_platform)
		return -EOPNOTSUPP;

	/*
	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
	 * protocol initialization when a notifier is registered against a still
	 * not registered protocol, since it would make little sense to force
	 * init protocols for which still no SCMI driver user exists: they
	 * wouldn't emit any event anyway till some SCMI driver starts using it.
	 */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	hndl->r_evt = r_evt;

	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}

/**
 * scmi_valid_pending_handler() - Helper to check pending status of handlers
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to check
 *
 * A handler is considered pending when its r_evt == NULL, because the related
 * event was still unknown at handler's registration time; anyway, since all
 * protocols register their supported events once for all at protocols'
 * initialization time, a pending handler cannot be considered valid anymore if
 * the underlying event (which it is waiting for), belongs to an already
 * initialized and registered protocol.
 *
 * Return: 0 on Success
 */
static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
					     struct scmi_event_handler *hndl)
{
	struct scmi_registered_events_desc *pd;

	if (!IS_HNDL_PENDING(hndl))
		return -EINVAL;

	pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
	if (pd)
		return -EINVAL;

	return 0;
}

/**
 * scmi_register_event_handler() - Register whenever possible an Event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to register
 *
 * At first try to bind an event handler to its associated event, then check if
 * it was at least a valid pending handler: if it was not bound nor valid return
 * false.
 *
 * Valid pending incomplete bindings will be periodically retried by a dedicated
 * worker which is kicked each time a new protocol completes its own
 * registration phase.
 *
 * Context: Assumes to be called with @pending_mtx acquired.
 *
 * Return: 0 on Success
 */
static int scmi_register_event_handler(struct scmi_notify_instance *ni,
				       struct scmi_event_handler *hndl)
{
	int ret;

	ret = scmi_bind_event_handler(ni, hndl);
	if (!ret) {
		dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
			hndl->key);
	} else {
		ret = scmi_valid_pending_handler(ni, hndl);
		if (!ret)
			dev_dbg(ni->handle->dev,
				"registered PENDING handler - key:%X\n",
				hndl->key);
	}

	return ret;
}

/**
 * __scmi_event_handler_get_ops() - Utility to get or create an event handler
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 * @create: A boolean flag to specify if a handler must be created when
 *	    not already existent
 *
 * Search for the desired handler matching the key in both the per-protocol
 * registered table and the common pending table:
 * * if found adjust users refcount
 * * if not found and @create is true, create and register the new handler:
 *   handler could end up being registered as pending if no matching event
 *   could be found.
 *
 * A handler is guaranteed to reside in one and only one of the tables at
 * any one time; to ensure this the whole search and create is performed
 * holding the @pending_mtx lock, with @registered_mtx additionally acquired
 * if needed.
 *
 * Note that when a nested acquisition of these mutexes is needed the locking
 * order is always (same as in @init_work):
 * 1. pending_mtx
 * 2. registered_mtx
 *
 * Events generation is NOT enabled right after creation within this routine
 * since at creation time we usually want to have all setup and ready before
 * events really start flowing.
 *
 * Return: A properly refcounted handler on Success, NULL on Failure
 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	if (r_evt && r_evt->not_supported_by_platform)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&ni->pending_mtx);
	/* Search registered events at first ... if possible at all */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* ...then amongst pending. */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/* Create if still not found and required */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* this hndl can be only a pending one */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = ERR_PTR(-EINVAL);
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}

static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}

static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}

/**
 * scmi_get_active_handler() - Helper to get active handlers only
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 *
 * Search for the desired handler matching the key only in the per-protocol
 * table of registered handlers: this is called only from the dispatching path
 * so we want to be as quick as possible and do not care about pending.
 *
 * Return: A properly refcounted active handler
 */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	return hndl;
}

/**
 * __scmi_enable_evt() - Enable/disable events generation
 * @r_evt: The registered event to act upon
 * @src_id: The src_id to act upon
 * @enable: The action to perform: true->Enable, false->Disable
 *
 * Takes care of proper refcounting while performing enable/disable: handles
 * the special case of ALL sources requests by itself.
 * Returns successfully if at least one of the required src_id has been
 * successfully enabled/disabled.
 *
 * Return: 0 on Success
 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	if (src_id == SRC_ID_MASK) {
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP) {
				dev_dbg(r_evt->proto->ph->dev,
					"Notification NOT supported - proto_id:%d evt_id:%d src_id:%d",
					r_evt->proto->id, r_evt->evt->id,
					src_id);
				ret = -EOPNOTSUPP;
			} else if (refcount_read(sid) == 0) {
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				refcount_inc(sid);
			}
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP)
				continue;
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}

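/*
 * Worked example of the refcounting above (purely illustrative): with two
 * users registered against src_id 3, the first enable finds sources[3] == 0,
 * calls set_notify_enabled() towards the platform and sets the refcount to 1;
 * the second enable only bumps it to 2. On the tear-down side two matching
 * disables are needed before refcount_dec_and_test() hits zero and
 * set_notify_enabled(..., false) is finally issued.
 */
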
static int scmi_enable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (!hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), true);
		if (!ret)
			hndl->enabled = true;
	}

	return ret;
}

static int scmi_disable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), false);
		if (!ret)
			hndl->enabled = false;
	}

	return ret;
}

/**
 * scmi_put_handler_unlocked() - Put an event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to act upon
 *
 * After having got exclusive access to the registered handlers hashtable,
 * update the refcount and if @hndl is no more in use by anyone:
 * * ask for events' generation disabling
 * * unregister and free the handler itself
 *
 * Context: Assumes all the proper locking has been managed by the caller.
 *
 * Return: True if handler was freed (users dropped to zero)
 */
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl)
{
	bool freed = false;

	if (refcount_dec_and_test(&hndl->users)) {
		if (!IS_HNDL_PENDING(hndl))
			scmi_disable_events(hndl);
		scmi_free_event_handler(hndl);
		freed = true;
	}

	return freed;
}

static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * Only registered handler acquired protocol; must be here
		 * released only AFTER unlocking registered_mtx, since
		 * releasing a protocol can trigger its de-initialization
		 * (ie. including r_evt and registered_mtx)
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}

static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl)
{
	bool freed;
	struct scmi_registered_event *r_evt = hndl->r_evt;
	u8 protocol_id = r_evt->proto->id;

	mutex_lock(&r_evt->proto->registered_mtx);
	freed = scmi_put_handler_unlocked(ni, hndl);
	mutex_unlock(&r_evt->proto->registered_mtx);
	if (freed)
		scmi_protocol_release(ni->handle, protocol_id);
}

/**
 * scmi_event_handler_enable_events() - Enable events associated to a handler
 * @hndl: The Event handler to act upon
 *
 * Return: 0 on Success
 */
static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
{
	if (scmi_enable_events(hndl)) {
		pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_notifier_register() - Register a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is registered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic helper to register a notifier_block against a protocol event.
 *
 * A notifier_block @nb will be registered for each distinct event identified
 * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
 * so that:
 *
 *	(proto_X, evt_Y, src_Z) --> chain_X_Y_Z
 *
 * @src_id meaning is protocol specific and identifies the origin of the event
 * (like domain_id, sensor_id and so forth).
 *
 * @src_id can be NULL to signify that the caller is interested in receiving
 * notifications from ALL the available sources for that protocol OR simply that
 * the protocol does not support distinct sources.
 *
 * As soon as one user for the specified tuple appears, a handler is created,
 * and that specific event's generation is enabled at the platform level, unless
 * an associated registered event is found missing, meaning that the needed
 * protocol is still to be initialized and the handler has just been registered
 * as still pending.
 *
 * Return: 0 on Success
 */
static int scmi_notifier_register(const struct scmi_handle *handle,
				  u8 proto_id, u8 evt_id, const u32 *src_id,
				  struct notifier_block *nb)
{
	int ret = 0;
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_or_create_handler(ni, evt_key);
	if (IS_ERR(hndl))
		return PTR_ERR(hndl);

	blocking_notifier_chain_register(&hndl->chain, nb);

	/* Enable events for not pending handlers */
	if (!IS_HNDL_PENDING(hndl)) {
		ret = scmi_event_handler_enable_events(hndl);
		if (ret)
			scmi_put_handler(ni, hndl);
	}

	return ret;
}

/**
 * scmi_notifier_unregister() - Unregister a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is unregistered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID
 * @nb: The notifier_block to unregister
 *
 * Takes care to unregister the provided @nb from the notification chain
 * associated to the specified event and, if there are no more users for the
 * event handler, frees also the associated event handler structures.
 * (this could possibly cause disabling of event's generation at platform level)
 *
 * Return: 0 on Success
 */
static int scmi_notifier_unregister(const struct scmi_handle *handle,
				    u8 proto_id, u8 evt_id, const u32 *src_id,
				    struct notifier_block *nb)
{
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_handler(ni, evt_key);
	if (IS_ERR(hndl))
		return PTR_ERR(hndl);

	/*
	 * Note that this chain unregistration call is safe on its own
	 * being internally protected by an rwsem.
	 */
	blocking_notifier_chain_unregister(&hndl->chain, nb);
	scmi_put_handler(ni, hndl);

	/*
	 * This balances the initial get issued in @scmi_notifier_register.
	 * If this notifier_block happened to be the last known user callback
	 * for this event, the handler is here freed and the event's generation
	 * stopped.
	 *
	 * Note that, an ongoing concurrent lookup on the delivery workqueue
	 * path could still hold the refcount to 1 even after this routine
	 * completes: in such a case it will be the final put on the delivery
	 * path which will finally free this unused handler.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}

struct scmi_notifier_devres {
	const struct scmi_handle *handle;
	u8 proto_id;
	u8 evt_id;
	u32 __src_id;
	u32 *src_id;
	struct notifier_block *nb;
};

static void scmi_devm_release_notifier(struct device *dev, void *res)
{
	struct scmi_notifier_devres *dres = res;

	scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
				 dres->src_id, dres->nb);
}

/**
 * scmi_devm_notifier_register() - Managed registration of a notifier_block
 * for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic devres managed helper to register a notifier_block against a
 * protocol event.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_register(struct scmi_device *sdev,
				       u8 proto_id, u8 evt_id,
				       const u32 *src_id,
				       struct notifier_block *nb)
{
	int ret;
	struct scmi_notifier_devres *dres;

	dres = devres_alloc(scmi_devm_release_notifier,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return -ENOMEM;

	ret = scmi_notifier_register(sdev->handle, proto_id,
				     evt_id, src_id, nb);
	if (ret) {
		devres_free(dres);
		return ret;
	}

	dres->handle = sdev->handle;
	dres->proto_id = proto_id;
	dres->evt_id = evt_id;
	dres->nb = nb;
	if (src_id) {
		dres->__src_id = *src_id;
		dres->src_id = &dres->__src_id;
	} else {
		dres->src_id = NULL;
	}
	devres_add(&sdev->dev, dres);

	return ret;
}

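/*
 * Illustrative usage sketch from a hypothetical SCMI driver probe() (my_nb and
 * the chosen protocol/event IDs are placeholders; the ops are reached through
 * the handle as wired up in notify_ops below):
 *
 *	static int my_driver_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_handle *handle = sdev->handle;
 *
 *		// Managed variant: automatically unregistered on driver unbind
 *		return handle->notify_ops->devm_event_notifier_register(sdev,
 *				SCMI_PROTOCOL_POWER,
 *				SCMI_EVENT_POWER_STATE_CHANGED,
 *				NULL, &my_nb);
 *	}
 */
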
static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
{
	struct scmi_notifier_devres *dres = res;
	struct notifier_block *nb = data;

	if (WARN_ON(!dres || !nb))
		return 0;

	return dres->nb == nb;
}

/**
 * scmi_devm_notifier_unregister() - Managed un-registration of a
 * notifier_block for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @nb: A standard notifier block previously registered for the specified event
 *
 * Generic devres managed helper to explicitly un-register a notifier_block
 * against a protocol event, which was previously registered using the above
 * @scmi_devm_notifier_register.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
					 struct notifier_block *nb)
{
	int ret;

	ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
			     scmi_devm_notifier_match, nb);

	WARN_ON(ret);

	return ret;
}

/**
 * scmi_protocols_late_init() - Worker for late initialization
 * @work: The work item to use associated to the proper SCMI instance
 *
 * This kicks in whenever a new protocol has completed its own registration via
 * scmi_register_protocol_events(): it is in charge of scanning the table of
 * pending handlers (registered by users while the related protocol was still
 * not initialized) and finalizing their initialization whenever possible;
 * invalid pending handlers are purged at this point in time.
 */
static void scmi_protocols_late_init(struct work_struct *work)
{
	int bkt;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;
	struct hlist_node *tmp;

	ni = container_of(work, struct scmi_notify_instance, init_work);

	/* Ensure protocols and events are up to date */
	smp_rmb();

	mutex_lock(&ni->pending_mtx);
	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
		int ret;

		ret = scmi_bind_event_handler(ni, hndl);
		if (!ret) {
			dev_dbg(ni->handle->dev,
				"finalized PENDING handler - key:%X\n",
				hndl->key);
			ret = scmi_event_handler_enable_events(hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging INVALID handler - key:%X\n",
					hndl->key);
				scmi_put_active_handler(ni, hndl);
			}
		} else {
			ret = scmi_valid_pending_handler(ni, hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging PENDING handler - key:%X\n",
					hndl->key);
				/* this hndl can be only a pending one */
				scmi_put_handler_unlocked(ni, hndl);
			}
		}
	}
	mutex_unlock(&ni->pending_mtx);
}

/*
 * notify_ops are attached to the handle so that they can be accessed
 * directly from an scmi_driver to register its own notifiers.
 */
static const struct scmi_notify_ops notify_ops = {
	.devm_event_notifier_register = scmi_devm_notifier_register,
	.devm_event_notifier_unregister = scmi_devm_notifier_unregister,
	.event_notifier_register = scmi_notifier_register,
	.event_notifier_unregister = scmi_notifier_unregister,
};

/**
 * scmi_notification_init() - Initializes Notification Core Support
 * @handle: The handle identifying the platform instance to initialize
 *
 * This function lays out all the basic resources needed by the notification
 * core instance identified by the provided handle: once done, all of the
 * SCMI Protocols can register their events with the core during their own
 * initializations.
 *
 * Note that failing to initialize the core notifications support does not
 * cause the whole SCMI Protocols stack to fail its initialization.
 *
 * SCMI Notification Initialization happens in 2 steps:
 * * initialization: basic common allocations (this function)
 * * registration: protocols asynchronously come into life and register their
 *		   own supported list of events with the core; this causes
 *		   further per-protocol allocations
 *
 * Any user's callback registration attempt, referring to a still not registered
 * event, will be registered as pending and finalized later (if possible)
 * by scmi_protocols_late_init() work.
 * This allows for lazy initialization of SCMI Protocols due to late (or
 * missing) SCMI drivers' modules loading.
 *
 * Return: 0 on Success
 */
int scmi_notification_init(struct scmi_handle *handle)
{
	void *gid;
	struct scmi_notify_instance *ni;

	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
	if (!ni)
		goto err;

	ni->gid = gid;
	ni->handle = handle;

	ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
						sizeof(char *), GFP_KERNEL);
	if (!ni->registered_protocols)
		goto err;

	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
	if (!ni->notify_wq)
		goto err;

	mutex_init(&ni->pending_mtx);
	hash_init(ni->pending_events_handlers);

	INIT_WORK(&ni->init_work, scmi_protocols_late_init);

	scmi_notification_instance_data_set(handle, ni);
	handle->notify_ops = &notify_ops;
	/* Ensure handle is up to date */
	smp_wmb();

	dev_info(handle->dev, "Core Enabled.\n");

	devres_close_group(handle->dev, ni->gid);

	return 0;

err:
	dev_warn(handle->dev, "Initialization Failed.\n");
	devres_release_group(handle->dev, gid);
	return -ENOMEM;
}

/**
 * scmi_notification_exit() - Shutdown and clean Notification core
 * @handle: The handle identifying the platform instance to shutdown
 */
void scmi_notification_exit(struct scmi_handle *handle)
{
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;
	scmi_notification_instance_data_set(handle, NULL);

	/* Destroy while letting pending work complete */
	destroy_workqueue(ni->notify_wq);

	devres_release_group(ni->handle->dev, ni->gid);
}