GitHub Repository: torvalds/linux
Path: blob/master/drivers/firmware/arm_scmi/driver.c
1
// SPDX-License-Identifier: GPL-2.0
2
/*
3
* System Control and Management Interface (SCMI) Message Protocol driver
4
*
5
* SCMI Message Protocol is used between the System Control Processor(SCP)
6
* and the Application Processors(AP). The Message Handling Unit(MHU)
7
* provides a mechanism for inter-processor communication between SCP's
8
* Cortex M3 and AP.
9
*
10
* SCP offers control and management of the core/cluster power states,
11
* various power domain DVFS including the core/cluster, certain system
12
* clocks configuration, thermal sensors and many others.
13
*
14
* Copyright (C) 2018-2025 ARM Ltd.
15
*/
16
17
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19
#include <linux/bitmap.h>
20
#include <linux/debugfs.h>
21
#include <linux/device.h>
22
#include <linux/export.h>
23
#include <linux/idr.h>
24
#include <linux/io.h>
25
#include <linux/io-64-nonatomic-hi-lo.h>
26
#include <linux/kernel.h>
27
#include <linux/kmod.h>
28
#include <linux/ktime.h>
29
#include <linux/hashtable.h>
30
#include <linux/list.h>
31
#include <linux/module.h>
32
#include <linux/of.h>
33
#include <linux/platform_device.h>
34
#include <linux/processor.h>
35
#include <linux/refcount.h>
36
#include <linux/slab.h>
37
#include <linux/xarray.h>
38
39
#include "common.h"
40
#include "notify.h"
41
#include "quirks.h"
42
43
#include "raw_mode.h"
44
45
#define CREATE_TRACE_POINTS
46
#include <trace/events/scmi.h>
47
48
#define SCMI_VENDOR_MODULE_ALIAS_FMT "scmi-protocol-0x%02x-%s"
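/*
 * Illustrative example (hypothetical values, not an in-tree module): for a
 * vendor protocol with id 0x81 from vendor "ACME", scmi_vendor_protocol_get()
 * below formats this into "scmi-protocol-0x81-ACME" and hands it to
 * request_module(); the vendor protocol module is expected to declare a
 * matching MODULE_ALIAS() so it can be autoloaded on demand.
 */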
49
50
static DEFINE_IDA(scmi_id);
51
52
static DEFINE_XARRAY(scmi_protocols);
53
54
/* List of all SCMI devices active in system */
55
static LIST_HEAD(scmi_list);
56
/* Protection for the entire list */
57
static DEFINE_MUTEX(scmi_list_mutex);
58
/* Track the unique id for the transfers for debug & profiling purpose */
59
static atomic_t transfer_last_id;
60
61
static struct dentry *scmi_top_dentry;
62
63
/**
64
* struct scmi_xfers_info - Structure to manage transfer information
65
*
66
* @xfer_alloc_table: Bitmap table for allocated messages.
67
* Index of this bitmap table is also used for message
68
* sequence identifier.
69
* @xfer_lock: Protection for message allocation
70
* @max_msg: Maximum number of messages that can be pending
71
* @free_xfers: A free list of xfers available for use. It is initialized with
72
* a number of xfers equal to the maximum allowed in-flight
73
* messages.
74
* @pending_xfers: A hashtable, indexed by msg_hdr.seq, used to keep all the
75
* currently in-flight messages.
76
*/
77
struct scmi_xfers_info {
78
unsigned long *xfer_alloc_table;
79
spinlock_t xfer_lock;
80
int max_msg;
81
struct hlist_head free_xfers;
82
DECLARE_HASHTABLE(pending_xfers, SCMI_PENDING_XFERS_HT_ORDER_SZ);
83
};
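/*
 * Lifecycle summary: each xfer starts on @free_xfers, moves to
 * @pending_xfers (hashed by its sequence number, with the matching bit set
 * in @xfer_alloc_table) while in-flight, and is returned to @free_xfers by
 * __scmi_xfer_put() once the last user drops its reference.
 */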
84
85
/**
86
* struct scmi_protocol_instance - Describe an initialized protocol instance.
87
* @handle: Reference to the SCMI handle associated to this protocol instance.
88
* @proto: A reference to the protocol descriptor.
89
* @gid: A reference for per-protocol devres management.
90
* @users: A refcount to track effective users of this protocol.
91
* @priv: Reference for optional protocol private data.
92
* @version: Protocol version supported by the platform as detected at runtime.
93
* @negotiated_version: When the platform supports a newer protocol version,
94
* the agent will try to negotiate with the platform the
95
* usage of the newest version known to it, since
96
* backward compatibility is NOT automatically assured.
97
* This field is NON-zero when a successful negotiation
98
* has completed.
99
* @ph: An embedded protocol handle that will be passed down to protocol
100
* initialization code to identify this instance.
101
*
102
* Each protocol is initialized independently once for each SCMI platform in
103
* which it is defined by DT and implemented by the SCMI server fw.
104
*/
105
struct scmi_protocol_instance {
106
const struct scmi_handle *handle;
107
const struct scmi_protocol *proto;
108
void *gid;
109
refcount_t users;
110
void *priv;
111
unsigned int version;
112
unsigned int negotiated_version;
113
struct scmi_protocol_handle ph;
114
};
115
116
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
117
118
/**
119
* struct scmi_info - Structure representing a SCMI instance
120
*
121
* @id: A sequence number starting from zero identifying this instance
122
* @dev: Device pointer
123
* @desc: SoC description for this instance
124
* @version: SCMI revision information containing protocol version,
125
* implementation version and (sub-)vendor identification.
126
* @handle: Instance of SCMI handle to send to clients
127
* @tx_minfo: Universal Transmit Message management info
128
* @rx_minfo: Universal Receive Message management info
129
* @tx_idr: IDR object to map protocol id to Tx channel info pointer
130
* @rx_idr: IDR object to map protocol id to Rx channel info pointer
131
* @protocols: IDR for protocols' instance descriptors initialized for
132
* this SCMI instance: populated on protocol's first attempted
133
* usage.
134
* @protocols_mtx: A mutex to protect protocols instances initialization.
135
* @protocols_imp: List of protocols implemented, currently maximum of
136
* scmi_revision_info.num_protocols elements allocated by the
137
* base protocol
138
* @active_protocols: IDR storing device_nodes for protocols actually defined
139
* in the DT and confirmed as implemented by fw.
140
* @notify_priv: Pointer to private data structure specific to notifications.
141
* @node: List head
142
* @users: Number of users of this instance
143
* @bus_nb: A notifier to listen for device bind/unbind on the scmi bus
144
* @dev_req_nb: A notifier to listen for device request/unrequest on the scmi
145
* bus
146
* @devreq_mtx: A mutex to serialize device creation for this SCMI instance
147
* @dbg: A pointer to debugfs related data (if any)
148
* @raw: An opaque reference handle used by SCMI Raw mode.
149
*/
150
struct scmi_info {
151
int id;
152
struct device *dev;
153
const struct scmi_desc *desc;
154
struct scmi_revision_info version;
155
struct scmi_handle handle;
156
struct scmi_xfers_info tx_minfo;
157
struct scmi_xfers_info rx_minfo;
158
struct idr tx_idr;
159
struct idr rx_idr;
160
struct idr protocols;
161
/* Ensure mutual exclusive access to protocols instance array */
162
struct mutex protocols_mtx;
163
u8 *protocols_imp;
164
struct idr active_protocols;
165
void *notify_priv;
166
struct list_head node;
167
int users;
168
struct notifier_block bus_nb;
169
struct notifier_block dev_req_nb;
170
/* Serialize device creation process for this instance */
171
struct mutex devreq_mtx;
172
struct scmi_debug_info *dbg;
173
void *raw;
174
};
175
176
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)
177
#define tx_minfo_to_scmi_info(h) container_of(h, struct scmi_info, tx_minfo)
178
#define bus_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, bus_nb)
179
#define req_nb_to_scmi_info(nb) container_of(nb, struct scmi_info, dev_req_nb)
180
181
static void scmi_rx_callback(struct scmi_chan_info *cinfo,
182
u32 msg_hdr, void *priv);
183
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo,
184
u32 msg_hdr, enum scmi_bad_msg err);
185
186
static struct scmi_transport_core_operations scmi_trans_core_ops = {
187
.bad_message_trace = scmi_bad_message_trace,
188
.rx_callback = scmi_rx_callback,
189
};
190
191
static unsigned long
192
scmi_vendor_protocol_signature(unsigned int protocol_id, char *vendor_id,
193
char *sub_vendor_id, u32 impl_ver)
194
{
195
char *signature, *p;
196
unsigned long hash = 0;
197
198
/* vendor_id/sub_vendor_id guaranteed <= SCMI_SHORT_NAME_MAX_SIZE */
199
signature = kasprintf(GFP_KERNEL, "%02X|%s|%s|0x%08X", protocol_id,
200
vendor_id ?: "", sub_vendor_id ?: "", impl_ver);
201
if (!signature)
202
return 0;
203
204
p = signature;
205
while (*p)
206
hash = partial_name_hash(tolower(*p++), hash);
207
hash = end_name_hash(hash);
208
209
kfree(signature);
210
211
return hash;
212
}
213
214
static unsigned long
215
scmi_protocol_key_calculate(int protocol_id, char *vendor_id,
216
char *sub_vendor_id, u32 impl_ver)
217
{
218
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
219
return protocol_id;
220
else
221
return scmi_vendor_protocol_signature(protocol_id, vendor_id,
222
sub_vendor_id, impl_ver);
223
}
224
225
static const struct scmi_protocol *
226
__scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
227
char *sub_vendor_id, u32 impl_ver)
228
{
229
unsigned long key;
230
struct scmi_protocol *proto = NULL;
231
232
key = scmi_protocol_key_calculate(protocol_id, vendor_id,
233
sub_vendor_id, impl_ver);
234
if (key)
235
proto = xa_load(&scmi_protocols, key);
236
237
return proto;
238
}
239
240
static const struct scmi_protocol *
241
scmi_vendor_protocol_lookup(int protocol_id, char *vendor_id,
242
char *sub_vendor_id, u32 impl_ver)
243
{
244
const struct scmi_protocol *proto = NULL;
245
246
/* Searching for closest match ... */
247
proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
248
sub_vendor_id, impl_ver);
249
if (proto)
250
return proto;
251
252
/* Any match just on vendor/sub_vendor ? */
253
if (impl_ver) {
254
proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
255
sub_vendor_id, 0);
256
if (proto)
257
return proto;
258
}
259
260
/* Any match just on the vendor ? */
261
if (sub_vendor_id)
262
proto = __scmi_vendor_protocol_lookup(protocol_id, vendor_id,
263
NULL, 0);
264
return proto;
265
}
266
267
static const struct scmi_protocol *
268
scmi_vendor_protocol_get(int protocol_id, struct scmi_revision_info *version)
269
{
270
const struct scmi_protocol *proto;
271
272
proto = scmi_vendor_protocol_lookup(protocol_id, version->vendor_id,
273
version->sub_vendor_id,
274
version->impl_ver);
275
if (!proto) {
276
int ret;
277
278
pr_debug("Looking for '" SCMI_VENDOR_MODULE_ALIAS_FMT "'\n",
279
protocol_id, version->vendor_id);
280
281
/* Note that vendor_id is mandatory for vendor protocols */
282
ret = request_module(SCMI_VENDOR_MODULE_ALIAS_FMT,
283
protocol_id, version->vendor_id);
284
if (ret) {
285
pr_warn("Problem loading module for protocol 0x%x\n",
286
protocol_id);
287
return NULL;
288
}
289
290
/* Lookup again, once modules loaded */
291
proto = scmi_vendor_protocol_lookup(protocol_id,
292
version->vendor_id,
293
version->sub_vendor_id,
294
version->impl_ver);
295
}
296
297
if (proto)
298
pr_info("Loaded SCMI Vendor Protocol 0x%x - %s %s %X\n",
299
protocol_id, proto->vendor_id ?: "",
300
proto->sub_vendor_id ?: "", proto->impl_ver);
301
302
return proto;
303
}
304
305
static const struct scmi_protocol *
306
scmi_protocol_get(int protocol_id, struct scmi_revision_info *version)
307
{
308
const struct scmi_protocol *proto = NULL;
309
310
if (protocol_id < SCMI_PROTOCOL_VENDOR_BASE)
311
proto = xa_load(&scmi_protocols, protocol_id);
312
else
313
proto = scmi_vendor_protocol_get(protocol_id, version);
314
315
if (!proto || !try_module_get(proto->owner)) {
316
pr_warn("SCMI Protocol 0x%x not found!\n", protocol_id);
317
return NULL;
318
}
319
320
pr_debug("Found SCMI Protocol 0x%x\n", protocol_id);
321
322
return proto;
323
}
324
325
static void scmi_protocol_put(const struct scmi_protocol *proto)
326
{
327
if (proto)
328
module_put(proto->owner);
329
}
330
331
static int scmi_vendor_protocol_check(const struct scmi_protocol *proto)
332
{
333
if (!proto->vendor_id) {
334
pr_err("missing vendor_id for protocol 0x%x\n", proto->id);
335
return -EINVAL;
336
}
337
338
if (strlen(proto->vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
339
pr_err("malformed vendor_id for protocol 0x%x\n", proto->id);
340
return -EINVAL;
341
}
342
343
if (proto->sub_vendor_id &&
344
strlen(proto->sub_vendor_id) >= SCMI_SHORT_NAME_MAX_SIZE) {
345
pr_err("malformed sub_vendor_id for protocol 0x%x\n",
346
proto->id);
347
return -EINVAL;
348
}
349
350
return 0;
351
}
352
353
int scmi_protocol_register(const struct scmi_protocol *proto)
354
{
355
int ret;
356
unsigned long key;
357
358
if (!proto) {
359
pr_err("invalid protocol\n");
360
return -EINVAL;
361
}
362
363
if (!proto->instance_init) {
364
pr_err("missing init for protocol 0x%x\n", proto->id);
365
return -EINVAL;
366
}
367
368
if (proto->id >= SCMI_PROTOCOL_VENDOR_BASE &&
369
scmi_vendor_protocol_check(proto))
370
return -EINVAL;
371
372
/*
373
* Calculate a protocol key to register this protocol with the core;
374
* key value 0 is considered invalid.
375
*/
376
key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
377
proto->sub_vendor_id,
378
proto->impl_ver);
379
if (!key)
380
return -EINVAL;
381
382
ret = xa_insert(&scmi_protocols, key, (void *)proto, GFP_KERNEL);
383
if (ret) {
384
pr_err("unable to allocate SCMI protocol slot for 0x%x - err %d\n",
385
proto->id, ret);
386
return ret;
387
}
388
389
pr_debug("Registered SCMI Protocol 0x%x - %s %s 0x%08X\n",
390
proto->id, proto->vendor_id, proto->sub_vendor_id,
391
proto->impl_ver);
392
393
return 0;
394
}
395
EXPORT_SYMBOL_GPL(scmi_protocol_register);
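/*
 * Illustrative usage sketch (hypothetical protocol, not part of this file;
 * the 0x81 id, the "ACME" vendor string and acme_proto_init() are
 * placeholders): a protocol module fills in a descriptor and registers it,
 * roughly:
 *
 *	static const struct scmi_protocol scmi_acme_proto = {
 *		.id		= 0x81,
 *		.owner		= THIS_MODULE,
 *		.instance_init	= acme_proto_init,
 *		.vendor_id	= "ACME",
 *	};
 *
 *	ret = scmi_protocol_register(&scmi_acme_proto);
 *
 * In-tree protocols typically generate this register/unregister pair through
 * the helper macros provided in protocols.h.
 */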
396
397
void scmi_protocol_unregister(const struct scmi_protocol *proto)
398
{
399
unsigned long key;
400
401
key = scmi_protocol_key_calculate(proto->id, proto->vendor_id,
402
proto->sub_vendor_id,
403
proto->impl_ver);
404
if (!key)
405
return;
406
407
xa_erase(&scmi_protocols, key);
408
409
pr_debug("Unregistered SCMI Protocol 0x%x\n", proto->id);
410
}
411
EXPORT_SYMBOL_GPL(scmi_protocol_unregister);
412
413
/**
414
* scmi_create_protocol_devices - Create devices for all pending requests for
415
* this SCMI instance.
416
*
417
* @np: The device node describing the protocol
418
* @info: The SCMI instance descriptor
419
* @prot_id: The protocol ID
420
* @name: The optional name of the device to be created: if not provided this
421
* call will lead to the creation of all the devices currently requested
422
* for the specified protocol.
423
*/
424
static void scmi_create_protocol_devices(struct device_node *np,
425
struct scmi_info *info,
426
int prot_id, const char *name)
427
{
428
mutex_lock(&info->devreq_mtx);
429
scmi_device_create(np, info->dev, prot_id, name);
430
mutex_unlock(&info->devreq_mtx);
431
}
432
433
static void scmi_destroy_protocol_devices(struct scmi_info *info,
434
int prot_id, const char *name)
435
{
436
mutex_lock(&info->devreq_mtx);
437
scmi_device_destroy(info->dev, prot_id, name);
438
mutex_unlock(&info->devreq_mtx);
439
}
440
441
void scmi_notification_instance_data_set(const struct scmi_handle *handle,
442
void *priv)
443
{
444
struct scmi_info *info = handle_to_scmi_info(handle);
445
446
info->notify_priv = priv;
447
/* Ensure updated protocol private data are visible */
448
smp_wmb();
449
}
450
451
void *scmi_notification_instance_data_get(const struct scmi_handle *handle)
452
{
453
struct scmi_info *info = handle_to_scmi_info(handle);
454
455
/* Ensure protocols_private_data has been updated */
456
smp_rmb();
457
return info->notify_priv;
458
}
459
460
/**
461
* scmi_xfer_token_set - Reserve and set new token for the xfer at hand
462
*
463
* @minfo: Pointer to Tx/Rx Message management info based on channel type
464
* @xfer: The xfer to act upon
465
*
466
* Pick the next unused monotonically increasing token and set it into
467
* xfer->hdr.seq: picking a monotonically increasing value avoids immediate
468
* reuse of freshly completed or timed-out xfers, thus mitigating the risk
469
* of incorrect association of a late and expired xfer with a live in-flight
470
* transaction, both happening to re-use the same token identifier.
471
*
472
* Since platform is NOT required to answer our request in-order we should
473
* account for a few rare but possible scenarios:
474
*
475
* - exactly 'next_token' may be NOT available so pick xfer_id >= next_token
476
* using find_next_zero_bit() starting from candidate next_token bit
477
*
478
* - all tokens ahead up to (MSG_TOKEN_ID_MASK - 1) are used in-flight but
479
*   there are plenty of free tokens at start, so try a second pass using
480
* find_next_zero_bit() and starting from 0.
481
*
482
* X = used in-flight
483
*
484
* Normal
485
* ------
486
*
487
* |- xfer_id picked
488
* -----------+----------------------------------------------------------
489
* | | |X|X|X| | | | | | ... ... ... ... ... ... ... ... ... ... ...|X|X|
490
* ----------------------------------------------------------------------
491
* ^
492
* |- next_token
493
*
494
* Out-of-order pending at start
495
* -----------------------------
496
*
497
* |- xfer_id picked, last_token fixed
498
* -----+----------------------------------------------------------------
499
* |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... ... ...|X| |
500
* ----------------------------------------------------------------------
501
* ^
502
* |- next_token
503
*
504
*
505
* Out-of-order pending at end
506
* ---------------------------
507
*
508
* |- xfer_id picked, last_token fixed
509
* -----+----------------------------------------------------------------
510
* |X|X| | | | |X|X| ... ... ... ... ... ... ... ... ... ... |X|X|X||X|X|
511
* ----------------------------------------------------------------------
512
* ^
513
* |- next_token
514
*
515
* Context: Assumes to be called with @xfer_lock already acquired.
516
*
517
* Return: 0 on Success or error
518
*/
519
static int scmi_xfer_token_set(struct scmi_xfers_info *minfo,
520
struct scmi_xfer *xfer)
521
{
522
unsigned long xfer_id, next_token;
523
524
/*
525
* Pick a candidate monotonic token in range [0, MSG_TOKEN_MAX - 1]
526
* using the pre-allocated transfer_id as a base.
527
* Note that the global transfer_id is shared across all message types
528
* so there could be holes in the allocated set of monotonic sequence
529
* numbers, but that is going to limit the effectiveness of the
530
* mitigation only in very rare limit conditions.
531
*/
532
next_token = (xfer->transfer_id & (MSG_TOKEN_MAX - 1));
533
534
/* Pick the next available xfer_id >= next_token */
535
xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
536
MSG_TOKEN_MAX, next_token);
537
if (xfer_id == MSG_TOKEN_MAX) {
538
/*
539
* After heavily out-of-order responses, there are no free
540
* tokens ahead, but only at start of xfer_alloc_table so
541
* try again from the beginning.
542
*/
543
xfer_id = find_next_zero_bit(minfo->xfer_alloc_table,
544
MSG_TOKEN_MAX, 0);
545
/*
546
* Something is wrong if we got here since there can be a
547
* maximum number of (MSG_TOKEN_MAX - 1) in-flight messages
548
* but we have not found any free token [0, MSG_TOKEN_MAX - 1].
549
*/
550
if (WARN_ON_ONCE(xfer_id == MSG_TOKEN_MAX))
551
return -ENOMEM;
552
}
553
554
/* Update +/- last_token accordingly if we skipped some hole */
555
if (xfer_id != next_token)
556
atomic_add((int)(xfer_id - next_token), &transfer_last_id);
557
558
xfer->hdr.seq = (u16)xfer_id;
559
560
return 0;
561
}
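/*
 * Worked example (illustrative values): if MSG_TOKEN_MAX were 1024 and this
 * xfer had been handed transfer_id 2051, next_token would be 2051 % 1024 == 3;
 * the first clear bit at or above position 3 in xfer_alloc_table becomes
 * hdr.seq, and the search wraps back to bit 0 only when every bit from 3 up
 * to 1023 is already set.
 */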
562
563
/**
564
* scmi_xfer_token_clear - Release the token
565
*
566
* @minfo: Pointer to Tx/Rx Message management info based on channel type
567
* @xfer: The xfer to act upon
568
*/
569
static inline void scmi_xfer_token_clear(struct scmi_xfers_info *minfo,
570
struct scmi_xfer *xfer)
571
{
572
clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
573
}
574
575
/**
576
* scmi_xfer_inflight_register_unlocked - Register the xfer as in-flight
577
*
578
* @xfer: The xfer to register
579
* @minfo: Pointer to Tx/Rx Message management info based on channel type
580
*
581
* Note that this helper assumes that the xfer to be registered as in-flight
582
* had been built using an xfer sequence number which still corresponds to a
583
* free slot in the xfer_alloc_table.
584
*
585
* Context: Assumes to be called with @xfer_lock already acquired.
586
*/
587
static inline void
588
scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
589
struct scmi_xfers_info *minfo)
590
{
591
/* In this context minfo will be tx_minfo due to the xfer pending */
592
struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
593
594
/* Set in-flight */
595
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
596
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
597
scmi_inc_count(info->dbg, XFERS_INFLIGHT);
598
599
xfer->pending = true;
600
}
601
602
/**
603
* scmi_xfer_inflight_register - Try to register an xfer as in-flight
604
*
605
* @xfer: The xfer to register
606
* @minfo: Pointer to Tx/Rx Message management info based on channel type
607
*
608
* Note that this helper does NOT assume anything about the sequence number
609
* that was baked into the provided xfer, so it checks at first if it can
610
* be mapped to a free slot and fails with an error if another xfer with the
611
* same sequence number is currently still registered as in-flight.
612
*
613
* Return: 0 on Success or -EBUSY if sequence number embedded in the xfer
614
* could not be mapped to a free slot in the xfer_alloc_table.
615
*/
616
static int scmi_xfer_inflight_register(struct scmi_xfer *xfer,
617
struct scmi_xfers_info *minfo)
618
{
619
int ret = 0;
620
unsigned long flags;
621
622
spin_lock_irqsave(&minfo->xfer_lock, flags);
623
if (!test_bit(xfer->hdr.seq, minfo->xfer_alloc_table))
624
scmi_xfer_inflight_register_unlocked(xfer, minfo);
625
else
626
ret = -EBUSY;
627
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
628
629
return ret;
630
}
631
632
/**
633
* scmi_xfer_raw_inflight_register - A helper to register the given xfer as in
634
* flight on the TX channel, if possible.
635
*
636
* @handle: Pointer to SCMI entity handle
637
* @xfer: The xfer to register
638
*
639
* Return: 0 on Success, error otherwise
640
*/
641
int scmi_xfer_raw_inflight_register(const struct scmi_handle *handle,
642
struct scmi_xfer *xfer)
643
{
644
struct scmi_info *info = handle_to_scmi_info(handle);
645
646
return scmi_xfer_inflight_register(xfer, &info->tx_minfo);
647
}
648
649
/**
650
* scmi_xfer_pending_set - Pick a proper sequence number and mark the xfer
651
* as pending in-flight
652
*
653
* @xfer: The xfer to act upon
654
* @minfo: Pointer to Tx/Rx Message management info based on channel type
655
*
656
* Return: 0 on Success or error otherwise
657
*/
658
static inline int scmi_xfer_pending_set(struct scmi_xfer *xfer,
659
struct scmi_xfers_info *minfo)
660
{
661
int ret;
662
unsigned long flags;
663
664
spin_lock_irqsave(&minfo->xfer_lock, flags);
665
/* Set a new monotonic token as the xfer sequence number */
666
ret = scmi_xfer_token_set(minfo, xfer);
667
if (!ret)
668
scmi_xfer_inflight_register_unlocked(xfer, minfo);
669
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
670
671
return ret;
672
}
673
674
/**
675
* scmi_xfer_get() - Allocate one message
676
*
677
* @handle: Pointer to SCMI entity handle
678
* @minfo: Pointer to Tx/Rx Message management info based on channel type
679
*
680
* Helper function which is used by various message functions that are
681
* exposed to clients of this driver for allocating a message traffic event.
682
*
683
* Picks an xfer from the free list @free_xfers (if any available) and performs
684
* a basic initialization.
685
*
686
* Note that, at this point, still no sequence number is assigned to the
687
* allocated xfer, nor it is registered as a pending transaction.
688
*
689
* The successfully initialized xfer is refcounted.
690
*
691
* Context: Holds @xfer_lock while manipulating @free_xfers.
692
*
693
* Return: An initialized xfer if all went fine, else an error pointer.
694
*/
695
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
696
struct scmi_xfers_info *minfo)
697
{
698
unsigned long flags;
699
struct scmi_xfer *xfer;
700
701
spin_lock_irqsave(&minfo->xfer_lock, flags);
702
if (hlist_empty(&minfo->free_xfers)) {
703
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
704
return ERR_PTR(-ENOMEM);
705
}
706
707
/* grab an xfer from the free_list */
708
xfer = hlist_entry(minfo->free_xfers.first, struct scmi_xfer, node);
709
hlist_del_init(&xfer->node);
710
711
/*
712
* Allocate transfer_id early so that can be used also as base for
713
* monotonic sequence number generation if needed.
714
*/
715
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
716
717
refcount_set(&xfer->users, 1);
718
atomic_set(&xfer->busy, SCMI_XFER_FREE);
719
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
720
721
return xfer;
722
}
723
724
/**
725
* scmi_xfer_raw_get - Helper to get a bare free xfer from the TX channel
726
*
727
* @handle: Pointer to SCMI entity handle
728
*
729
* Note that xfer is taken from the TX channel structures.
730
*
731
* Return: A valid xfer on Success, or an error-pointer otherwise
732
*/
733
struct scmi_xfer *scmi_xfer_raw_get(const struct scmi_handle *handle)
734
{
735
struct scmi_xfer *xfer;
736
struct scmi_info *info = handle_to_scmi_info(handle);
737
738
xfer = scmi_xfer_get(handle, &info->tx_minfo);
739
if (!IS_ERR(xfer))
740
xfer->flags |= SCMI_XFER_FLAG_IS_RAW;
741
742
return xfer;
743
}
744
745
/**
746
* scmi_xfer_raw_channel_get - Helper to get a reference to the proper channel
747
* to use for a specific protocol_id Raw transaction.
748
*
749
* @handle: Pointer to SCMI entity handle
750
* @protocol_id: Identifier of the protocol
751
*
752
* Note that in a regular SCMI stack, usually, a protocol has to be defined in
753
* the DT to have an associated channel and be usable; but in Raw mode any
754
* protocol in range is allowed, re-using the Base channel, so as to enable
755
* fuzzing on any protocol without the need of a fully compiled DT.
756
*
757
* Return: A reference to the channel to use, or an ERR_PTR
758
*/
759
struct scmi_chan_info *
760
scmi_xfer_raw_channel_get(const struct scmi_handle *handle, u8 protocol_id)
761
{
762
struct scmi_chan_info *cinfo;
763
struct scmi_info *info = handle_to_scmi_info(handle);
764
765
cinfo = idr_find(&info->tx_idr, protocol_id);
766
if (!cinfo) {
767
if (protocol_id == SCMI_PROTOCOL_BASE)
768
return ERR_PTR(-EINVAL);
769
/* Use Base channel for protocols not defined for DT */
770
cinfo = idr_find(&info->tx_idr, SCMI_PROTOCOL_BASE);
771
if (!cinfo)
772
return ERR_PTR(-EINVAL);
773
dev_warn_once(handle->dev,
774
"Using Base channel for protocol 0x%X\n",
775
protocol_id);
776
}
777
778
return cinfo;
779
}
780
781
/**
782
* __scmi_xfer_put() - Release a message
783
*
784
* @minfo: Pointer to Tx/Rx Message management info based on channel type
785
* @xfer: message that was reserved by scmi_xfer_get
786
*
787
* After refcount check, possibly release an xfer, clearing the token slot,
788
* removing xfer from @pending_xfers and putting it back into free_xfers.
789
*
790
* This holds a spinlock to maintain integrity of internal data structures.
791
*/
792
static void
793
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
794
{
795
unsigned long flags;
796
797
spin_lock_irqsave(&minfo->xfer_lock, flags);
798
if (refcount_dec_and_test(&xfer->users)) {
799
if (xfer->pending) {
800
struct scmi_info *info = tx_minfo_to_scmi_info(minfo);
801
802
scmi_xfer_token_clear(minfo, xfer);
803
hash_del(&xfer->node);
804
xfer->pending = false;
805
806
scmi_dec_count(info->dbg, XFERS_INFLIGHT);
807
}
808
xfer->flags = 0;
809
hlist_add_head(&xfer->node, &minfo->free_xfers);
810
}
811
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
812
}
813
814
/**
815
* scmi_xfer_raw_put - Release an xfer that was taken by @scmi_xfer_raw_get
816
*
817
* @handle: Pointer to SCMI entity handle
818
* @xfer: A reference to the xfer to put
819
*
820
* Note that, as with other xfer_put() handlers, the xfer is effectively
821
* released only if there are no more users on the system.
822
*/
823
void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
824
{
825
struct scmi_info *info = handle_to_scmi_info(handle);
826
827
return __scmi_xfer_put(&info->tx_minfo, xfer);
828
}
829
830
/**
831
* scmi_xfer_lookup_unlocked - Helper to lookup an xfer_id
832
*
833
* @minfo: Pointer to Tx/Rx Message management info based on channel type
834
* @xfer_id: Token ID to lookup in @pending_xfers
835
*
836
* Refcounting is untouched.
837
*
838
* Context: Assumes to be called with @xfer_lock already acquired.
839
*
840
* Return: A valid xfer on Success or error otherwise
841
*/
842
static struct scmi_xfer *
843
scmi_xfer_lookup_unlocked(struct scmi_xfers_info *minfo, u16 xfer_id)
844
{
845
struct scmi_xfer *xfer = NULL;
846
847
if (test_bit(xfer_id, minfo->xfer_alloc_table))
848
xfer = XFER_FIND(minfo->pending_xfers, xfer_id);
849
850
return xfer ?: ERR_PTR(-EINVAL);
851
}
852
853
/**
854
* scmi_bad_message_trace - A helper to trace weird messages
855
*
856
* @cinfo: A reference to the channel descriptor on which the message was
857
* received
858
* @msg_hdr: Message header to track
859
* @err: A specific error code used as a status value in traces.
860
*
861
* This helper can be used to trace any kind of weird, incomplete, unexpected,
862
* timed-out message that arrives and as such, can be traced only referring to
863
* the header content, since the payload is missing/unreliable.
864
*/
865
static void scmi_bad_message_trace(struct scmi_chan_info *cinfo, u32 msg_hdr,
866
enum scmi_bad_msg err)
867
{
868
char *tag;
869
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
870
871
switch (MSG_XTRACT_TYPE(msg_hdr)) {
872
case MSG_TYPE_COMMAND:
873
tag = "!RESP";
874
break;
875
case MSG_TYPE_DELAYED_RESP:
876
tag = "!DLYD";
877
break;
878
case MSG_TYPE_NOTIFICATION:
879
tag = "!NOTI";
880
break;
881
default:
882
tag = "!UNKN";
883
break;
884
}
885
886
trace_scmi_msg_dump(info->id, cinfo->id,
887
MSG_XTRACT_PROT_ID(msg_hdr),
888
MSG_XTRACT_ID(msg_hdr), tag,
889
MSG_XTRACT_TOKEN(msg_hdr), err, NULL, 0);
890
}
891
892
/**
893
* scmi_msg_response_validate - Validate message type against state of related
894
* xfer
895
*
896
* @cinfo: A reference to the channel descriptor.
897
* @msg_type: Message type to check
898
* @xfer: A reference to the xfer to validate against @msg_type
899
*
900
* This function checks if @msg_type is congruent with the current state of
901
* a pending @xfer; if an asynchronous delayed response is received before the
902
* related synchronous response (Out-of-Order Delayed Response) the missing
903
* synchronous response is assumed to be OK and completed, carrying on with the
904
* Delayed Response: this is done to address the case in which the underlying
905
* SCMI transport can deliver such out-of-order responses.
906
*
907
* Context: Assumes to be called with xfer->lock already acquired.
908
*
909
* Return: 0 on Success, error otherwise
910
*/
911
static inline int scmi_msg_response_validate(struct scmi_chan_info *cinfo,
912
u8 msg_type,
913
struct scmi_xfer *xfer)
914
{
915
/*
916
* Even if a response was indeed expected on this slot at this point,
917
* a buggy platform could wrongly reply feeding us an unexpected
918
* delayed response we're not prepared to handle: bail-out safely
919
* blaming firmware.
920
*/
921
if (msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done) {
922
dev_err(cinfo->dev,
923
"Delayed Response for %d not expected! Buggy F/W ?\n",
924
xfer->hdr.seq);
925
return -EINVAL;
926
}
927
928
switch (xfer->state) {
929
case SCMI_XFER_SENT_OK:
930
if (msg_type == MSG_TYPE_DELAYED_RESP) {
931
/*
932
* Delayed Response expected but delivered earlier.
933
* Assume message RESPONSE was OK and skip state.
934
*/
935
xfer->hdr.status = SCMI_SUCCESS;
936
xfer->state = SCMI_XFER_RESP_OK;
937
complete(&xfer->done);
938
dev_warn(cinfo->dev,
939
"Received valid OoO Delayed Response for %d\n",
940
xfer->hdr.seq);
941
}
942
break;
943
case SCMI_XFER_RESP_OK:
944
if (msg_type != MSG_TYPE_DELAYED_RESP)
945
return -EINVAL;
946
break;
947
case SCMI_XFER_DRESP_OK:
948
/* No further message expected once in SCMI_XFER_DRESP_OK */
949
return -EINVAL;
950
}
951
952
return 0;
953
}
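/*
 * Accepted (xfer->state, msg_type) combinations after the checks above:
 *
 *	SENT_OK + MSG_TYPE_COMMAND        normal synchronous response
 *	SENT_OK + MSG_TYPE_DELAYED_RESP   OoO delayed response; the missing
 *					  synchronous response is assumed OK
 *	RESP_OK + MSG_TYPE_DELAYED_RESP   delayed response after the response
 *
 * A delayed response is only ever accepted when xfer->async_done is set;
 * any other combination, including anything received once in DRESP_OK,
 * is rejected with -EINVAL.
 */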
954
955
/**
956
* scmi_xfer_state_update - Update xfer state
957
*
958
* @xfer: A reference to the xfer to update
959
* @msg_type: Type of message being processed.
960
*
961
* Note that this message is assumed to have been already successfully validated
962
* by @scmi_msg_response_validate(), so here we just update the state.
963
*
964
* Context: Assumes to be called on an xfer exclusively acquired using the
965
* busy flag.
966
*/
967
static inline void scmi_xfer_state_update(struct scmi_xfer *xfer, u8 msg_type)
968
{
969
xfer->hdr.type = msg_type;
970
971
/* Unknown command types were already discarded earlier */
972
if (xfer->hdr.type == MSG_TYPE_COMMAND)
973
xfer->state = SCMI_XFER_RESP_OK;
974
else
975
xfer->state = SCMI_XFER_DRESP_OK;
976
}
977
978
static bool scmi_xfer_acquired(struct scmi_xfer *xfer)
979
{
980
int ret;
981
982
ret = atomic_cmpxchg(&xfer->busy, SCMI_XFER_FREE, SCMI_XFER_BUSY);
983
984
return ret == SCMI_XFER_FREE;
985
}
986
987
/**
988
* scmi_xfer_command_acquire - Helper to lookup and acquire a command xfer
989
*
990
* @cinfo: A reference to the channel descriptor.
991
* @msg_hdr: A message header to use as lookup key
992
*
993
* When a valid xfer is found for the sequence number embedded in the provided
994
* msg_hdr, reference counting is properly updated and exclusive access to this
995
* xfer is granted till released with @scmi_xfer_command_release.
996
*
997
* Return: A valid @xfer on Success or error otherwise.
998
*/
999
static inline struct scmi_xfer *
1000
scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
1001
{
1002
int ret;
1003
unsigned long flags;
1004
struct scmi_xfer *xfer;
1005
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1006
struct scmi_xfers_info *minfo = &info->tx_minfo;
1007
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1008
u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
1009
1010
/* Are we even expecting this? */
1011
spin_lock_irqsave(&minfo->xfer_lock, flags);
1012
xfer = scmi_xfer_lookup_unlocked(minfo, xfer_id);
1013
if (IS_ERR(xfer)) {
1014
dev_err(cinfo->dev,
1015
"Message for %d type %d is not expected!\n",
1016
xfer_id, msg_type);
1017
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1018
1019
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
1020
scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
1021
1022
return xfer;
1023
}
1024
refcount_inc(&xfer->users);
1025
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
1026
1027
spin_lock_irqsave(&xfer->lock, flags);
1028
ret = scmi_msg_response_validate(cinfo, msg_type, xfer);
1029
/*
1030
* If a pending xfer was found which was also in a congruent state with
1031
* the received message, acquire exclusive access to it setting the busy
1032
* flag.
1033
* Spins only on the rare limit condition of concurrent reception of
1034
* RESP and DRESP for the same xfer.
1035
*/
1036
if (!ret) {
1037
spin_until_cond(scmi_xfer_acquired(xfer));
1038
scmi_xfer_state_update(xfer, msg_type);
1039
}
1040
spin_unlock_irqrestore(&xfer->lock, flags);
1041
1042
if (ret) {
1043
dev_err(cinfo->dev,
1044
"Invalid message type:%d for %d - HDR:0x%X state:%d\n",
1045
msg_type, xfer_id, msg_hdr, xfer->state);
1046
1047
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
1048
scmi_inc_count(info->dbg, ERR_MSG_INVALID);
1049
1050
/* On error the refcount incremented above has to be dropped */
1051
__scmi_xfer_put(minfo, xfer);
1052
xfer = ERR_PTR(-EINVAL);
1053
}
1054
1055
return xfer;
1056
}
1057
1058
static inline void scmi_xfer_command_release(struct scmi_info *info,
1059
struct scmi_xfer *xfer)
1060
{
1061
atomic_set(&xfer->busy, SCMI_XFER_FREE);
1062
__scmi_xfer_put(&info->tx_minfo, xfer);
1063
}
1064
1065
static inline void scmi_clear_channel(struct scmi_info *info,
1066
struct scmi_chan_info *cinfo)
1067
{
1068
if (!cinfo->is_p2a) {
1069
dev_warn(cinfo->dev, "Invalid clear on A2P channel !\n");
1070
return;
1071
}
1072
1073
if (info->desc->ops->clear_channel)
1074
info->desc->ops->clear_channel(cinfo);
1075
}
1076
1077
static void scmi_handle_notification(struct scmi_chan_info *cinfo,
1078
u32 msg_hdr, void *priv)
1079
{
1080
struct scmi_xfer *xfer;
1081
struct device *dev = cinfo->dev;
1082
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1083
struct scmi_xfers_info *minfo = &info->rx_minfo;
1084
ktime_t ts;
1085
1086
ts = ktime_get_boottime();
1087
xfer = scmi_xfer_get(cinfo->handle, minfo);
1088
if (IS_ERR(xfer)) {
1089
dev_err(dev, "failed to get free message slot (%ld)\n",
1090
PTR_ERR(xfer));
1091
1092
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
1093
scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
1094
1095
scmi_clear_channel(info, cinfo);
1096
return;
1097
}
1098
1099
unpack_scmi_header(msg_hdr, &xfer->hdr);
1100
if (priv)
1101
/* Ensure order between xfer->priv store and following ops */
1102
smp_store_mb(xfer->priv, priv);
1103
info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
1104
xfer);
1105
1106
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1107
xfer->hdr.id, "NOTI", xfer->hdr.seq,
1108
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
1109
scmi_inc_count(info->dbg, NOTIFICATION_OK);
1110
1111
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
1112
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
1113
1114
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1115
xfer->hdr.protocol_id, xfer->hdr.seq,
1116
MSG_TYPE_NOTIFICATION);
1117
1118
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1119
xfer->hdr.seq = MSG_XTRACT_TOKEN(msg_hdr);
1120
scmi_raw_message_report(info->raw, xfer, SCMI_RAW_NOTIF_QUEUE,
1121
cinfo->id);
1122
}
1123
1124
__scmi_xfer_put(minfo, xfer);
1125
1126
scmi_clear_channel(info, cinfo);
1127
}
1128
1129
static void scmi_handle_response(struct scmi_chan_info *cinfo,
1130
u32 msg_hdr, void *priv)
1131
{
1132
struct scmi_xfer *xfer;
1133
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1134
1135
xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
1136
if (IS_ERR(xfer)) {
1137
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
1138
scmi_raw_error_report(info->raw, cinfo, msg_hdr, priv);
1139
1140
if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
1141
scmi_clear_channel(info, cinfo);
1142
return;
1143
}
1144
1145
/* rx.len could be shrunk in the sync do_xfer, so reset to maxsz */
1146
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1147
xfer->rx.len = info->desc->max_msg_size;
1148
1149
if (priv)
1150
/* Ensure order between xfer->priv store and following ops */
1151
smp_store_mb(xfer->priv, priv);
1152
info->desc->ops->fetch_response(cinfo, xfer);
1153
1154
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1155
xfer->hdr.id,
1156
xfer->hdr.type == MSG_TYPE_DELAYED_RESP ?
1157
(!SCMI_XFER_IS_RAW(xfer) ? "DLYD" : "dlyd") :
1158
(!SCMI_XFER_IS_RAW(xfer) ? "RESP" : "resp"),
1159
xfer->hdr.seq, xfer->hdr.status,
1160
xfer->rx.buf, xfer->rx.len);
1161
1162
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
1163
xfer->hdr.protocol_id, xfer->hdr.seq,
1164
xfer->hdr.type);
1165
1166
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
1167
scmi_clear_channel(info, cinfo);
1168
complete(xfer->async_done);
1169
scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
1170
} else {
1171
complete(&xfer->done);
1172
scmi_inc_count(info->dbg, RESPONSE_OK);
1173
}
1174
1175
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1176
/*
1177
* When in polling mode avoid queueing the Raw xfer on the IRQ
1178
* RX path since it will be already queued at the end of the TX
1179
* poll loop.
1180
*/
1181
if (!xfer->hdr.poll_completion ||
1182
xfer->hdr.type == MSG_TYPE_DELAYED_RESP)
1183
scmi_raw_message_report(info->raw, xfer,
1184
SCMI_RAW_REPLY_QUEUE,
1185
cinfo->id);
1186
}
1187
1188
scmi_xfer_command_release(info, xfer);
1189
}
1190
1191
/**
1192
* scmi_rx_callback() - callback for receiving messages
1193
*
1194
* @cinfo: SCMI channel info
1195
* @msg_hdr: Message header
1196
* @priv: Transport specific private data.
1197
*
1198
* Processes one received message to appropriate transfer information and
1199
* signals completion of the transfer.
1200
*
1201
* NOTE: This function will be invoked in IRQ context, hence should be
1202
* as optimal as possible.
1203
*/
1204
static void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr,
1205
void *priv)
1206
{
1207
u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
1208
1209
switch (msg_type) {
1210
case MSG_TYPE_NOTIFICATION:
1211
scmi_handle_notification(cinfo, msg_hdr, priv);
1212
break;
1213
case MSG_TYPE_COMMAND:
1214
case MSG_TYPE_DELAYED_RESP:
1215
scmi_handle_response(cinfo, msg_hdr, priv);
1216
break;
1217
default:
1218
WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
1219
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNKNOWN);
1220
break;
1221
}
1222
}
1223
1224
/**
1225
* xfer_put() - Release a transmit message
1226
*
1227
* @ph: Pointer to SCMI protocol handle
1228
* @xfer: message that was reserved by xfer_get_init
1229
*/
1230
static void xfer_put(const struct scmi_protocol_handle *ph,
1231
struct scmi_xfer *xfer)
1232
{
1233
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1234
struct scmi_info *info = handle_to_scmi_info(pi->handle);
1235
1236
__scmi_xfer_put(&info->tx_minfo, xfer);
1237
}
1238
1239
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
1240
struct scmi_xfer *xfer, ktime_t stop,
1241
bool *ooo)
1242
{
1243
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1244
1245
/*
1246
* Poll also on xfer->done so that polling can be forcibly terminated
1247
* in case of out-of-order receptions of delayed responses
1248
*/
1249
return info->desc->ops->poll_done(cinfo, xfer) ||
1250
(*ooo = try_wait_for_completion(&xfer->done)) ||
1251
ktime_after(ktime_get(), stop);
1252
}
1253
1254
static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
1255
struct scmi_chan_info *cinfo,
1256
struct scmi_xfer *xfer, unsigned int timeout_ms)
1257
{
1258
int ret = 0;
1259
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1260
1261
if (xfer->hdr.poll_completion) {
1262
/*
1263
* Real polling is needed only if transport has NOT declared
1264
* itself to support synchronous commands replies.
1265
*/
1266
if (!desc->sync_cmds_completed_on_ret) {
1267
bool ooo = false;
1268
1269
/*
1270
* Poll on xfer using transport provided .poll_done();
1271
* assumes no completion interrupt was available.
1272
*/
1273
ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms);
1274
1275
spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer,
1276
stop, &ooo));
1277
if (!ooo && !info->desc->ops->poll_done(cinfo, xfer)) {
1278
dev_err(dev,
1279
"timed out in resp(caller: %pS) - polling\n",
1280
(void *)_RET_IP_);
1281
ret = -ETIMEDOUT;
1282
scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
1283
}
1284
}
1285
1286
if (!ret) {
1287
unsigned long flags;
1288
1289
/*
1290
* Do not fetch_response if an out-of-order delayed
1291
* response is being processed.
1292
*/
1293
spin_lock_irqsave(&xfer->lock, flags);
1294
if (xfer->state == SCMI_XFER_SENT_OK) {
1295
desc->ops->fetch_response(cinfo, xfer);
1296
xfer->state = SCMI_XFER_RESP_OK;
1297
}
1298
spin_unlock_irqrestore(&xfer->lock, flags);
1299
1300
/* Trace polled replies. */
1301
trace_scmi_msg_dump(info->id, cinfo->id,
1302
xfer->hdr.protocol_id, xfer->hdr.id,
1303
!SCMI_XFER_IS_RAW(xfer) ?
1304
"RESP" : "resp",
1305
xfer->hdr.seq, xfer->hdr.status,
1306
xfer->rx.buf, xfer->rx.len);
1307
scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
1308
1309
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
1310
scmi_raw_message_report(info->raw, xfer,
1311
SCMI_RAW_REPLY_QUEUE,
1312
cinfo->id);
1313
}
1314
}
1315
} else {
1316
/* And we wait for the response. */
1317
if (!wait_for_completion_timeout(&xfer->done,
1318
msecs_to_jiffies(timeout_ms))) {
1319
dev_err(dev, "timed out in resp(caller: %pS)\n",
1320
(void *)_RET_IP_);
1321
ret = -ETIMEDOUT;
1322
scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
1323
}
1324
}
1325
1326
return ret;
1327
}
1328
1329
/**
1330
* scmi_wait_for_message_response - A helper to group all the possible ways of
1331
* waiting for a synchronous message response.
1332
*
1333
* @cinfo: SCMI channel info
1334
* @xfer: Reference to the transfer being waited for.
1335
*
1336
* Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on
1337
* configuration flags like xfer->hdr.poll_completion.
1338
*
1339
* Return: 0 on Success, error otherwise.
1340
*/
1341
static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo,
1342
struct scmi_xfer *xfer)
1343
{
1344
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1345
struct device *dev = info->dev;
1346
1347
trace_scmi_xfer_response_wait(xfer->transfer_id, xfer->hdr.id,
1348
xfer->hdr.protocol_id, xfer->hdr.seq,
1349
info->desc->max_rx_timeout_ms,
1350
xfer->hdr.poll_completion);
1351
1352
return scmi_wait_for_reply(dev, info->desc, cinfo, xfer,
1353
info->desc->max_rx_timeout_ms);
1354
}
1355
1356
/**
1357
* scmi_xfer_raw_wait_for_message_response - A helper to wait for a message
1358
* reply to an xfer raw request on a specific channel for the required timeout.
1359
*
1360
* @cinfo: SCMI channel info
1361
* @xfer: Reference to the transfer being waited for.
1362
* @timeout_ms: The maximum timeout in milliseconds
1363
*
1364
* Return: 0 on Success, error otherwise.
1365
*/
1366
int scmi_xfer_raw_wait_for_message_response(struct scmi_chan_info *cinfo,
1367
struct scmi_xfer *xfer,
1368
unsigned int timeout_ms)
1369
{
1370
int ret;
1371
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
1372
struct device *dev = info->dev;
1373
1374
ret = scmi_wait_for_reply(dev, info->desc, cinfo, xfer, timeout_ms);
1375
if (ret)
1376
dev_dbg(dev, "timed out in RAW response - HDR:%08X\n",
1377
pack_scmi_header(&xfer->hdr));
1378
1379
return ret;
1380
}
1381
1382
/**
1383
* do_xfer() - Do one transfer
1384
*
1385
* @ph: Pointer to SCMI protocol handle
1386
* @xfer: Transfer to initiate and wait for response
1387
*
1388
* Return: -ETIMEDOUT in case of no response, if transmit error,
1389
* return corresponding error, else if all goes well,
1390
* return 0.
1391
*/
1392
static int do_xfer(const struct scmi_protocol_handle *ph,
1393
struct scmi_xfer *xfer)
1394
{
1395
int ret;
1396
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1397
struct scmi_info *info = handle_to_scmi_info(pi->handle);
1398
struct device *dev = info->dev;
1399
struct scmi_chan_info *cinfo;
1400
1401
/* Check for polling request on custom command xfers at first */
1402
if (xfer->hdr.poll_completion &&
1403
!is_transport_polling_capable(info->desc)) {
1404
dev_warn_once(dev,
1405
"Polling mode is not supported by transport.\n");
1406
scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
1407
return -EINVAL;
1408
}
1409
1410
cinfo = idr_find(&info->tx_idr, pi->proto->id);
1411
if (unlikely(!cinfo)) {
1412
scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
1413
return -EINVAL;
1414
}
1415
/* True ONLY if also supported by transport. */
1416
if (is_polling_enabled(cinfo, info->desc))
1417
xfer->hdr.poll_completion = true;
1418
1419
/*
1420
* Initialise protocol id now from protocol handle to avoid it being
1421
* overridden by mistake (or malice) by the protocol code mangling with
1422
* the scmi_xfer structure prior to this.
1423
*/
1424
xfer->hdr.protocol_id = pi->proto->id;
1425
reinit_completion(&xfer->done);
1426
1427
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
1428
xfer->hdr.protocol_id, xfer->hdr.seq,
1429
xfer->hdr.poll_completion,
1430
scmi_inflight_count(&info->handle));
1431
1432
/* Clear any stale status */
1433
xfer->hdr.status = SCMI_SUCCESS;
1434
xfer->state = SCMI_XFER_SENT_OK;
1435
/*
1436
* Even though spinlocking is not needed here since no race is possible
1437
* on xfer->state due to the monotonically increasing tokens allocation,
1438
* we must anyway ensure xfer->state initialization is not re-ordered
1439
* after the .send_message() to be sure that on the RX path an early
1440
* ISR calling scmi_rx_callback() cannot see an old stale xfer->state.
1441
*/
1442
smp_mb();
1443
1444
ret = info->desc->ops->send_message(cinfo, xfer);
1445
if (ret < 0) {
1446
dev_dbg(dev, "Failed to send message %d\n", ret);
1447
scmi_inc_count(info->dbg, SENT_FAIL);
1448
return ret;
1449
}
1450
1451
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
1452
xfer->hdr.id, "CMND", xfer->hdr.seq,
1453
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
1454
scmi_inc_count(info->dbg, SENT_OK);
1455
1456
ret = scmi_wait_for_message_response(cinfo, xfer);
1457
if (!ret && xfer->hdr.status) {
1458
ret = scmi_to_linux_errno(xfer->hdr.status);
1459
scmi_inc_count(info->dbg, ERR_PROTOCOL);
1460
}
1461
1462
if (info->desc->ops->mark_txdone)
1463
info->desc->ops->mark_txdone(cinfo, ret, xfer);
1464
1465
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
1466
xfer->hdr.protocol_id, xfer->hdr.seq, ret,
1467
scmi_inflight_count(&info->handle));
1468
1469
return ret;
1470
}
1471
1472
static void reset_rx_to_maxsz(const struct scmi_protocol_handle *ph,
1473
struct scmi_xfer *xfer)
1474
{
1475
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1476
struct scmi_info *info = handle_to_scmi_info(pi->handle);
1477
1478
xfer->rx.len = info->desc->max_msg_size;
1479
}
1480
1481
/**
1482
* do_xfer_with_response() - Do one transfer and wait until the delayed
1483
* response is received
1484
*
1485
* @ph: Pointer to SCMI protocol handle
1486
* @xfer: Transfer to initiate and wait for response
1487
*
1488
* Using asynchronous commands in atomic/polling mode should be avoided since
1489
* it could cause long busy-waiting here, so ignore polling for the delayed
1490
* response and WARN if it was requested for this command transaction since
1491
* upper layers should refrain from issuing such kind of requests.
1492
*
1493
* The only other option would have been to refrain from using any asynchronous
1494
* command even if made available, when an atomic transport is detected, and
1495
* instead forcibly use the synchronous version (thing that can be easily
1496
* attained at the protocol layer), but this would also have led to longer
1497
* stalls of the channel for synchronous commands and possibly timeouts.
1498
* (in other words there is usually a good reason if a platform provides an
1499
* asynchronous version of a command and we should prefer to use it...just not
1500
* when using atomic/polling mode)
1501
*
1502
* Return: -ETIMEDOUT in case of no delayed response, if transmit error,
1503
* return corresponding error, else if all goes well, return 0.
1504
*/
1505
static int do_xfer_with_response(const struct scmi_protocol_handle *ph,
1506
struct scmi_xfer *xfer)
1507
{
1508
int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
1509
DECLARE_COMPLETION_ONSTACK(async_response);
1510
1511
xfer->async_done = &async_response;
1512
1513
/*
1514
* Delayed responses should not be polled, so an async command should
1515
* not have been used when requiring an atomic/poll context; WARN and
1516
* perform instead a sleeping wait.
1517
* (Note Async + IgnoreDelayedResponses are sent via do_xfer)
1518
*/
1519
WARN_ON_ONCE(xfer->hdr.poll_completion);
1520
1521
ret = do_xfer(ph, xfer);
1522
if (!ret) {
1523
if (!wait_for_completion_timeout(xfer->async_done, timeout)) {
1524
dev_err(ph->dev,
1525
"timed out in delayed resp(caller: %pS)\n",
1526
(void *)_RET_IP_);
1527
ret = -ETIMEDOUT;
1528
} else if (xfer->hdr.status) {
1529
ret = scmi_to_linux_errno(xfer->hdr.status);
1530
}
1531
}
1532
1533
xfer->async_done = NULL;
1534
return ret;
1535
}
1536
1537
/**
1538
* xfer_get_init() - Allocate and initialise one message for transmit
1539
*
1540
* @ph: Pointer to SCMI protocol handle
1541
* @msg_id: Message identifier
1542
* @tx_size: transmit message size
1543
* @rx_size: receive message size
1544
* @p: pointer to the allocated and initialised message
1545
*
1546
* This function allocates the message using @scmi_xfer_get and
1547
* initialise the header.
1548
*
1549
* Return: 0 if all went fine with @p pointing to message, else
1550
* corresponding error.
1551
*/
1552
static int xfer_get_init(const struct scmi_protocol_handle *ph,
1553
u8 msg_id, size_t tx_size, size_t rx_size,
1554
struct scmi_xfer **p)
1555
{
1556
int ret;
1557
struct scmi_xfer *xfer;
1558
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1559
struct scmi_info *info = handle_to_scmi_info(pi->handle);
1560
struct scmi_xfers_info *minfo = &info->tx_minfo;
1561
struct device *dev = info->dev;
1562
1563
/* Ensure we have sane transfer sizes */
1564
if (rx_size > info->desc->max_msg_size ||
1565
tx_size > info->desc->max_msg_size)
1566
return -ERANGE;
1567
1568
xfer = scmi_xfer_get(pi->handle, minfo);
1569
if (IS_ERR(xfer)) {
1570
ret = PTR_ERR(xfer);
1571
dev_err(dev, "failed to get free message slot(%d)\n", ret);
1572
return ret;
1573
}
1574
1575
/* Pick a sequence number and register this xfer as in-flight */
1576
ret = scmi_xfer_pending_set(xfer, minfo);
1577
if (ret) {
1578
dev_err(pi->handle->dev,
1579
"Failed to get monotonic token %d\n", ret);
1580
__scmi_xfer_put(minfo, xfer);
1581
return ret;
1582
}
1583
1584
xfer->tx.len = tx_size;
1585
xfer->rx.len = rx_size ? : info->desc->max_msg_size;
1586
xfer->hdr.type = MSG_TYPE_COMMAND;
1587
xfer->hdr.id = msg_id;
1588
xfer->hdr.poll_completion = false;
1589
1590
*p = xfer;
1591
1592
return 0;
1593
}
1594
1595
/**
1596
* version_get() - command to get the revision of the SCMI entity
1597
*
1598
* @ph: Pointer to SCMI protocol handle
1599
* @version: Holds returned version of protocol.
1600
*
1601
* Updates the SCMI information in the internal data structure.
1602
*
1603
* Return: 0 if all went fine, else return appropriate error.
1604
*/
1605
static int version_get(const struct scmi_protocol_handle *ph, u32 *version)
1606
{
1607
int ret;
1608
__le32 *rev_info;
1609
struct scmi_xfer *t;
1610
1611
ret = xfer_get_init(ph, PROTOCOL_VERSION, 0, sizeof(*version), &t);
1612
if (ret)
1613
return ret;
1614
1615
ret = do_xfer(ph, t);
1616
if (!ret) {
1617
rev_info = t->rx.buf;
1618
*version = le32_to_cpu(*rev_info);
1619
}
1620
1621
xfer_put(ph, t);
1622
return ret;
1623
}
1624
1625
/**
1626
* scmi_set_protocol_priv - Set protocol specific data at init time
1627
*
1628
* @ph: A reference to the protocol handle.
1629
* @priv: The private data to set.
1630
* @version: The detected protocol version for the core to register.
1631
*
1632
* Return: 0 on Success
1633
*/
1634
static int scmi_set_protocol_priv(const struct scmi_protocol_handle *ph,
1635
void *priv, u32 version)
1636
{
1637
struct scmi_protocol_instance *pi = ph_to_pi(ph);
1638
1639
pi->priv = priv;
1640
pi->version = version;
1641
1642
return 0;
1643
}
1644
1645
/**
1646
* scmi_get_protocol_priv - Get protocol specific data set at init time
1647
*
1648
* @ph: A reference to the protocol handle.
1649
*
1650
* Return: Protocol private data if any was set.
1651
*/
1652
static void *scmi_get_protocol_priv(const struct scmi_protocol_handle *ph)
1653
{
1654
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1655
1656
return pi->priv;
1657
}
1658
1659
static const struct scmi_xfer_ops xfer_ops = {
1660
.version_get = version_get,
1661
.xfer_get_init = xfer_get_init,
1662
.reset_rx_to_maxsz = reset_rx_to_maxsz,
1663
.do_xfer = do_xfer,
1664
.do_xfer_with_response = do_xfer_with_response,
1665
.xfer_put = xfer_put,
1666
};
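/*
 * Typical protocol-side usage of the above ops (see version_get() above for
 * an in-file example): xfer_get_init() to allocate and tag an xfer,
 * do_xfer() (or do_xfer_with_response() for asynchronous commands) to
 * transmit and wait, read the reply from xfer->rx.buf, then xfer_put() to
 * release the xfer.
 */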
1667
1668
struct scmi_msg_resp_domain_name_get {
1669
__le32 flags;
1670
u8 name[SCMI_MAX_STR_SIZE];
1671
};
1672
1673
/**
1674
* scmi_common_extended_name_get - Common helper to get extended resources name
1675
* @ph: A protocol handle reference.
1676
* @cmd_id: The specific command ID to use.
1677
* @res_id: The specific resource ID to use.
1678
* @flags: A pointer to specific flags to use, if any.
1679
* @name: A pointer to the preallocated area where the retrieved name will be
1680
* stored as a NULL terminated string.
1681
* @len: The len in bytes of the @name char array.
1682
*
1683
* Return: 0 on Success
1684
*/
1685
static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
1686
u8 cmd_id, u32 res_id, u32 *flags,
1687
char *name, size_t len)
1688
{
1689
int ret;
1690
size_t txlen;
1691
struct scmi_xfer *t;
1692
struct scmi_msg_resp_domain_name_get *resp;
1693
1694
txlen = !flags ? sizeof(res_id) : sizeof(res_id) + sizeof(*flags);
1695
ret = ph->xops->xfer_get_init(ph, cmd_id, txlen, sizeof(*resp), &t);
1696
if (ret)
1697
goto out;
1698
1699
put_unaligned_le32(res_id, t->tx.buf);
1700
if (flags)
1701
put_unaligned_le32(*flags, t->tx.buf + sizeof(res_id));
1702
resp = t->rx.buf;
1703
1704
ret = ph->xops->do_xfer(ph, t);
1705
if (!ret)
1706
strscpy(name, resp->name, len);
1707
1708
ph->xops->xfer_put(ph, t);
1709
out:
1710
if (ret)
1711
dev_warn(ph->dev,
1712
"Failed to get extended name - id:%u (ret:%d). Using %s\n",
1713
res_id, ret, name);
1714
return ret;
1715
}
1716
1717
/**
1718
* scmi_common_get_max_msg_size - Get maximum message size
1719
* @ph: A protocol handle reference.
1720
*
1721
* Return: Maximum message size for the current protocol.
1722
*/
1723
static int scmi_common_get_max_msg_size(const struct scmi_protocol_handle *ph)
1724
{
1725
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1726
struct scmi_info *info = handle_to_scmi_info(pi->handle);
1727
1728
return info->desc->max_msg_size;
1729
}
1730
1731
/**
* scmi_protocol_msg_check - Check protocol message attributes
*
* @ph: A reference to the protocol handle.
* @message_id: The ID of the message to check.
* @attributes: A parameter to optionally return the retrieved message
* attributes, in case of Success.
*
* A helper to check protocol message attributes for a specific protocol
* and message pair.
*
* Return: 0 on Success
*/
1744
static int scmi_protocol_msg_check(const struct scmi_protocol_handle *ph,
1745
u32 message_id, u32 *attributes)
1746
{
1747
int ret;
1748
struct scmi_xfer *t;
1749
1750
ret = xfer_get_init(ph, PROTOCOL_MESSAGE_ATTRIBUTES,
1751
sizeof(__le32), 0, &t);
1752
if (ret)
1753
return ret;
1754
1755
put_unaligned_le32(message_id, t->tx.buf);
1756
ret = do_xfer(ph, t);
1757
if (!ret && attributes)
1758
*attributes = get_unaligned_le32(t->rx.buf);
1759
xfer_put(ph, t);
1760
1761
return ret;
1762
}
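/*
 * Example (illustrative sketch, not part of this driver): a protocol can
 * probe for an optional command before relying on it, using the
 * protocol_msg_check helper exposed via hops; the command name and the
 * flag below are hypothetical.
 *
 *	ret = ph->hops->protocol_msg_check(ph, MYPROTO_OPTIONAL_CMD, NULL);
 *	if (!ret)
 *		pinfo->has_optional_cmd = true;
 */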
1763
1764
/**
* struct scmi_iterator - Iterator descriptor
* @msg: A reference to the message TX buffer; filled by @prepare_message with
* a proper custom command payload for each multi-part command request.
* @resp: A reference to the response RX buffer; used by @update_state and
* @process_response to parse the multi-part replies.
* @t: A reference to the underlying xfer initialized and used transparently by
* the iterator internal routines.
* @ph: A reference to the associated protocol handle to be used.
* @ops: A reference to the custom provided iterator operations.
* @state: The current iterator state; used and updated in turn by the
* iterator's internal routines and by the caller-provided @scmi_iterator_ops.
* @priv: A reference to optional private data as provided by the caller and
* passed back to the @scmi_iterator_ops.
*/
1779
struct scmi_iterator {
1780
void *msg;
1781
void *resp;
1782
struct scmi_xfer *t;
1783
const struct scmi_protocol_handle *ph;
1784
struct scmi_iterator_ops *ops;
1785
struct scmi_iterator_state state;
1786
void *priv;
1787
};
1788
1789
static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
1790
struct scmi_iterator_ops *ops,
1791
unsigned int max_resources, u8 msg_id,
1792
size_t tx_size, void *priv)
1793
{
1794
int ret;
1795
struct scmi_iterator *i;
1796
1797
i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
1798
if (!i)
1799
return ERR_PTR(-ENOMEM);
1800
1801
i->ph = ph;
1802
i->ops = ops;
1803
i->priv = priv;
1804
1805
ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
1806
if (ret) {
1807
devm_kfree(ph->dev, i);
1808
return ERR_PTR(ret);
1809
}
1810
1811
i->state.max_resources = max_resources;
1812
i->msg = i->t->tx.buf;
1813
i->resp = i->t->rx.buf;
1814
1815
return i;
1816
}
1817
1818
static int scmi_iterator_run(void *iter)
1819
{
1820
int ret = -EINVAL;
1821
struct scmi_iterator_ops *iops;
1822
const struct scmi_protocol_handle *ph;
1823
struct scmi_iterator_state *st;
1824
struct scmi_iterator *i = iter;
1825
1826
if (!i || !i->ops || !i->ph)
1827
return ret;
1828
1829
iops = i->ops;
1830
ph = i->ph;
1831
st = &i->state;
1832
1833
do {
1834
iops->prepare_message(i->msg, st->desc_index, i->priv);
1835
ret = ph->xops->do_xfer(ph, i->t);
1836
if (ret)
1837
break;
1838
1839
st->rx_len = i->t->rx.len;
1840
ret = iops->update_state(st, i->resp, i->priv);
1841
if (ret)
1842
break;
1843
1844
if (st->num_returned > st->max_resources - st->desc_index) {
1845
dev_err(ph->dev,
1846
"No. of resources can't exceed %d\n",
1847
st->max_resources);
1848
ret = -EINVAL;
1849
break;
1850
}
1851
1852
for (st->loop_idx = 0; st->loop_idx < st->num_returned;
1853
st->loop_idx++) {
1854
ret = iops->process_response(ph, i->resp, st, i->priv);
1855
if (ret)
1856
goto out;
1857
}
1858
1859
st->desc_index += st->num_returned;
1860
ph->xops->reset_rx_to_maxsz(ph, i->t);
1861
/*
1862
* check for both returned and remaining to avoid infinite
1863
* loop due to buggy firmware
1864
*/
1865
} while (st->num_returned && st->num_remaining);
1866
1867
out:
1868
/* Finalize and destroy iterator */
1869
ph->xops->xfer_put(ph, i->t);
1870
devm_kfree(ph->dev, i);
1871
1872
return ret;
1873
}
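/*
 * Example (illustrative sketch, not part of this driver): a protocol walks a
 * multi-part reply by providing the three scmi_iterator_ops callbacks and
 * letting the iterator drive the command loop; callback, command and message
 * structure names below are hypothetical.
 *
 *	struct scmi_iterator_ops ops = {
 *		.prepare_message = iter_prepare_message,
 *		.update_state = iter_update_state,
 *		.process_response = iter_process_response,
 *	};
 *	void *iter;
 *
 *	iter = ph->hops->iter_response_init(ph, &ops, num_domains,
 *					    MYPROTO_DESCRIBE_LEVELS,
 *					    sizeof(struct scmi_msg_myproto_describe),
 *					    dom_info);
 *	if (IS_ERR(iter))
 *		return PTR_ERR(iter);
 *
 *	return ph->hops->iter_response_run(iter);
 */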
1874
1875
struct scmi_msg_get_fc_info {
1876
__le32 domain;
1877
__le32 message_id;
1878
};
1879
1880
struct scmi_msg_resp_desc_fc {
1881
__le32 attr;
1882
#define SUPPORTS_DOORBELL(x) ((x) & BIT(0))
1883
#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x))
1884
__le32 rate_limit;
1885
__le32 chan_addr_low;
1886
__le32 chan_addr_high;
1887
__le32 chan_size;
1888
__le32 db_addr_low;
1889
__le32 db_addr_high;
1890
__le32 db_set_lmask;
1891
__le32 db_set_hmask;
1892
__le32 db_preserve_lmask;
1893
__le32 db_preserve_hmask;
1894
};
1895
1896
#define QUIRK_PERF_FC_FORCE \
1897
({ \
1898
if (pi->proto->id == SCMI_PROTOCOL_PERF && \
1899
message_id == 0x8 /* PERF_LEVEL_GET */) \
1900
attributes |= BIT(0); \
1901
})
1902
1903
static void
1904
scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph,
1905
u8 describe_id, u32 message_id, u32 valid_size,
1906
u32 domain, void __iomem **p_addr,
1907
struct scmi_fc_db_info **p_db, u32 *rate_limit)
1908
{
1909
int ret;
1910
u32 flags;
1911
u64 phys_addr;
1912
u32 attributes;
1913
u8 size;
1914
void __iomem *addr;
1915
struct scmi_xfer *t;
1916
struct scmi_fc_db_info *db = NULL;
1917
struct scmi_msg_get_fc_info *info;
1918
struct scmi_msg_resp_desc_fc *resp;
1919
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
1920
1921
/* Check if the MSG_ID supports fastchannel */
1922
ret = scmi_protocol_msg_check(ph, message_id, &attributes);
1923
SCMI_QUIRK(perf_level_get_fc_force, QUIRK_PERF_FC_FORCE);
1924
if (ret || !MSG_SUPPORTS_FASTCHANNEL(attributes)) {
1925
dev_dbg(ph->dev,
1926
"Skip FC init for 0x%02X/%d domain:%d - ret:%d\n",
1927
pi->proto->id, message_id, domain, ret);
1928
return;
1929
}
1930
1931
if (!p_addr) {
1932
ret = -EINVAL;
1933
goto err_out;
1934
}
1935
1936
ret = ph->xops->xfer_get_init(ph, describe_id,
1937
sizeof(*info), sizeof(*resp), &t);
1938
if (ret)
1939
goto err_out;
1940
1941
info = t->tx.buf;
1942
info->domain = cpu_to_le32(domain);
1943
info->message_id = cpu_to_le32(message_id);
1944
1945
/*
1946
* Bail out on error leaving fc_info addresses zeroed; this includes
1947
* the case in which the requested domain/message_id does NOT support
1948
* fastchannels at all.
1949
*/
1950
ret = ph->xops->do_xfer(ph, t);
1951
if (ret)
1952
goto err_xfer;
1953
1954
resp = t->rx.buf;
1955
flags = le32_to_cpu(resp->attr);
1956
size = le32_to_cpu(resp->chan_size);
1957
if (size != valid_size) {
1958
ret = -EINVAL;
1959
goto err_xfer;
1960
}
1961
1962
if (rate_limit)
1963
*rate_limit = le32_to_cpu(resp->rate_limit) & GENMASK(19, 0);
1964
1965
phys_addr = le32_to_cpu(resp->chan_addr_low);
1966
phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32;
1967
addr = devm_ioremap(ph->dev, phys_addr, size);
1968
if (!addr) {
1969
ret = -EADDRNOTAVAIL;
1970
goto err_xfer;
1971
}
1972
1973
*p_addr = addr;
1974
1975
if (p_db && SUPPORTS_DOORBELL(flags)) {
1976
db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL);
1977
if (!db) {
1978
ret = -ENOMEM;
1979
goto err_db;
1980
}
1981
1982
size = 1 << DOORBELL_REG_WIDTH(flags);
1983
phys_addr = le32_to_cpu(resp->db_addr_low);
1984
phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32;
1985
addr = devm_ioremap(ph->dev, phys_addr, size);
1986
if (!addr) {
1987
ret = -EADDRNOTAVAIL;
1988
goto err_db_mem;
1989
}
1990
1991
db->addr = addr;
1992
db->width = size;
1993
db->set = le32_to_cpu(resp->db_set_lmask);
1994
db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32;
1995
db->mask = le32_to_cpu(resp->db_preserve_lmask);
1996
db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32;
1997
1998
*p_db = db;
1999
}
2000
2001
ph->xops->xfer_put(ph, t);
2002
2003
dev_dbg(ph->dev,
2004
"Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n",
2005
pi->proto->id, message_id, domain);
2006
2007
return;
2008
2009
err_db_mem:
2010
devm_kfree(ph->dev, db);
2011
2012
err_db:
2013
*p_addr = NULL;
2014
2015
err_xfer:
2016
ph->xops->xfer_put(ph, t);
2017
2018
err_out:
2019
dev_warn(ph->dev,
2020
"Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. Using regular messaging.\n",
2021
pi->proto->id, message_id, domain, ret);
2022
}
2023
2024
#define SCMI_PROTO_FC_RING_DB(w) \
2025
do { \
2026
u##w val = 0; \
2027
\
2028
if (db->mask) \
2029
val = ioread##w(db->addr) & db->mask; \
2030
iowrite##w((u##w)db->set | val, db->addr); \
2031
} while (0)
2032
2033
static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db)
2034
{
2035
if (!db || !db->addr)
2036
return;
2037
2038
if (db->width == 1)
2039
SCMI_PROTO_FC_RING_DB(8);
2040
else if (db->width == 2)
2041
SCMI_PROTO_FC_RING_DB(16);
2042
else if (db->width == 4)
2043
SCMI_PROTO_FC_RING_DB(32);
2044
else /* db->width == 8 */
2045
SCMI_PROTO_FC_RING_DB(64);
2046
}
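/*
 * Example (illustrative sketch, not part of this driver): a protocol that
 * supports fastchannels asks the core to map them at init time and then
 * rings the doorbell, if one was described, after poking the shared channel
 * area; the command names and the fc structure below are hypothetical.
 *
 *	ph->hops->fastchannel_init(ph, MYPROTO_DESCRIBE_FASTCHANNEL,
 *				   MYPROTO_LEVEL_SET, sizeof(u32), domain,
 *				   &fc->set_addr, &fc->set_db, &fc->rate_limit);
 *
 *	// ...later, on the hot path...
 *	if (fc->set_addr) {
 *		iowrite32(level, fc->set_addr);
 *		ph->hops->fastchannel_db_ring(fc->set_db);
 *	}
 */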
2047
2048
static const struct scmi_proto_helpers_ops helpers_ops = {
2049
.extended_name_get = scmi_common_extended_name_get,
2050
.get_max_msg_size = scmi_common_get_max_msg_size,
2051
.iter_response_init = scmi_iterator_init,
2052
.iter_response_run = scmi_iterator_run,
2053
.protocol_msg_check = scmi_protocol_msg_check,
2054
.fastchannel_init = scmi_common_fastchannel_init,
2055
.fastchannel_db_ring = scmi_common_fastchannel_db_ring,
2056
};
2057
2058
/**
2059
* scmi_revision_area_get - Retrieve version memory area.
2060
*
2061
* @ph: A reference to the protocol handle.
2062
*
2063
* A helper to grab the version memory area reference during SCMI Base protocol
2064
* initialization.
2065
*
2066
* Return: A reference to the version memory area associated to the SCMI
2067
* instance underlying this protocol handle.
2068
*/
2069
struct scmi_revision_info *
2070
scmi_revision_area_get(const struct scmi_protocol_handle *ph)
2071
{
2072
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2073
2074
return pi->handle->version;
2075
}
2076
2077
/**
* scmi_protocol_version_negotiate - Negotiate protocol version
*
* @ph: A reference to the protocol handle.
*
* A helper to negotiate a protocol version different from the latest
* advertised as supported by the platform: on Success backward
* compatibility is assured by the platform.
*
* Return: 0 on Success
*/
2088
static int scmi_protocol_version_negotiate(struct scmi_protocol_handle *ph)
2089
{
2090
int ret;
2091
struct scmi_xfer *t;
2092
struct scmi_protocol_instance *pi = ph_to_pi(ph);
2093
2094
/* At first check if NEGOTIATE_PROTOCOL_VERSION is supported ... */
2095
ret = scmi_protocol_msg_check(ph, NEGOTIATE_PROTOCOL_VERSION, NULL);
2096
if (ret)
2097
return ret;
2098
2099
/* ... then attempt protocol version negotiation */
2100
ret = xfer_get_init(ph, NEGOTIATE_PROTOCOL_VERSION,
2101
sizeof(__le32), 0, &t);
2102
if (ret)
2103
return ret;
2104
2105
put_unaligned_le32(pi->proto->supported_version, t->tx.buf);
2106
ret = do_xfer(ph, t);
2107
if (!ret)
2108
pi->negotiated_version = pi->proto->supported_version;
2109
2110
xfer_put(ph, t);
2111
2112
return ret;
2113
}
2114
2115
/**
* scmi_alloc_init_protocol_instance - Allocate and initialize a protocol
* instance descriptor.
* @info: The reference to the related SCMI instance.
* @proto: The protocol descriptor.
*
* Allocate a new protocol instance descriptor, using the provided @proto
* description, against the specified SCMI instance @info, and initialize it;
* all resource management is handled via a dedicated per-protocol devres
* group.
*
* Context: Expected to be called with @protocols_mtx already acquired.
* Return: A reference to a freshly allocated and initialized protocol instance
* or ERR_PTR on failure. On failure the @proto reference is first
* put using @scmi_protocol_put() before releasing the whole devres group.
*/
2131
static struct scmi_protocol_instance *
2132
scmi_alloc_init_protocol_instance(struct scmi_info *info,
2133
const struct scmi_protocol *proto)
2134
{
2135
int ret = -ENOMEM;
2136
void *gid;
2137
struct scmi_protocol_instance *pi;
2138
const struct scmi_handle *handle = &info->handle;
2139
2140
/* Protocol specific devres group */
2141
gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
2142
if (!gid) {
2143
scmi_protocol_put(proto);
2144
goto out;
2145
}
2146
2147
pi = devm_kzalloc(handle->dev, sizeof(*pi), GFP_KERNEL);
2148
if (!pi)
2149
goto clean;
2150
2151
pi->gid = gid;
2152
pi->proto = proto;
2153
pi->handle = handle;
2154
pi->ph.dev = handle->dev;
2155
pi->ph.xops = &xfer_ops;
2156
pi->ph.hops = &helpers_ops;
2157
pi->ph.set_priv = scmi_set_protocol_priv;
2158
pi->ph.get_priv = scmi_get_protocol_priv;
2159
refcount_set(&pi->users, 1);
2160
/* proto->init is assured NON NULL by scmi_protocol_register */
2161
ret = pi->proto->instance_init(&pi->ph);
2162
if (ret)
2163
goto clean;
2164
2165
ret = idr_alloc(&info->protocols, pi, proto->id, proto->id + 1,
2166
GFP_KERNEL);
2167
if (ret != proto->id)
2168
goto clean;
2169
2170
/*
2171
* Warn but ignore events registration errors since we do not want
2172
* to skip whole protocols if their notifications are messed up.
2173
*/
2174
if (pi->proto->events) {
2175
ret = scmi_register_protocol_events(handle, pi->proto->id,
2176
&pi->ph,
2177
pi->proto->events);
2178
if (ret)
2179
dev_warn(handle->dev,
2180
"Protocol:%X - Events Registration Failed - err:%d\n",
2181
pi->proto->id, ret);
2182
}
2183
2184
devres_close_group(handle->dev, pi->gid);
2185
dev_dbg(handle->dev, "Initialized protocol: 0x%X\n", pi->proto->id);
2186
2187
if (pi->version > proto->supported_version) {
2188
ret = scmi_protocol_version_negotiate(&pi->ph);
2189
if (!ret) {
2190
dev_info(handle->dev,
2191
"Protocol 0x%X successfully negotiated version 0x%X\n",
2192
proto->id, pi->negotiated_version);
2193
} else {
2194
dev_warn(handle->dev,
2195
"Detected UNSUPPORTED higher version 0x%X for protocol 0x%X.\n",
2196
pi->version, pi->proto->id);
2197
dev_warn(handle->dev,
2198
"Trying version 0x%X. Backward compatibility is NOT assured.\n",
2199
pi->proto->supported_version);
2200
}
2201
}
2202
2203
return pi;
2204
2205
clean:
2206
/* Take care to put the protocol module's owner before releasing all */
2207
scmi_protocol_put(proto);
2208
devres_release_group(handle->dev, gid);
2209
out:
2210
return ERR_PTR(ret);
2211
}
2212
2213
/**
2214
* scmi_get_protocol_instance - Protocol initialization helper.
2215
* @handle: A reference to the SCMI platform instance.
2216
* @protocol_id: The protocol being requested.
2217
*
2218
* In case the required protocol has never been requested before for this
2219
* instance, allocate and initialize all the needed structures while handling
2220
* resource allocation with a dedicated per-protocol devres subgroup.
2221
*
2222
* Return: A reference to an initialized protocol instance or error on failure:
2223
* in particular returns -EPROBE_DEFER when the desired protocol could
2224
* NOT be found.
2225
*/
2226
static struct scmi_protocol_instance * __must_check
2227
scmi_get_protocol_instance(const struct scmi_handle *handle, u8 protocol_id)
2228
{
2229
struct scmi_protocol_instance *pi;
2230
struct scmi_info *info = handle_to_scmi_info(handle);
2231
2232
mutex_lock(&info->protocols_mtx);
2233
pi = idr_find(&info->protocols, protocol_id);
2234
2235
if (pi) {
2236
refcount_inc(&pi->users);
2237
} else {
2238
const struct scmi_protocol *proto;
2239
2240
/* Fails if protocol not registered on bus */
2241
proto = scmi_protocol_get(protocol_id, &info->version);
2242
if (proto)
2243
pi = scmi_alloc_init_protocol_instance(info, proto);
2244
else
2245
pi = ERR_PTR(-EPROBE_DEFER);
2246
}
2247
mutex_unlock(&info->protocols_mtx);
2248
2249
return pi;
2250
}
2251
2252
/**
2253
* scmi_protocol_acquire - Protocol acquire
2254
* @handle: A reference to the SCMI platform instance.
2255
* @protocol_id: The protocol being requested.
2256
*
2257
* Register a new user for the requested protocol on the specified SCMI
2258
* platform instance, possibly triggering its initialization on first user.
2259
*
2260
* Return: 0 if protocol was acquired successfully.
2261
*/
2262
int scmi_protocol_acquire(const struct scmi_handle *handle, u8 protocol_id)
2263
{
2264
return PTR_ERR_OR_ZERO(scmi_get_protocol_instance(handle, protocol_id));
2265
}
2266
2267
/**
2268
* scmi_protocol_release - Protocol de-initialization helper.
2269
* @handle: A reference to the SCMI platform instance.
2270
* @protocol_id: The protocol being requested.
2271
*
2272
* Remove one user for the specified protocol and triggers de-initialization
2273
* and resources de-allocation once the last user has gone.
2274
*/
2275
void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id)
2276
{
2277
struct scmi_info *info = handle_to_scmi_info(handle);
2278
struct scmi_protocol_instance *pi;
2279
2280
mutex_lock(&info->protocols_mtx);
2281
pi = idr_find(&info->protocols, protocol_id);
2282
if (WARN_ON(!pi))
2283
goto out;
2284
2285
if (refcount_dec_and_test(&pi->users)) {
2286
void *gid = pi->gid;
2287
2288
if (pi->proto->events)
2289
scmi_deregister_protocol_events(handle, protocol_id);
2290
2291
if (pi->proto->instance_deinit)
2292
pi->proto->instance_deinit(&pi->ph);
2293
2294
idr_remove(&info->protocols, protocol_id);
2295
2296
scmi_protocol_put(pi->proto);
2297
2298
devres_release_group(handle->dev, gid);
2299
dev_dbg(handle->dev, "De-Initialized protocol: 0x%X\n",
2300
protocol_id);
2301
}
2302
2303
out:
2304
mutex_unlock(&info->protocols_mtx);
2305
}
2306
2307
void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
2308
u8 *prot_imp)
2309
{
2310
const struct scmi_protocol_instance *pi = ph_to_pi(ph);
2311
struct scmi_info *info = handle_to_scmi_info(pi->handle);
2312
2313
info->protocols_imp = prot_imp;
2314
}
2315
2316
static bool
2317
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
2318
{
2319
int i;
2320
struct scmi_info *info = handle_to_scmi_info(handle);
2321
struct scmi_revision_info *rev = handle->version;
2322
2323
if (!info->protocols_imp)
2324
return false;
2325
2326
for (i = 0; i < rev->num_protocols; i++)
2327
if (info->protocols_imp[i] == prot_id)
2328
return true;
2329
return false;
2330
}
2331
2332
struct scmi_protocol_devres {
2333
const struct scmi_handle *handle;
2334
u8 protocol_id;
2335
};
2336
2337
static void scmi_devm_release_protocol(struct device *dev, void *res)
2338
{
2339
struct scmi_protocol_devres *dres = res;
2340
2341
scmi_protocol_release(dres->handle, dres->protocol_id);
2342
}
2343
2344
static struct scmi_protocol_instance __must_check *
2345
scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id)
2346
{
2347
struct scmi_protocol_instance *pi;
2348
struct scmi_protocol_devres *dres;
2349
2350
dres = devres_alloc(scmi_devm_release_protocol,
2351
sizeof(*dres), GFP_KERNEL);
2352
if (!dres)
2353
return ERR_PTR(-ENOMEM);
2354
2355
pi = scmi_get_protocol_instance(sdev->handle, protocol_id);
2356
if (IS_ERR(pi)) {
2357
devres_free(dres);
2358
return pi;
2359
}
2360
2361
dres->handle = sdev->handle;
2362
dres->protocol_id = protocol_id;
2363
devres_add(&sdev->dev, dres);
2364
2365
return pi;
2366
}
2367
2368
/**
* scmi_devm_protocol_get - Devres managed get protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @protocol_id: The protocol being requested.
* @ph: A pointer reference used to pass back the associated protocol handle.
*
* Get hold of a protocol accounting for its usage, possibly triggering its
* initialization, and returning the protocol specific operations and related
* protocol handle which will be used as first argument in most of the
* protocol operations methods.
* Being a devres based managed method, the protocol hold will be automatically
* released, and possibly de-initialized on last user, once the SCMI driver
* owning the scmi_device is unbound from it.
*
* Return: A reference to the requested protocol operations or error.
* Must be checked for errors by caller.
*/
2386
static const void __must_check *
2387
scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id,
2388
struct scmi_protocol_handle **ph)
2389
{
2390
struct scmi_protocol_instance *pi;
2391
2392
if (!ph)
2393
return ERR_PTR(-EINVAL);
2394
2395
pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2396
if (IS_ERR(pi))
2397
return pi;
2398
2399
*ph = &pi->ph;
2400
2401
return pi->proto->ops;
2402
}
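/*
 * Example (illustrative sketch, not part of this driver): an SCMI driver
 * bound to an scmi_device typically grabs its protocol operations from its
 * probe routine through the devm_protocol_get method published on the handle
 * by scmi_probe() below; the driver and the chosen protocol are hypothetical.
 *
 *	static int scmi_mydriver_probe(struct scmi_device *sdev)
 *	{
 *		const struct scmi_handle *handle = sdev->handle;
 *		struct scmi_protocol_handle *ph;
 *		const struct scmi_clk_proto_ops *clk_ops;
 *
 *		if (!handle)
 *			return -ENODEV;
 *
 *		clk_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_CLOCK, &ph);
 *		if (IS_ERR(clk_ops))
 *			return PTR_ERR(clk_ops);
 *
 *		// clk_ops and ph remain valid until the driver is unbound.
 *		return 0;
 *	}
 */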
2403
2404
/**
2405
* scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol
2406
* @sdev: A reference to an scmi_device whose embedded struct device is to
2407
* be used for devres accounting.
2408
* @protocol_id: The protocol being requested.
2409
*
2410
* Get hold of a protocol accounting for its usage, possibly triggering its
2411
* initialization but without getting access to its protocol specific operations
2412
* and handle.
2413
*
2414
* Being a devres based managed method, protocol hold will be automatically
2415
* released, and possibly de-initialized on last user, once the SCMI driver
2416
* owning the scmi_device is unbound from it.
2417
*
2418
* Return: 0 on SUCCESS
2419
*/
2420
static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev,
2421
u8 protocol_id)
2422
{
2423
struct scmi_protocol_instance *pi;
2424
2425
pi = scmi_devres_protocol_instance_get(sdev, protocol_id);
2426
if (IS_ERR(pi))
2427
return PTR_ERR(pi);
2428
2429
return 0;
2430
}
2431
2432
static int scmi_devm_protocol_match(struct device *dev, void *res, void *data)
2433
{
2434
struct scmi_protocol_devres *dres = res;
2435
2436
if (WARN_ON(!dres || !data))
2437
return 0;
2438
2439
return dres->protocol_id == *((u8 *)data);
2440
}
2441
2442
/**
* scmi_devm_protocol_put - Devres managed put protocol operations and handle
* @sdev: A reference to an scmi_device whose embedded struct device is to
* be used for devres accounting.
* @protocol_id: The protocol being requested.
*
* Explicitly release a protocol hold previously obtained by calling the above
* @scmi_devm_protocol_get.
*/
2451
static void scmi_devm_protocol_put(struct scmi_device *sdev, u8 protocol_id)
2452
{
2453
int ret;
2454
2455
ret = devres_release(&sdev->dev, scmi_devm_release_protocol,
2456
scmi_devm_protocol_match, &protocol_id);
2457
WARN_ON(ret);
2458
}
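/*
 * Example (illustrative sketch, not part of this driver): most users simply
 * rely on the devres cleanup, but a driver may drop its hold early; the
 * protocol chosen here is arbitrary.
 *
 *	sdev->handle->devm_protocol_put(sdev, SCMI_PROTOCOL_CLOCK);
 */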
2459
2460
/**
2461
* scmi_is_transport_atomic - Method to check if underlying transport for an
2462
* SCMI instance is configured as atomic.
2463
*
2464
* @handle: A reference to the SCMI platform instance.
2465
* @atomic_threshold: An optional return value for the system wide currently
2466
* configured threshold for atomic operations.
2467
*
2468
* Return: True if transport is configured as atomic
2469
*/
2470
static bool scmi_is_transport_atomic(const struct scmi_handle *handle,
2471
unsigned int *atomic_threshold)
2472
{
2473
bool ret;
2474
struct scmi_info *info = handle_to_scmi_info(handle);
2475
2476
ret = info->desc->atomic_enabled &&
2477
is_transport_polling_capable(info->desc);
2478
if (ret && atomic_threshold)
2479
*atomic_threshold = info->desc->atomic_threshold;
2480
2481
return ret;
2482
}
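/*
 * Example (illustrative sketch, not part of this driver): consumers that can
 * work either atomically or via sleeping calls query the handle to pick a
 * mode; the variable names are hypothetical.
 *
 *	unsigned int atomic_threshold_us;
 *
 *	if (handle->is_transport_atomic(handle, &atomic_threshold_us))
 *		dev_info(dev, "Using atomic SCMI ops (threshold %uus)\n",
 *			 atomic_threshold_us);
 */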
2483
2484
/**
* scmi_handle_get() - Get the SCMI handle for a device
*
* @dev: pointer to device for which we want SCMI handle
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by the caller of the SCMI protocol library.
* scmi_handle_put must be balanced with a successful scmi_handle_get
*
* Return: pointer to handle if successful, NULL on error
*/
2495
static struct scmi_handle *scmi_handle_get(struct device *dev)
2496
{
2497
struct list_head *p;
2498
struct scmi_info *info;
2499
struct scmi_handle *handle = NULL;
2500
2501
mutex_lock(&scmi_list_mutex);
2502
list_for_each(p, &scmi_list) {
2503
info = list_entry(p, struct scmi_info, node);
2504
if (dev->parent == info->dev) {
2505
info->users++;
2506
handle = &info->handle;
2507
break;
2508
}
2509
}
2510
mutex_unlock(&scmi_list_mutex);
2511
2512
return handle;
2513
}
2514
2515
/**
* scmi_handle_put() - Release the handle acquired by scmi_handle_get
*
* @handle: handle acquired by scmi_handle_get
*
* NOTE: The function does not track individual clients of the framework
* and is expected to be maintained by the caller of the SCMI protocol library.
* scmi_handle_put must be balanced with a successful scmi_handle_get
*
* Return: 0 if successfully released, or -EINVAL if a NULL handle was passed.
*/
2527
static int scmi_handle_put(const struct scmi_handle *handle)
2528
{
2529
struct scmi_info *info;
2530
2531
if (!handle)
2532
return -EINVAL;
2533
2534
info = handle_to_scmi_info(handle);
2535
mutex_lock(&scmi_list_mutex);
2536
if (!WARN_ON(!info->users))
2537
info->users--;
2538
mutex_unlock(&scmi_list_mutex);
2539
2540
return 0;
2541
}
2542
2543
static void scmi_device_link_add(struct device *consumer,
2544
struct device *supplier)
2545
{
2546
struct device_link *link;
2547
2548
link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER);
2549
2550
WARN_ON(!link);
2551
}
2552
2553
static void scmi_set_handle(struct scmi_device *scmi_dev)
2554
{
2555
scmi_dev->handle = scmi_handle_get(&scmi_dev->dev);
2556
if (scmi_dev->handle)
2557
scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev);
2558
}
2559
2560
static int __scmi_xfer_info_init(struct scmi_info *sinfo,
2561
struct scmi_xfers_info *info)
2562
{
2563
int i;
2564
struct scmi_xfer *xfer;
2565
struct device *dev = sinfo->dev;
2566
const struct scmi_desc *desc = sinfo->desc;
2567
2568
/* Pre-allocated messages, no more than what hdr.seq can support */
2569
if (WARN_ON(!info->max_msg || info->max_msg > MSG_TOKEN_MAX)) {
2570
dev_err(dev,
2571
"Invalid maximum messages %d, not in range [1 - %lu]\n",
2572
info->max_msg, MSG_TOKEN_MAX);
2573
return -EINVAL;
2574
}
2575
2576
hash_init(info->pending_xfers);
2577
2578
/* Allocate a bitmask sized to hold MSG_TOKEN_MAX tokens */
2579
info->xfer_alloc_table = devm_bitmap_zalloc(dev, MSG_TOKEN_MAX,
2580
GFP_KERNEL);
2581
if (!info->xfer_alloc_table)
2582
return -ENOMEM;
2583
2584
/*
2585
* Preallocate a number of xfers equal to max inflight messages,
2586
* pre-initialize the buffer pointer to pre-allocated buffers and
2587
* attach all of them to the free list
2588
*/
2589
INIT_HLIST_HEAD(&info->free_xfers);
2590
for (i = 0; i < info->max_msg; i++) {
2591
xfer = devm_kzalloc(dev, sizeof(*xfer), GFP_KERNEL);
2592
if (!xfer)
2593
return -ENOMEM;
2594
2595
xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
2596
GFP_KERNEL);
2597
if (!xfer->rx.buf)
2598
return -ENOMEM;
2599
2600
xfer->tx.buf = xfer->rx.buf;
2601
init_completion(&xfer->done);
2602
spin_lock_init(&xfer->lock);
2603
2604
/* Add initialized xfer to the free list */
2605
hlist_add_head(&xfer->node, &info->free_xfers);
2606
}
2607
2608
spin_lock_init(&info->xfer_lock);
2609
2610
return 0;
2611
}
2612
2613
static int scmi_channels_max_msg_configure(struct scmi_info *sinfo)
2614
{
2615
const struct scmi_desc *desc = sinfo->desc;
2616
2617
if (!desc->ops->get_max_msg) {
2618
sinfo->tx_minfo.max_msg = desc->max_msg;
2619
sinfo->rx_minfo.max_msg = desc->max_msg;
2620
} else {
2621
struct scmi_chan_info *base_cinfo;
2622
2623
base_cinfo = idr_find(&sinfo->tx_idr, SCMI_PROTOCOL_BASE);
2624
if (!base_cinfo)
2625
return -EINVAL;
2626
sinfo->tx_minfo.max_msg = desc->ops->get_max_msg(base_cinfo);
2627
2628
/* RX channel is optional so can be skipped */
2629
base_cinfo = idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE);
2630
if (base_cinfo)
2631
sinfo->rx_minfo.max_msg =
2632
desc->ops->get_max_msg(base_cinfo);
2633
}
2634
2635
return 0;
2636
}
2637
2638
static int scmi_xfer_info_init(struct scmi_info *sinfo)
2639
{
2640
int ret;
2641
2642
ret = scmi_channels_max_msg_configure(sinfo);
2643
if (ret)
2644
return ret;
2645
2646
ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);
2647
if (!ret && !idr_is_empty(&sinfo->rx_idr))
2648
ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);
2649
2650
return ret;
2651
}
2652
2653
static int scmi_chan_setup(struct scmi_info *info, struct device_node *of_node,
2654
int prot_id, bool tx)
2655
{
2656
int ret, idx;
2657
char name[32];
2658
struct scmi_chan_info *cinfo;
2659
struct idr *idr;
2660
struct scmi_device *tdev = NULL;
2661
2662
/* Transmit channel is first entry i.e. index 0 */
2663
idx = tx ? 0 : 1;
2664
idr = tx ? &info->tx_idr : &info->rx_idr;
2665
2666
if (!info->desc->ops->chan_available(of_node, idx)) {
2667
cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
2668
if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
2669
return -EINVAL;
2670
goto idr_alloc;
2671
}
2672
2673
cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
2674
if (!cinfo)
2675
return -ENOMEM;
2676
2677
cinfo->is_p2a = !tx;
2678
cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms;
2679
cinfo->max_msg_size = info->desc->max_msg_size;
2680
2681
/* Create a unique name for this transport device */
2682
snprintf(name, 32, "__scmi_transport_device_%s_%02X",
2683
idx ? "rx" : "tx", prot_id);
2684
/* Create a uniquely named, dedicated transport device for this chan */
2685
tdev = scmi_device_create(of_node, info->dev, prot_id, name);
2686
if (!tdev) {
2687
dev_err(info->dev,
2688
"failed to create transport device (%s)\n", name);
2689
devm_kfree(info->dev, cinfo);
2690
return -EINVAL;
2691
}
2692
of_node_get(of_node);
2693
2694
cinfo->id = prot_id;
2695
cinfo->dev = &tdev->dev;
2696
ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
2697
if (ret) {
2698
of_node_put(of_node);
2699
scmi_device_destroy(info->dev, prot_id, name);
2700
devm_kfree(info->dev, cinfo);
2701
return ret;
2702
}
2703
2704
if (tx && is_polling_required(cinfo, info->desc)) {
2705
if (is_transport_polling_capable(info->desc))
2706
dev_info(&tdev->dev,
2707
"Enabled polling mode TX channel - prot_id:%d\n",
2708
prot_id);
2709
else
2710
dev_warn(&tdev->dev,
2711
"Polling mode NOT supported by transport.\n");
2712
}
2713
2714
idr_alloc:
2715
ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
2716
if (ret != prot_id) {
2717
dev_err(info->dev,
2718
"unable to allocate SCMI idr slot err %d\n", ret);
2719
/* Destroy channel and device only if created by this call. */
2720
if (tdev) {
2721
of_node_put(of_node);
2722
scmi_device_destroy(info->dev, prot_id, name);
2723
devm_kfree(info->dev, cinfo);
2724
}
2725
return ret;
2726
}
2727
2728
cinfo->handle = &info->handle;
2729
return 0;
2730
}
2731
2732
static inline int
2733
scmi_txrx_setup(struct scmi_info *info, struct device_node *of_node,
2734
int prot_id)
2735
{
2736
int ret = scmi_chan_setup(info, of_node, prot_id, true);
2737
2738
if (!ret) {
2739
/* Rx is optional, report only memory errors */
2740
ret = scmi_chan_setup(info, of_node, prot_id, false);
2741
if (ret && ret != -ENOMEM)
2742
ret = 0;
2743
}
2744
2745
if (ret)
2746
dev_err(info->dev,
2747
"failed to setup channel for protocol:0x%X\n", prot_id);
2748
2749
return ret;
2750
}
2751
2752
/**
* scmi_channels_setup - Helper to initialize all required channels
*
* @info: The SCMI instance descriptor.
*
* Initialize all the channels described in the DT against the underlying
* configured transport using custom defined dedicated devices instead of
* borrowing devices from the SCMI drivers; this way channels are initialized
* upfront during core SCMI stack probing and are no longer coupled with SCMI
* devices used by SCMI drivers.
*
* Note that, even though a pair of TX/RX channels is associated with each
* protocol defined in the DT, a distinct freshly initialized channel is
* created only if the DT node for the protocol at hand describes a dedicated
* channel: in all the other cases the common BASE protocol channel is reused.
*
* Return: 0 on Success
*/
2770
static int scmi_channels_setup(struct scmi_info *info)
2771
{
2772
int ret;
2773
struct device_node *top_np = info->dev->of_node;
2774
2775
/* Initialize a common generic channel at first */
2776
ret = scmi_txrx_setup(info, top_np, SCMI_PROTOCOL_BASE);
2777
if (ret)
2778
return ret;
2779
2780
for_each_available_child_of_node_scoped(top_np, child) {
2781
u32 prot_id;
2782
2783
if (of_property_read_u32(child, "reg", &prot_id))
2784
continue;
2785
2786
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
2787
dev_err(info->dev,
2788
"Out of range protocol %d\n", prot_id);
2789
2790
ret = scmi_txrx_setup(info, child, prot_id);
2791
if (ret)
2792
return ret;
2793
}
2794
2795
return 0;
2796
}
2797
2798
static int scmi_chan_destroy(int id, void *p, void *idr)
2799
{
2800
struct scmi_chan_info *cinfo = p;
2801
2802
if (cinfo->dev) {
2803
struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
2804
struct scmi_device *sdev = to_scmi_dev(cinfo->dev);
2805
2806
of_node_put(cinfo->dev->of_node);
2807
scmi_device_destroy(info->dev, id, sdev->name);
2808
cinfo->dev = NULL;
2809
}
2810
2811
idr_remove(idr, id);
2812
2813
return 0;
2814
}
2815
2816
static void scmi_cleanup_channels(struct scmi_info *info, struct idr *idr)
2817
{
2818
/* At first free all channels at the transport layer ... */
2819
idr_for_each(idr, info->desc->ops->chan_free, idr);
2820
2821
/* ...then destroy all underlying devices */
2822
idr_for_each(idr, scmi_chan_destroy, idr);
2823
2824
idr_destroy(idr);
2825
}
2826
2827
static void scmi_cleanup_txrx_channels(struct scmi_info *info)
2828
{
2829
scmi_cleanup_channels(info, &info->tx_idr);
2830
2831
scmi_cleanup_channels(info, &info->rx_idr);
2832
}
2833
2834
static int scmi_bus_notifier(struct notifier_block *nb,
2835
unsigned long action, void *data)
2836
{
2837
struct scmi_info *info = bus_nb_to_scmi_info(nb);
2838
struct scmi_device *sdev = to_scmi_dev(data);
2839
2840
/* Skip devices of different SCMI instances */
2841
if (sdev->dev.parent != info->dev)
2842
return NOTIFY_DONE;
2843
2844
switch (action) {
2845
case BUS_NOTIFY_BIND_DRIVER:
2846
/* setup handle now as the transport is ready */
2847
scmi_set_handle(sdev);
2848
break;
2849
case BUS_NOTIFY_UNBOUND_DRIVER:
2850
scmi_handle_put(sdev->handle);
2851
sdev->handle = NULL;
2852
break;
2853
default:
2854
return NOTIFY_DONE;
2855
}
2856
2857
dev_dbg(info->dev, "Device %s (%s) is now %s\n", dev_name(&sdev->dev),
2858
sdev->name, action == BUS_NOTIFY_BIND_DRIVER ?
2859
"about to be BOUND." : "UNBOUND.");
2860
2861
return NOTIFY_OK;
2862
}
2863
2864
static int scmi_device_request_notifier(struct notifier_block *nb,
2865
unsigned long action, void *data)
2866
{
2867
struct device_node *np;
2868
struct scmi_device_id *id_table = data;
2869
struct scmi_info *info = req_nb_to_scmi_info(nb);
2870
2871
np = idr_find(&info->active_protocols, id_table->protocol_id);
2872
if (!np)
2873
return NOTIFY_DONE;
2874
2875
dev_dbg(info->dev, "%sRequested device (%s) for protocol 0x%x\n",
2876
action == SCMI_BUS_NOTIFY_DEVICE_REQUEST ? "" : "UN-",
2877
id_table->name, id_table->protocol_id);
2878
2879
switch (action) {
2880
case SCMI_BUS_NOTIFY_DEVICE_REQUEST:
2881
scmi_create_protocol_devices(np, info, id_table->protocol_id,
2882
id_table->name);
2883
break;
2884
case SCMI_BUS_NOTIFY_DEVICE_UNREQUEST:
2885
scmi_destroy_protocol_devices(info, id_table->protocol_id,
2886
id_table->name);
2887
break;
2888
default:
2889
return NOTIFY_DONE;
2890
}
2891
2892
return NOTIFY_OK;
2893
}
2894
2895
static const char * const dbg_counter_strs[] = {
2896
"sent_ok",
2897
"sent_fail",
2898
"sent_fail_polling_unsupported",
2899
"sent_fail_channel_not_found",
2900
"response_ok",
2901
"notification_ok",
2902
"delayed_response_ok",
2903
"xfers_response_timeout",
2904
"xfers_response_polled_timeout",
2905
"response_polled_ok",
2906
"err_msg_unexpected",
2907
"err_msg_invalid",
2908
"err_msg_nomem",
2909
"err_protocol",
2910
"xfers_inflight",
2911
};
2912
2913
static ssize_t reset_all_on_write(struct file *filp, const char __user *buf,
2914
size_t count, loff_t *ppos)
2915
{
2916
struct scmi_debug_info *dbg = filp->private_data;
2917
2918
for (int i = 0; i < SCMI_DEBUG_COUNTERS_LAST; i++)
2919
atomic_set(&dbg->counters[i], 0);
2920
2921
return count;
2922
}
2923
2924
static const struct file_operations fops_reset_counts = {
2925
.owner = THIS_MODULE,
2926
.open = simple_open,
2927
.write = reset_all_on_write,
2928
};
2929
2930
static void scmi_debugfs_counters_setup(struct scmi_debug_info *dbg,
2931
struct dentry *trans)
2932
{
2933
struct dentry *counters;
2934
int idx;
2935
2936
counters = debugfs_create_dir("counters", trans);
2937
2938
for (idx = 0; idx < SCMI_DEBUG_COUNTERS_LAST; idx++)
2939
debugfs_create_atomic_t(dbg_counter_strs[idx], 0600, counters,
2940
&dbg->counters[idx]);
2941
2942
debugfs_create_file("reset", 0200, counters, dbg, &fops_reset_counts);
2943
}
2944
2945
static void scmi_debugfs_common_cleanup(void *d)
2946
{
2947
struct scmi_debug_info *dbg = d;
2948
2949
if (!dbg)
2950
return;
2951
2952
debugfs_remove_recursive(dbg->top_dentry);
2953
kfree(dbg->name);
2954
kfree(dbg->type);
2955
}
2956
2957
static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
2958
{
2959
char top_dir[16];
2960
struct dentry *trans, *top_dentry;
2961
struct scmi_debug_info *dbg;
2962
const char *c_ptr = NULL;
2963
2964
dbg = devm_kzalloc(info->dev, sizeof(*dbg), GFP_KERNEL);
2965
if (!dbg)
2966
return NULL;
2967
2968
dbg->name = kstrdup(of_node_full_name(info->dev->of_node), GFP_KERNEL);
2969
if (!dbg->name) {
2970
devm_kfree(info->dev, dbg);
2971
return NULL;
2972
}
2973
2974
of_property_read_string(info->dev->of_node, "compatible", &c_ptr);
2975
dbg->type = kstrdup(c_ptr, GFP_KERNEL);
2976
if (!dbg->type) {
2977
kfree(dbg->name);
2978
devm_kfree(info->dev, dbg);
2979
return NULL;
2980
}
2981
2982
snprintf(top_dir, 16, "%d", info->id);
2983
top_dentry = debugfs_create_dir(top_dir, scmi_top_dentry);
2984
trans = debugfs_create_dir("transport", top_dentry);
2985
2986
dbg->is_atomic = info->desc->atomic_enabled &&
2987
is_transport_polling_capable(info->desc);
2988
2989
debugfs_create_str("instance_name", 0400, top_dentry,
2990
(char **)&dbg->name);
2991
2992
debugfs_create_u32("atomic_threshold_us", 0400, top_dentry,
2993
(u32 *)&info->desc->atomic_threshold);
2994
2995
debugfs_create_str("type", 0400, trans, (char **)&dbg->type);
2996
2997
debugfs_create_bool("is_atomic", 0400, trans, &dbg->is_atomic);
2998
2999
debugfs_create_u32("max_rx_timeout_ms", 0400, trans,
3000
(u32 *)&info->desc->max_rx_timeout_ms);
3001
3002
debugfs_create_u32("max_msg_size", 0400, trans,
3003
(u32 *)&info->desc->max_msg_size);
3004
3005
debugfs_create_u32("tx_max_msg", 0400, trans,
3006
(u32 *)&info->tx_minfo.max_msg);
3007
3008
debugfs_create_u32("rx_max_msg", 0400, trans,
3009
(u32 *)&info->rx_minfo.max_msg);
3010
3011
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
3012
scmi_debugfs_counters_setup(dbg, trans);
3013
3014
dbg->top_dentry = top_dentry;
3015
3016
if (devm_add_action_or_reset(info->dev,
3017
scmi_debugfs_common_cleanup, dbg))
3018
return NULL;
3019
3020
return dbg;
3021
}
3022
3023
static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
3024
{
3025
int id, num_chans = 0, ret = 0;
3026
struct scmi_chan_info *cinfo;
3027
u8 channels[SCMI_MAX_CHANNELS] = {};
3028
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
3029
3030
/* Enumerate all channels to collect their ids */
3031
idr_for_each_entry(&info->tx_idr, cinfo, id) {
3032
/*
3033
* Cannot happen, but be defensive.
3034
* Zero as num_chans is ok, warn and carry on.
3035
*/
3036
if (num_chans >= SCMI_MAX_CHANNELS || !cinfo) {
3037
dev_warn(info->dev,
3038
"SCMI RAW - Error enumerating channels\n");
3039
break;
3040
}
3041
3042
if (!test_bit(cinfo->id, protos)) {
3043
channels[num_chans++] = cinfo->id;
3044
set_bit(cinfo->id, protos);
3045
}
3046
}
3047
3048
info->raw = scmi_raw_mode_init(&info->handle, info->dbg->top_dentry,
3049
info->id, channels, num_chans,
3050
info->desc, info->tx_minfo.max_msg);
3051
if (IS_ERR(info->raw)) {
3052
dev_err(info->dev, "Failed to initialize SCMI RAW Mode !\n");
3053
ret = PTR_ERR(info->raw);
3054
info->raw = NULL;
3055
}
3056
3057
return ret;
3058
}
3059
3060
static const struct scmi_desc *scmi_transport_setup(struct device *dev)
3061
{
3062
struct scmi_transport *trans;
3063
int ret;
3064
3065
trans = dev_get_platdata(dev);
3066
if (!trans || !trans->supplier || !trans->core_ops)
3067
return NULL;
3068
3069
if (!device_link_add(dev, trans->supplier, DL_FLAG_AUTOREMOVE_CONSUMER)) {
3070
dev_err(dev,
3071
"Adding link to supplier transport device failed\n");
3072
return NULL;
3073
}
3074
3075
/* Provide core transport ops */
3076
*trans->core_ops = &scmi_trans_core_ops;
3077
3078
dev_info(dev, "Using %s\n", dev_driver_string(trans->supplier));
3079
3080
ret = of_property_read_u32(dev->of_node, "arm,max-rx-timeout-ms",
3081
&trans->desc.max_rx_timeout_ms);
3082
if (ret && ret != -EINVAL)
3083
dev_err(dev, "Malformed arm,max-rx-timeout-ms DT property.\n");
3084
3085
ret = of_property_read_u32(dev->of_node, "arm,max-msg-size",
3086
&trans->desc.max_msg_size);
3087
if (ret && ret != -EINVAL)
3088
dev_err(dev, "Malformed arm,max-msg-size DT property.\n");
3089
3090
ret = of_property_read_u32(dev->of_node, "arm,max-msg",
3091
&trans->desc.max_msg);
3092
if (ret && ret != -EINVAL)
3093
dev_err(dev, "Malformed arm,max-msg DT property.\n");
3094
3095
dev_info(dev,
3096
"SCMI max-rx-timeout: %dms / max-msg-size: %dbytes / max-msg: %d\n",
3097
trans->desc.max_rx_timeout_ms, trans->desc.max_msg_size,
3098
trans->desc.max_msg);
3099
3100
/* System wide atomic threshold for atomic ops .. if any */
3101
if (!of_property_read_u32(dev->of_node, "atomic-threshold-us",
3102
&trans->desc.atomic_threshold))
3103
dev_info(dev,
3104
"SCMI System wide atomic threshold set to %u us\n",
3105
trans->desc.atomic_threshold);
3106
3107
return &trans->desc;
3108
}
3109
3110
static void scmi_enable_matching_quirks(struct scmi_info *info)
3111
{
3112
struct scmi_revision_info *rev = &info->version;
3113
3114
dev_dbg(info->dev, "Looking for quirks matching: %s/%s/0x%08X\n",
3115
rev->vendor_id, rev->sub_vendor_id, rev->impl_ver);
3116
3117
/* Enable applicable quirks */
3118
scmi_quirks_enable(info->dev, rev->vendor_id,
3119
rev->sub_vendor_id, rev->impl_ver);
3120
}
3121
3122
static int scmi_probe(struct platform_device *pdev)
3123
{
3124
int ret;
3125
char *err_str = "probe failure\n";
3126
struct scmi_handle *handle;
3127
const struct scmi_desc *desc;
3128
struct scmi_info *info;
3129
bool coex = IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT_COEX);
3130
struct device *dev = &pdev->dev;
3131
struct device_node *child, *np = dev->of_node;
3132
3133
desc = scmi_transport_setup(dev);
3134
if (!desc) {
3135
err_str = "transport invalid\n";
3136
ret = -EINVAL;
3137
goto out_err;
3138
}
3139
3140
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
3141
if (!info)
3142
return -ENOMEM;
3143
3144
info->id = ida_alloc_min(&scmi_id, 0, GFP_KERNEL);
3145
if (info->id < 0)
3146
return info->id;
3147
3148
info->dev = dev;
3149
info->desc = desc;
3150
info->bus_nb.notifier_call = scmi_bus_notifier;
3151
info->dev_req_nb.notifier_call = scmi_device_request_notifier;
3152
INIT_LIST_HEAD(&info->node);
3153
idr_init(&info->protocols);
3154
mutex_init(&info->protocols_mtx);
3155
idr_init(&info->active_protocols);
3156
mutex_init(&info->devreq_mtx);
3157
3158
platform_set_drvdata(pdev, info);
3159
idr_init(&info->tx_idr);
3160
idr_init(&info->rx_idr);
3161
3162
handle = &info->handle;
3163
handle->dev = info->dev;
3164
handle->version = &info->version;
3165
handle->devm_protocol_acquire = scmi_devm_protocol_acquire;
3166
handle->devm_protocol_get = scmi_devm_protocol_get;
3167
handle->devm_protocol_put = scmi_devm_protocol_put;
3168
handle->is_transport_atomic = scmi_is_transport_atomic;
3169
3170
/* Setup all channels described in the DT at first */
3171
ret = scmi_channels_setup(info);
3172
if (ret) {
3173
err_str = "failed to setup channels\n";
3174
goto clear_ida;
3175
}
3176
3177
ret = bus_register_notifier(&scmi_bus_type, &info->bus_nb);
3178
if (ret) {
3179
err_str = "failed to register bus notifier\n";
3180
goto clear_txrx_setup;
3181
}
3182
3183
ret = blocking_notifier_chain_register(&scmi_requested_devices_nh,
3184
&info->dev_req_nb);
3185
if (ret) {
3186
err_str = "failed to register device notifier\n";
3187
goto clear_bus_notifier;
3188
}
3189
3190
ret = scmi_xfer_info_init(info);
3191
if (ret) {
3192
err_str = "failed to init xfers pool\n";
3193
goto clear_dev_req_notifier;
3194
}
3195
3196
if (scmi_top_dentry) {
3197
info->dbg = scmi_debugfs_common_setup(info);
3198
if (!info->dbg)
3199
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
3200
3201
if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
3202
ret = scmi_debugfs_raw_mode_setup(info);
3203
if (!coex) {
3204
if (ret)
3205
goto clear_dev_req_notifier;
3206
3207
/* Bail out anyway when coex disabled. */
3208
return 0;
3209
}
3210
3211
/* Coex enabled, carry on in any case. */
3212
dev_info(dev, "SCMI RAW Mode COEX enabled !\n");
3213
}
3214
}
3215
3216
if (scmi_notification_init(handle))
3217
dev_err(dev, "SCMI Notifications NOT available.\n");
3218
3219
if (info->desc->atomic_enabled &&
3220
!is_transport_polling_capable(info->desc))
3221
dev_err(dev,
3222
"Transport is not polling capable. Atomic mode not supported.\n");
3223
3224
/*
3225
* Trigger SCMI Base protocol initialization.
3226
* It's mandatory and won't be ever released/deinit until the
3227
* SCMI stack is shutdown/unloaded as a whole.
3228
*/
3229
ret = scmi_protocol_acquire(handle, SCMI_PROTOCOL_BASE);
3230
if (ret) {
3231
err_str = "unable to communicate with SCMI\n";
3232
if (coex) {
3233
dev_err(dev, "%s", err_str);
3234
return 0;
3235
}
3236
goto notification_exit;
3237
}
3238
3239
mutex_lock(&scmi_list_mutex);
3240
list_add_tail(&info->node, &scmi_list);
3241
mutex_unlock(&scmi_list_mutex);
3242
3243
scmi_enable_matching_quirks(info);
3244
3245
for_each_available_child_of_node(np, child) {
3246
u32 prot_id;
3247
3248
if (of_property_read_u32(child, "reg", &prot_id))
3249
continue;
3250
3251
if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
3252
dev_err(dev, "Out of range protocol %d\n", prot_id);
3253
3254
if (!scmi_is_protocol_implemented(handle, prot_id)) {
3255
dev_err(dev, "SCMI protocol %d not implemented\n",
3256
prot_id);
3257
continue;
3258
}
3259
3260
/*
* Save this valid DT protocol descriptor amongst
* @active_protocols for this SCMI instance.
*/
3264
ret = idr_alloc(&info->active_protocols, child,
3265
prot_id, prot_id + 1, GFP_KERNEL);
3266
if (ret != prot_id) {
3267
dev_err(dev, "SCMI protocol %d already activated. Skip\n",
3268
prot_id);
3269
continue;
3270
}
3271
3272
of_node_get(child);
3273
scmi_create_protocol_devices(child, info, prot_id, NULL);
3274
}
3275
3276
return 0;
3277
3278
notification_exit:
3279
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3280
scmi_raw_mode_cleanup(info->raw);
3281
scmi_notification_exit(&info->handle);
3282
clear_dev_req_notifier:
3283
blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3284
&info->dev_req_nb);
3285
clear_bus_notifier:
3286
bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3287
clear_txrx_setup:
3288
scmi_cleanup_txrx_channels(info);
3289
clear_ida:
3290
ida_free(&scmi_id, info->id);
3291
3292
out_err:
3293
return dev_err_probe(dev, ret, "%s", err_str);
3294
}
3295
3296
static void scmi_remove(struct platform_device *pdev)
3297
{
3298
int id;
3299
struct scmi_info *info = platform_get_drvdata(pdev);
3300
struct device_node *child;
3301
3302
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT))
3303
scmi_raw_mode_cleanup(info->raw);
3304
3305
mutex_lock(&scmi_list_mutex);
3306
if (info->users)
3307
dev_warn(&pdev->dev,
3308
"Still active SCMI users will be forcibly unbound.\n");
3309
list_del(&info->node);
3310
mutex_unlock(&scmi_list_mutex);
3311
3312
scmi_notification_exit(&info->handle);
3313
3314
mutex_lock(&info->protocols_mtx);
3315
idr_destroy(&info->protocols);
3316
mutex_unlock(&info->protocols_mtx);
3317
3318
idr_for_each_entry(&info->active_protocols, child, id)
3319
of_node_put(child);
3320
idr_destroy(&info->active_protocols);
3321
3322
blocking_notifier_chain_unregister(&scmi_requested_devices_nh,
3323
&info->dev_req_nb);
3324
bus_unregister_notifier(&scmi_bus_type, &info->bus_nb);
3325
3326
/* Safe to free channels since no more users */
3327
scmi_cleanup_txrx_channels(info);
3328
3329
ida_free(&scmi_id, info->id);
3330
}
3331
3332
static ssize_t protocol_version_show(struct device *dev,
3333
struct device_attribute *attr, char *buf)
3334
{
3335
struct scmi_info *info = dev_get_drvdata(dev);
3336
3337
return sprintf(buf, "%u.%u\n", info->version.major_ver,
3338
info->version.minor_ver);
3339
}
3340
static DEVICE_ATTR_RO(protocol_version);
3341
3342
static ssize_t firmware_version_show(struct device *dev,
3343
struct device_attribute *attr, char *buf)
3344
{
3345
struct scmi_info *info = dev_get_drvdata(dev);
3346
3347
return sprintf(buf, "0x%x\n", info->version.impl_ver);
3348
}
3349
static DEVICE_ATTR_RO(firmware_version);
3350
3351
static ssize_t vendor_id_show(struct device *dev,
3352
struct device_attribute *attr, char *buf)
3353
{
3354
struct scmi_info *info = dev_get_drvdata(dev);
3355
3356
return sprintf(buf, "%s\n", info->version.vendor_id);
3357
}
3358
static DEVICE_ATTR_RO(vendor_id);
3359
3360
static ssize_t sub_vendor_id_show(struct device *dev,
3361
struct device_attribute *attr, char *buf)
3362
{
3363
struct scmi_info *info = dev_get_drvdata(dev);
3364
3365
return sprintf(buf, "%s\n", info->version.sub_vendor_id);
3366
}
3367
static DEVICE_ATTR_RO(sub_vendor_id);
3368
3369
static struct attribute *versions_attrs[] = {
3370
&dev_attr_firmware_version.attr,
3371
&dev_attr_protocol_version.attr,
3372
&dev_attr_vendor_id.attr,
3373
&dev_attr_sub_vendor_id.attr,
3374
NULL,
3375
};
3376
ATTRIBUTE_GROUPS(versions);
3377
3378
static struct platform_driver scmi_driver = {
3379
.driver = {
3380
.name = "arm-scmi",
3381
.suppress_bind_attrs = true,
3382
.dev_groups = versions_groups,
3383
},
3384
.probe = scmi_probe,
3385
.remove = scmi_remove,
3386
};
3387
3388
static struct dentry *scmi_debugfs_init(void)
3389
{
3390
struct dentry *d;
3391
3392
d = debugfs_create_dir("scmi", NULL);
3393
if (IS_ERR(d)) {
3394
pr_err("Could NOT create SCMI top dentry.\n");
3395
return NULL;
3396
}
3397
3398
return d;
3399
}
3400
3401
int scmi_inflight_count(const struct scmi_handle *handle)
3402
{
3403
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
3404
struct scmi_info *info = handle_to_scmi_info(handle);
3405
3406
if (!info->dbg)
3407
return 0;
3408
3409
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
3410
} else {
3411
return 0;
3412
}
3413
}
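/*
 * Example (illustrative sketch, not part of this driver): a caller inside the
 * SCMI core holding a handle can sample the number of currently in-flight
 * transfers when debug counters are built in; the surrounding code is
 * hypothetical.
 *
 *	if (scmi_inflight_count(handle))
 *		dev_dbg(dev, "SCMI transfers still in-flight\n");
 */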
3414
3415
static int __init scmi_driver_init(void)
3416
{
3417
scmi_quirks_initialize();
3418
3419
/* Bail out if no SCMI transport was configured */
3420
if (WARN_ON(!IS_ENABLED(CONFIG_ARM_SCMI_HAVE_TRANSPORT)))
3421
return -EINVAL;
3422
3423
if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_SHMEM))
3424
scmi_trans_core_ops.shmem = scmi_shared_mem_operations_get();
3425
3426
if (IS_ENABLED(CONFIG_ARM_SCMI_HAVE_MSG))
3427
scmi_trans_core_ops.msg = scmi_message_operations_get();
3428
3429
if (IS_ENABLED(CONFIG_ARM_SCMI_NEED_DEBUGFS))
3430
scmi_top_dentry = scmi_debugfs_init();
3431
3432
scmi_base_register();
3433
3434
scmi_clock_register();
3435
scmi_perf_register();
3436
scmi_power_register();
3437
scmi_reset_register();
3438
scmi_sensors_register();
3439
scmi_voltage_register();
3440
scmi_system_register();
3441
scmi_powercap_register();
3442
scmi_pinctrl_register();
3443
3444
return platform_driver_register(&scmi_driver);
3445
}
3446
module_init(scmi_driver_init);
3447
3448
static void __exit scmi_driver_exit(void)
3449
{
3450
scmi_base_unregister();
3451
3452
scmi_clock_unregister();
3453
scmi_perf_unregister();
3454
scmi_power_unregister();
3455
scmi_reset_unregister();
3456
scmi_sensors_unregister();
3457
scmi_voltage_unregister();
3458
scmi_system_unregister();
3459
scmi_powercap_unregister();
3460
scmi_pinctrl_unregister();
3461
3462
platform_driver_unregister(&scmi_driver);
3463
3464
debugfs_remove_recursive(scmi_top_dentry);
3465
}
3466
module_exit(scmi_driver_exit);
3467
3468
MODULE_ALIAS("platform:arm-scmi");
3469
MODULE_AUTHOR("Sudeep Holla <[email protected]>");
3470
MODULE_DESCRIPTION("ARM SCMI protocol driver");
3471
MODULE_LICENSE("GPL v2");
3472
3473