GitHub Repository: torvalds/linux
Path: blob/master/drivers/firmware/arm_scmi/perf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Performance Protocol
 *
 * Copyright (C) 2018-2023 ARM Ltd.
 */

#define pr_fmt(fmt) "SCMI Notifications PERF - " fmt

#include <linux/bits.h>
#include <linux/hashtable.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/scmi_protocol.h>
#include <linux/sort.h>
#include <linux/xarray.h>

#include <trace/events/scmi.h>

#include "protocols.h"
#include "notify.h"

/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION		0x40000

#define MAX_OPPS		32

enum scmi_performance_protocol_cmd {
	PERF_DOMAIN_ATTRIBUTES = 0x3,
	PERF_DESCRIBE_LEVELS = 0x4,
	PERF_LIMITS_SET = 0x5,
	PERF_LIMITS_GET = 0x6,
	PERF_LEVEL_SET = 0x7,
	PERF_LEVEL_GET = 0x8,
	PERF_NOTIFY_LIMITS = 0x9,
	PERF_NOTIFY_LEVEL = 0xa,
	PERF_DESCRIBE_FASTCHANNEL = 0xb,
	PERF_DOMAIN_NAME_GET = 0xc,
};

enum {
	PERF_FC_LEVEL,
	PERF_FC_LIMIT,
	PERF_FC_MAX,
};

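/*
 * An internal cached representation of a single Operating Performance
 * Point as described by the platform; when level indexing is in use, an
 * OPP is additionally tracked by its level index and indicative
 * frequency (see the opps_by_* containers in struct perf_dom_info).
 */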
struct scmi_opp {
	u32 perf;
	u32 power;
	u32 trans_latency_us;
	u32 indicative_freq;
	u32 level_index;
	struct hlist_node hash;
};

struct scmi_msg_resp_perf_attributes {
	__le16 num_domains;
	__le16 flags;
#define POWER_SCALE_IN_MILLIWATT(x)	((x) & BIT(0))
#define POWER_SCALE_IN_MICROWATT(x)	((x) & BIT(1))
	__le32 stats_addr_low;
	__le32 stats_addr_high;
	__le32 stats_size;
};

struct scmi_msg_resp_perf_domain_attributes {
	__le32 flags;
#define SUPPORTS_SET_LIMITS(x)		((x) & BIT(31))
#define SUPPORTS_SET_PERF_LVL(x)	((x) & BIT(30))
#define SUPPORTS_PERF_LIMIT_NOTIFY(x)	((x) & BIT(29))
#define SUPPORTS_PERF_LEVEL_NOTIFY(x)	((x) & BIT(28))
#define SUPPORTS_PERF_FASTCHANNELS(x)	((x) & BIT(27))
#define SUPPORTS_EXTENDED_NAMES(x)	((x) & BIT(26))
#define SUPPORTS_LEVEL_INDEXING(x)	((x) & BIT(25))
	__le32 rate_limit_us;
	__le32 sustained_freq_khz;
	__le32 sustained_perf_level;
	u8 name[SCMI_SHORT_NAME_MAX_SIZE];
};

struct scmi_msg_perf_describe_levels {
	__le32 domain;
	__le32 level_index;
};

struct scmi_perf_set_limits {
	__le32 domain;
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_get_limits {
	__le32 max_level;
	__le32 min_level;
};

struct scmi_perf_set_level {
	__le32 domain;
	__le32 level;
};

struct scmi_perf_notify_level_or_limits {
	__le32 domain;
	__le32 notify_enable;
};

struct scmi_perf_limits_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 range_max;
	__le32 range_min;
};

struct scmi_perf_level_notify_payld {
	__le32 agent_id;
	__le32 domain_id;
	__le32 performance_level;
};

struct scmi_msg_resp_perf_describe_levels {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
	} opp[];
};

struct scmi_msg_resp_perf_describe_levels_v4 {
	__le16 num_returned;
	__le16 num_remaining;
	struct {
		__le32 perf_val;
		__le32 power;
		__le16 transition_latency_us;
		__le16 reserved;
		__le32 indicative_freq;
		__le32 level_index;
	} opp[];
};

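/*
 * Per-domain performance state: the attributes reported by the
 * platform, the table of discovered OPPs and, when level indexing is
 * supported, xarray/hashtable views of those same OPPs keyed by level
 * index, performance level and indicative frequency.
 */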
struct perf_dom_info {
	u32 id;
	bool set_limits;
	bool perf_limit_notify;
	bool perf_level_notify;
	bool perf_fastchannels;
	bool level_indexing_mode;
	u32 opp_count;
	u32 rate_limit_us;
	u32 sustained_freq_khz;
	u32 sustained_perf_level;
	unsigned long mult_factor;
	struct scmi_perf_domain_info info;
	struct scmi_opp opp[MAX_OPPS];
	struct scmi_fc_info *fc_info;
	struct xarray opps_by_idx;
	struct xarray opps_by_lvl;
	DECLARE_HASHTABLE(opps_by_freq, ilog2(MAX_OPPS));
};

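/*
 * Walk the hash bucket associated with __freq and evaluate to the
 * struct scmi_opp whose indicative_freq matches, or to NULL when no
 * such OPP has been registered.
 */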
#define LOOKUP_BY_FREQ(__htp, __freq)					\
({									\
	/* u32 cast is needed to pick the right hash function */	\
	u32 f_ = (u32)(__freq);						\
	struct scmi_opp *_opp;						\
									\
	hash_for_each_possible((__htp), _opp, hash, f_)			\
		if (_opp->indicative_freq == f_)			\
			break;						\
	_opp;								\
})

struct scmi_perf_info {
	u32 version;
	u16 num_domains;
	enum scmi_power_scale power_scale;
	u64 stats_addr;
	u32 stats_size;
	bool notify_lvl_cmd;
	bool notify_lim_cmd;
	struct perf_dom_info *dom_info;
};

static enum scmi_performance_protocol_cmd evt_2_cmd[] = {
	PERF_NOTIFY_LIMITS,
	PERF_NOTIFY_LEVEL,
};

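/*
 * Retrieve the protocol-wide attributes: number of performance domains,
 * the power scale in use and the optional statistics shared-memory
 * region; also probe whether the optional notification commands are
 * implemented by the platform.
 */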
static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
				    struct scmi_perf_info *pi)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0,
				      sizeof(*attr), &t);
	if (ret)
		return ret;

	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		u16 flags = le16_to_cpu(attr->flags);

		pi->num_domains = le16_to_cpu(attr->num_domains);

		if (POWER_SCALE_IN_MILLIWATT(flags))
			pi->power_scale = SCMI_POWER_MILLIWATTS;
		if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
			if (POWER_SCALE_IN_MICROWATT(flags))
				pi->power_scale = SCMI_POWER_MICROWATTS;

		pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
				(u64)le32_to_cpu(attr->stats_addr_high) << 32;
		pi->stats_size = le32_to_cpu(attr->stats_size);
	}

	ph->xops->xfer_put(ph, t);

	if (!ret) {
		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LEVEL, NULL))
			pi->notify_lvl_cmd = true;

		if (!ph->hops->protocol_msg_check(ph, PERF_NOTIFY_LIMITS, NULL))
			pi->notify_lim_cmd = true;
	}

	return ret;
}

static void scmi_perf_xa_destroy(void *data)
{
	int domain;
	struct scmi_perf_info *pinfo = data;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		xa_destroy(&((pinfo->dom_info + domain)->opps_by_idx));
		xa_destroy(&((pinfo->dom_info + domain)->opps_by_lvl));
	}
}

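/*
 * Retrieve the attributes of a single performance domain and derive
 * mult_factor, the level-to-frequency multiplier computed from the
 * advertised sustained frequency and sustained performance level.
 */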
static int
scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
				struct perf_dom_info *dom_info,
				bool notify_lim_cmd, bool notify_lvl_cmd,
				u32 version)
{
	int ret;
	u32 flags;
	struct scmi_xfer *t;
	struct scmi_msg_resp_perf_domain_attributes *attr;

	ret = ph->xops->xfer_get_init(ph, PERF_DOMAIN_ATTRIBUTES,
				      sizeof(dom_info->id), sizeof(*attr), &t);
	if (ret)
		return ret;

	put_unaligned_le32(dom_info->id, t->tx.buf);
	attr = t->rx.buf;

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		flags = le32_to_cpu(attr->flags);

		dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
		dom_info->info.set_perf = SUPPORTS_SET_PERF_LVL(flags);
		if (notify_lim_cmd)
			dom_info->perf_limit_notify =
				SUPPORTS_PERF_LIMIT_NOTIFY(flags);
		if (notify_lvl_cmd)
			dom_info->perf_level_notify =
				SUPPORTS_PERF_LEVEL_NOTIFY(flags);
		dom_info->perf_fastchannels = SUPPORTS_PERF_FASTCHANNELS(flags);
		if (PROTOCOL_REV_MAJOR(version) >= 0x4)
			dom_info->level_indexing_mode =
				SUPPORTS_LEVEL_INDEXING(flags);
		dom_info->rate_limit_us = le32_to_cpu(attr->rate_limit_us) &
						GENMASK(19, 0);
		dom_info->sustained_freq_khz =
					le32_to_cpu(attr->sustained_freq_khz);
		dom_info->sustained_perf_level =
					le32_to_cpu(attr->sustained_perf_level);
		/*
		 * sustained_freq_khz = mult_factor * sustained_perf_level
		 * mult_factor must be a non-zero positive integer (not a
		 * fraction)
		 */
		if (!dom_info->sustained_freq_khz ||
		    !dom_info->sustained_perf_level ||
		    dom_info->level_indexing_mode) {
			/* CPUFreq converts to kHz, hence default 1000 */
			dom_info->mult_factor = 1000;
		} else {
			dom_info->mult_factor =
					(dom_info->sustained_freq_khz * 1000UL)
					/ dom_info->sustained_perf_level;
			if ((dom_info->sustained_freq_khz * 1000UL) %
			    dom_info->sustained_perf_level)
				dev_warn(ph->dev,
					 "multiplier for domain %d rounded\n",
					 dom_info->id);
		}
		if (!dom_info->mult_factor)
			dev_warn(ph->dev,
				 "Wrong sustained perf/frequency(domain %d)\n",
				 dom_info->id);

		strscpy(dom_info->info.name, attr->name,
			SCMI_SHORT_NAME_MAX_SIZE);
	}

	ph->xops->xfer_put(ph, t);

	/*
	 * If supported overwrite short name with the extended one;
	 * on error just carry on and use already provided short name.
	 */
	if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
	    SUPPORTS_EXTENDED_NAMES(flags))
		ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET,
					    dom_info->id, NULL,
					    dom_info->info.name,
					    SCMI_MAX_STR_SIZE);

	xa_init(&dom_info->opps_by_lvl);
	if (dom_info->level_indexing_mode) {
		xa_init(&dom_info->opps_by_idx);
		hash_init(dom_info->opps_by_freq);
	}

	return ret;
}

static int opp_cmp_func(const void *opp1, const void *opp2)
{
	const struct scmi_opp *t1 = opp1, *t2 = opp2;

	return t1->perf - t2->perf;
}

struct scmi_perf_ipriv {
	u32 version;
	struct perf_dom_info *perf_dom;
};

static void iter_perf_levels_prepare_message(void *message,
					     unsigned int desc_index,
					     const void *priv)
{
	struct scmi_msg_perf_describe_levels *msg = message;
	const struct scmi_perf_ipriv *p = priv;

	msg->domain = cpu_to_le32(p->perf_dom->id);
	/* Set the number of OPPs to be skipped/already read */
	msg->level_index = cpu_to_le32(desc_index);
}

static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
					 const void *response, void *priv)
{
	const struct scmi_msg_resp_perf_describe_levels *r = response;

	st->num_returned = le16_to_cpu(r->num_returned);
	st->num_remaining = le16_to_cpu(r->num_remaining);

	return 0;
}

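/*
 * Parse one OPP entry out of a PERF_DESCRIBE_LEVELS response (pre-v4
 * layout) and register it in the by-level xarray.
 */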
static inline int
process_response_opp(struct device *dev, struct perf_dom_info *dom,
		     struct scmi_opp *opp, unsigned int loop_idx,
		     const struct scmi_msg_resp_perf_describe_levels *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	return 0;
}

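/*
 * As above, but for the v4 response layout, which additionally carries
 * an indicative frequency and a level index; when level indexing is in
 * use the OPP is also registered in the by-index xarray and in the
 * by-frequency hashtable.
 */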
static inline int
process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
			struct scmi_opp *opp, unsigned int loop_idx,
			const struct scmi_msg_resp_perf_describe_levels_v4 *r)
{
	int ret;

	opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
	opp->power = le32_to_cpu(r->opp[loop_idx].power);
	opp->trans_latency_us =
		le16_to_cpu(r->opp[loop_idx].transition_latency_us);

	ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
	if (ret) {
		dev_info(dev, FW_BUG "Failed to add opps_by_lvl at %d for %s - ret:%d\n",
			 opp->perf, dom->info.name, ret);
		return ret;
	}

	/* Note that PERF v4 always reports five 32-bit words */
	opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
	if (dom->level_indexing_mode) {
		opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);

		ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
				GFP_KERNEL);
		if (ret) {
			dev_warn(dev,
				 "Failed to add opps_by_idx at %d for %s - ret:%d\n",
				 opp->level_index, dom->info.name, ret);

			/* Cleanup by_lvl too */
			xa_erase(&dom->opps_by_lvl, opp->perf);

			return ret;
		}

		hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
	}

	return 0;
}

static int
iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
				  const void *response,
				  struct scmi_iterator_state *st, void *priv)
{
	int ret;
	struct scmi_opp *opp;
	struct scmi_perf_ipriv *p = priv;

	opp = &p->perf_dom->opp[p->perf_dom->opp_count];
	if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
		ret = process_response_opp(ph->dev, p->perf_dom, opp,
					   st->loop_idx, response);
	else
		ret = process_response_opp_v4(ph->dev, p->perf_dom, opp,
					      st->loop_idx, response);

	/* Skip BAD duplicates received from firmware */
	if (ret)
		return ret == -EBUSY ? 0 : ret;

	p->perf_dom->opp_count++;

	dev_dbg(ph->dev, "Level %d Power %d Latency %dus Ifreq %d Index %d\n",
		opp->perf, opp->power, opp->trans_latency_us,
		opp->indicative_freq, opp->level_index);

	return 0;
}

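/*
 * Enumerate all the OPPs of a domain using the common SCMI iterator
 * helpers to issue as many PERF_DESCRIBE_LEVELS commands as needed,
 * then sort the resulting table by ascending performance level.
 */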
static int
scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph,
			      struct perf_dom_info *perf_dom, u32 version)
{
	int ret;
	void *iter;
	struct scmi_iterator_ops ops = {
		.prepare_message = iter_perf_levels_prepare_message,
		.update_state = iter_perf_levels_update_state,
		.process_response = iter_perf_levels_process_response,
	};
	struct scmi_perf_ipriv ppriv = {
		.version = version,
		.perf_dom = perf_dom,
	};

	iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
					    PERF_DESCRIBE_LEVELS,
					    sizeof(struct scmi_msg_perf_describe_levels),
					    &ppriv);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	ret = ph->hops->iter_response_run(iter);
	if (ret)
		return ret;

	if (perf_dom->opp_count)
		sort(perf_dom->opp, perf_dom->opp_count,
		     sizeof(struct scmi_opp), opp_cmp_func, NULL);

	return ret;
}

static int scmi_perf_num_domains_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->num_domains;
}

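/*
 * Bounds-checked lookup of a domain descriptor: returns an ERR_PTR for
 * out-of-range domain identifiers.
 */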
static inline struct perf_dom_info *
scmi_perf_domain_lookup(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	if (domain >= pi->num_domains)
		return ERR_PTR(-EINVAL);

	return pi->dom_info + domain;
}

static const struct scmi_perf_domain_info *
scmi_perf_info_get(const struct scmi_protocol_handle *ph, u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return ERR_PTR(-EINVAL);

	return &dom->info;
}

static int scmi_perf_msg_limits_set(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 max_perf, u32 min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_SET,
				      sizeof(*limits), 0, &t);
	if (ret)
		return ret;

	limits = t->tx.buf;
	limits->domain = cpu_to_le32(domain);
	limits->max_level = cpu_to_le32(max_perf);
	limits->min_level = cpu_to_le32(min_perf);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

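/*
 * Apply new limits through the memory-mapped fastchannel when one has
 * been discovered for this domain, falling back to the regular
 * PERF_LIMITS_SET message otherwise.
 */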
static int __scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 max_perf,
				  u32 min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET,
				   dom->id, min_perf, max_perf);
		iowrite32(max_perf, fci->set_addr);
		iowrite32(min_perf, fci->set_addr + 4);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_limits_set(ph, dom->id, max_perf, min_perf);
}

static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
				u32 domain, u32 max_perf, u32 min_perf)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->set_limits)
		return -EOPNOTSUPP;

	if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
		return -EINVAL;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		if (min_perf) {
			opp = xa_load(&dom->opps_by_lvl, min_perf);
			if (!opp)
				return -EIO;

			min_perf = opp->level_index;
		}

		if (max_perf) {
			opp = xa_load(&dom->opps_by_lvl, max_perf);
			if (!opp)
				return -EIO;

			max_perf = opp->level_index;
		}
	}

	return __scmi_perf_limits_set(ph, dom, max_perf, min_perf);
}

static int scmi_perf_msg_limits_get(const struct scmi_protocol_handle *ph,
				    u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_get_limits *limits;

	ret = ph->xops->xfer_get_init(ph, PERF_LIMITS_GET,
				      sizeof(__le32), 0, &t);
	if (ret)
		return ret;

	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret) {
		limits = t->rx.buf;

		*max_perf = le32_to_cpu(limits->max_level);
		*min_perf = le32_to_cpu(limits->min_level);
	}

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int __scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				  struct perf_dom_info *dom, u32 *max_perf,
				  u32 *min_perf)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT];

		*max_perf = ioread32(fci->get_addr);
		*min_perf = ioread32(fci->get_addr + 4);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET,
				   dom->id, *min_perf, *max_perf);
		return 0;
	}

	return scmi_perf_msg_limits_get(ph, dom->id, max_perf, min_perf);
}

static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph,
				u32 domain, u32 *max_perf, u32 *min_perf)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_limits_get(ph, dom, max_perf, min_perf);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *min_perf);
		if (!opp)
			return -EIO;

		*min_perf = opp->perf;

		opp = xa_load(&dom->opps_by_idx, *max_perf);
		if (!opp)
			return -EIO;

		*max_perf = opp->perf;
	}

	return 0;
}

static int scmi_perf_msg_level_set(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 level, bool poll)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_set_level *lvl;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_SET, sizeof(*lvl), 0, &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	lvl = t->tx.buf;
	lvl->domain = cpu_to_le32(domain);
	lvl->level = cpu_to_le32(level);

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

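/*
 * Request a new performance level via the fastchannel when available,
 * otherwise fall back to the regular PERF_LEVEL_SET message.
 */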
static int __scmi_perf_level_set(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) {
		struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL];

		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET,
				   dom->id, level, 0);
		iowrite32(level, fci->set_addr);
		ph->hops->fastchannel_db_ring(fci->set_db);
		return 0;
	}

	return scmi_perf_msg_level_set(ph, dom->id, level, poll);
}

static int scmi_perf_level_set(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 level, bool poll)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->info.set_perf)
		return -EOPNOTSUPP;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_lvl, level);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}

static int scmi_perf_msg_level_get(const struct scmi_protocol_handle *ph,
				   u32 domain, u32 *level, bool poll)
{
	int ret;
	struct scmi_xfer *t;

	ret = ph->xops->xfer_get_init(ph, PERF_LEVEL_GET,
				      sizeof(u32), sizeof(u32), &t);
	if (ret)
		return ret;

	t->hdr.poll_completion = poll;
	put_unaligned_le32(domain, t->tx.buf);

	ret = ph->xops->do_xfer(ph, t);
	if (!ret)
		*level = get_unaligned_le32(t->rx.buf);

	ph->xops->xfer_put(ph, t);
	return ret;
}

static int __scmi_perf_level_get(const struct scmi_protocol_handle *ph,
				 struct perf_dom_info *dom, u32 *level,
				 bool poll)
{
	if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) {
		*level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr);
		trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET,
				   dom->id, *level, 0);
		return 0;
	}

	return scmi_perf_msg_level_get(ph, dom->id, level, poll);
}

static int scmi_perf_level_get(const struct scmi_protocol_handle *ph,
			       u32 domain, u32 *level, bool poll)
{
	int ret;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, level, poll);
	if (ret)
		return ret;

	if (dom->level_indexing_mode) {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, *level);
		if (!opp)
			return -EIO;

		*level = opp->perf;
	}

	return 0;
}

static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph,
					 u32 domain, int message_id,
					 bool enable)
{
	int ret;
	struct scmi_xfer *t;
	struct scmi_perf_notify_level_or_limits *notify;

	ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
	if (ret)
		return ret;

	notify = t->tx.buf;
	notify->domain = cpu_to_le32(domain);
	notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

	ret = ph->xops->do_xfer(ph, t);

	ph->xops->xfer_put(ph, t);
	return ret;
}

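/*
 * Discover the per-domain fastchannels: the level and limits get
 * channels are probed unconditionally, while the corresponding set
 * channels are probed only when the matching capability was advertised
 * for the domain.
 */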
static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph,
				     struct perf_dom_info *dom)
{
	struct scmi_fc_info *fc;

	fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return;

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LEVEL_GET, 4, dom->id,
				   &fc[PERF_FC_LEVEL].get_addr, NULL,
				   &fc[PERF_FC_LEVEL].rate_limit);

	ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
				   PERF_LIMITS_GET, 8, dom->id,
				   &fc[PERF_FC_LIMIT].get_addr, NULL,
				   &fc[PERF_FC_LIMIT].rate_limit);

	if (dom->info.set_perf)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LEVEL_SET, 4, dom->id,
					   &fc[PERF_FC_LEVEL].set_addr,
					   &fc[PERF_FC_LEVEL].set_db,
					   &fc[PERF_FC_LEVEL].rate_limit);

	if (dom->set_limits)
		ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL,
					   PERF_LIMITS_SET, 8, dom->id,
					   &fc[PERF_FC_LIMIT].set_addr,
					   &fc[PERF_FC_LIMIT].set_db,
					   &fc[PERF_FC_LIMIT].rate_limit);

	dom->fc_info = fc;
}

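/*
 * Register every discovered OPP of a domain with the OPP core for the
 * given device, deriving each frequency either from the performance
 * level or, in level-indexing mode, from the indicative frequency.
 */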
static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
				     struct device *dev, u32 domain)
{
	int idx, ret;
	unsigned long freq;
	struct dev_pm_opp_data data = {};
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	for (idx = 0; idx < dom->opp_count; idx++) {
		if (!dom->level_indexing_mode)
			freq = dom->opp[idx].perf * dom->mult_factor;
		else
			freq = dom->opp[idx].indicative_freq * dom->mult_factor;

		/* All OPPs above the sustained frequency are treated as turbo */
		data.turbo = freq > dom->sustained_freq_khz * 1000UL;

		data.level = dom->opp[idx].perf;
		data.freq = freq;

		ret = dev_pm_opp_add_dynamic(dev, &data);
		if (ret) {
			dev_warn(dev, "[%d][%s]: Failed to add OPP[%d] %lu\n",
				 domain, dom->info.name, idx, freq);
			dev_pm_opp_remove_all_dynamic(dev);
			return ret;
		}

		dev_dbg(dev, "[%d][%s]:: Registered OPP[%d] %lu\n",
			domain, dom->info.name, idx, freq);
	}
	return 0;
}

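/*
 * Return the transition latency of the highest OPP in the sorted table,
 * converted from microseconds to nanoseconds.
 */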
static int
scmi_dvfs_transition_latency_get(const struct scmi_protocol_handle *ph,
				 u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	/* us to ns */
	return dom->opp[dom->opp_count - 1].trans_latency_us * 1000;
}

static int
scmi_dvfs_rate_limit_get(const struct scmi_protocol_handle *ph,
			 u32 domain, u32 *rate_limit)
{
	struct perf_dom_info *dom;

	if (!rate_limit)
		return -EINVAL;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	*rate_limit = dom->rate_limit_us;
	return 0;
}

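/*
 * Translate a target frequency into a performance level or, in
 * level-indexing mode, into a level index via the by-frequency
 * hashtable, then request that level.
 */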
static int scmi_dvfs_freq_set(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long freq, bool poll)
{
	unsigned int level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->level_indexing_mode) {
		level = freq / dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = LOOKUP_BY_FREQ(dom->opps_by_freq,
				     freq / dom->mult_factor);
		if (!opp)
			return -EIO;

		level = opp->level_index;
	}

	return __scmi_perf_level_set(ph, dom, level, poll);
}

static int scmi_dvfs_freq_get(const struct scmi_protocol_handle *ph, u32 domain,
			      unsigned long *freq, bool poll)
{
	int ret;
	u32 level;
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	ret = __scmi_perf_level_get(ph, dom, &level, poll);
	if (ret)
		return ret;

	if (!dom->level_indexing_mode) {
		*freq = level * dom->mult_factor;
	} else {
		struct scmi_opp *opp;

		opp = xa_load(&dom->opps_by_idx, level);
		if (!opp)
			return -EIO;

		*freq = opp->indicative_freq * dom->mult_factor;
	}

	return ret;
}

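/*
 * Estimate the power cost of running at *freq: scan the sorted OPP
 * table for the first OPP at or above the requested frequency and
 * report back its exact frequency and power.
 */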
static int scmi_dvfs_est_power_get(const struct scmi_protocol_handle *ph,
				   u32 domain, unsigned long *freq,
				   unsigned long *power)
{
	struct perf_dom_info *dom;
	unsigned long opp_freq;
	int idx, ret = -EINVAL;
	struct scmi_opp *opp;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) {
		if (!dom->level_indexing_mode)
			opp_freq = opp->perf * dom->mult_factor;
		else
			opp_freq = opp->indicative_freq * dom->mult_factor;

		if (opp_freq < *freq)
			continue;

		*freq = opp_freq;
		*power = opp->power;
		ret = 0;
		break;
	}

	return ret;
}

static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph,
				      u32 domain)
{
	struct perf_dom_info *dom;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return false;

	return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr;
}

static int scmi_fast_switch_rate_limit(const struct scmi_protocol_handle *ph,
				       u32 domain, u32 *rate_limit)
{
	struct perf_dom_info *dom;

	if (!rate_limit)
		return -EINVAL;

	dom = scmi_perf_domain_lookup(ph, domain);
	if (IS_ERR(dom))
		return PTR_ERR(dom);

	if (!dom->fc_info)
		return -EINVAL;

	*rate_limit = dom->fc_info[PERF_FC_LEVEL].rate_limit;
	return 0;
}

static enum scmi_power_scale
scmi_power_scale_get(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	return pi->power_scale;
}

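/* The performance protocol operations exposed to SCMI driver users */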
static const struct scmi_perf_proto_ops perf_proto_ops = {
	.num_domains_get = scmi_perf_num_domains_get,
	.info_get = scmi_perf_info_get,
	.limits_set = scmi_perf_limits_set,
	.limits_get = scmi_perf_limits_get,
	.level_set = scmi_perf_level_set,
	.level_get = scmi_perf_level_get,
	.transition_latency_get = scmi_dvfs_transition_latency_get,
	.rate_limit_get = scmi_dvfs_rate_limit_get,
	.device_opps_add = scmi_dvfs_device_opps_add,
	.freq_set = scmi_dvfs_freq_set,
	.freq_get = scmi_dvfs_freq_get,
	.est_power_get = scmi_dvfs_est_power_get,
	.fast_switch_possible = scmi_fast_switch_possible,
	.fast_switch_rate_limit = scmi_fast_switch_rate_limit,
	.power_scale_get = scmi_power_scale_get,
};

static bool scmi_perf_notify_supported(const struct scmi_protocol_handle *ph,
				       u8 evt_id, u32 src_id)
{
	bool supported;
	struct perf_dom_info *dom;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return false;

	dom = scmi_perf_domain_lookup(ph, src_id);
	if (IS_ERR(dom))
		return false;

	if (evt_id == SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED)
		supported = dom->perf_limit_notify;
	else
		supported = dom->perf_level_notify;

	return supported;
}

static int scmi_perf_set_notify_enabled(const struct scmi_protocol_handle *ph,
					u8 evt_id, u32 src_id, bool enable)
{
	int ret, cmd_id;

	if (evt_id >= ARRAY_SIZE(evt_2_cmd))
		return -EINVAL;

	cmd_id = evt_2_cmd[evt_id];
	ret = scmi_perf_level_limits_notify(ph, src_id, cmd_id, enable);
	if (ret)
		pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
			 evt_id, src_id, ret);

	return ret;
}

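/*
 * Translate a notified performance level (or level index) back into a
 * frequency, using the appropriate per-domain OPP lookup container.
 */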
static int
scmi_perf_xlate_opp_to_freq(struct perf_dom_info *dom,
			    unsigned int index, unsigned long *freq)
{
	struct scmi_opp *opp;

	if (!dom || !freq)
		return -EINVAL;

	if (!dom->level_indexing_mode) {
		opp = xa_load(&dom->opps_by_lvl, index);
		if (!opp)
			return -ENODEV;

		*freq = opp->perf * dom->mult_factor;
	} else {
		opp = xa_load(&dom->opps_by_idx, index);
		if (!opp)
			return -ENODEV;

		*freq = opp->indicative_freq * dom->mult_factor;
	}

	return 0;
}

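/*
 * Turn a raw notification payload into a filled-in report for the SCMI
 * notification core, translating levels and limits into frequencies
 * where the corresponding OPPs can be resolved.
 */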
static void *scmi_perf_fill_custom_report(const struct scmi_protocol_handle *ph,
					  u8 evt_id, ktime_t timestamp,
					  const void *payld, size_t payld_sz,
					  void *report, u32 *src_id)
{
	int ret;
	void *rep = NULL;
	struct perf_dom_info *dom;

	switch (evt_id) {
	case SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED:
	{
		const struct scmi_perf_limits_notify_payld *p = payld;
		struct scmi_perf_limits_report *r = report;
		unsigned long freq_min, freq_max;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->range_max = le32_to_cpu(p->range_max);
		r->range_min = le32_to_cpu(p->range_min);
		/* Check if the reported domain exists at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * Event will be reported from this point on...
		 * ...even if, later, xlated frequencies were not retrieved.
		 */
		*src_id = r->domain_id;
		rep = r;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_max, &freq_max);
		if (ret)
			break;

		ret = scmi_perf_xlate_opp_to_freq(dom, r->range_min, &freq_min);
		if (ret)
			break;

		/* Report translated freqs ONLY if both available */
		r->range_max_freq = freq_max;
		r->range_min_freq = freq_min;

		break;
	}
	case SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED:
	{
		const struct scmi_perf_level_notify_payld *p = payld;
		struct scmi_perf_level_report *r = report;
		unsigned long freq;

		if (sizeof(*p) != payld_sz)
			break;

		r->timestamp = timestamp;
		r->agent_id = le32_to_cpu(p->agent_id);
		r->domain_id = le32_to_cpu(p->domain_id);
		r->performance_level = le32_to_cpu(p->performance_level);
		/* Check if the reported domain exists at all */
		dom = scmi_perf_domain_lookup(ph, r->domain_id);
		if (IS_ERR(dom))
			break;
		/*
		 * Event will be reported from this point on...
		 * ...even if, later, xlated frequencies were not retrieved.
		 */
		*src_id = r->domain_id;
		rep = r;

		/* Report translated freqs ONLY if available */
		ret = scmi_perf_xlate_opp_to_freq(dom, r->performance_level,
						  &freq);
		if (ret)
			break;

		r->performance_level_freq = freq;

		break;
	}
	default:
		break;
	}

	return rep;
}

static int scmi_perf_get_num_sources(const struct scmi_protocol_handle *ph)
{
	struct scmi_perf_info *pi = ph->get_priv(ph);

	if (!pi)
		return -EINVAL;

	return pi->num_domains;
}

static const struct scmi_event perf_events[] = {
	{
		.id = SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_limits_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_limits_report),
	},
	{
		.id = SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED,
		.max_payld_sz = sizeof(struct scmi_perf_level_notify_payld),
		.max_report_sz = sizeof(struct scmi_perf_level_report),
	},
};

static const struct scmi_event_ops perf_event_ops = {
	.is_notify_supported = scmi_perf_notify_supported,
	.get_num_sources = scmi_perf_get_num_sources,
	.set_notify_enabled = scmi_perf_set_notify_enabled,
	.fill_custom_report = scmi_perf_fill_custom_report,
};

static const struct scmi_protocol_events perf_protocol_events = {
	.queue_sz = SCMI_PROTO_QUEUE_SZ,
	.ops = &perf_event_ops,
	.evts = perf_events,
	.num_events = ARRAY_SIZE(perf_events),
};

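/*
 * Protocol initialization: negotiate the version, retrieve the global
 * attributes, then enumerate every domain's attributes, OPPs and
 * fastchannels, registering a cleanup action for the OPP xarrays.
 */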
static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
{
	int domain, ret;
	u32 version;
	struct scmi_perf_info *pinfo;

	ret = ph->xops->version_get(ph, &version);
	if (ret)
		return ret;

	dev_dbg(ph->dev, "Performance Version %d.%d\n",
		PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

	pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
	if (!pinfo)
		return -ENOMEM;

	pinfo->version = version;

	ret = scmi_perf_attributes_get(ph, pinfo);
	if (ret)
		return ret;

	pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
				       sizeof(*pinfo->dom_info), GFP_KERNEL);
	if (!pinfo->dom_info)
		return -ENOMEM;

	for (domain = 0; domain < pinfo->num_domains; domain++) {
		struct perf_dom_info *dom = pinfo->dom_info + domain;

		dom->id = domain;
		scmi_perf_domain_attributes_get(ph, dom, pinfo->notify_lim_cmd,
						pinfo->notify_lvl_cmd, version);
		scmi_perf_describe_levels_get(ph, dom, version);

		if (dom->perf_fastchannels)
			scmi_perf_domain_init_fc(ph, dom);
	}

	ret = devm_add_action_or_reset(ph->dev, scmi_perf_xa_destroy, pinfo);
	if (ret)
		return ret;

	return ph->set_priv(ph, pinfo, version);
}

static const struct scmi_protocol scmi_perf = {
	.id = SCMI_PROTOCOL_PERF,
	.owner = THIS_MODULE,
	.instance_init = &scmi_perf_protocol_init,
	.ops = &perf_proto_ops,
	.events = &perf_protocol_events,
	.supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(perf, scmi_perf)