GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L4

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_cmn.h"
#include "soc15_common.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL

const int link_speed[] = {25, 50, 80, 160, 320, 640};

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) #type
static const char * const __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

#define smu_cmn_call_asic_func(intf, smu, args...) \
	((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
			(smu)->ppt_funcs->intf(smu, ##args) : \
			-ENOTSUPP) : \
	 -EINVAL)

#define SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL (5 * HZ)
#define SMU_MSG_V1_DEFAULT_RATELIMIT_BURST 10

static const char *smu_get_message_name(struct smu_context *smu,
					enum smu_message_type type)
{
	if (type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}

/* Redefine the SMU error codes here.
 *
 * Note that these definitions are redundant and should be removed
 * once the SMU exports a unified header file containing these
 * macros, which we can then simply include and use. At the moment,
 * these error codes are unfortunately still defined per ASIC by the
 * SMU, yet we have a single driver for all ASICs.
 */
#define SMU_RESP_NONE 0
#define SMU_RESP_OK 1
#define SMU_RESP_CMD_FAIL 0xFF
#define SMU_RESP_CMD_UNKNOWN 0xFE
#define SMU_RESP_CMD_BAD_PREREQ 0xFD
#define SMU_RESP_BUSY_OTHER 0xFC
#define SMU_RESP_DEBUG_END 0xFB

#define SMU_RESP_UNEXP (~0U)

static int smu_msg_v1_send_debug_msg(struct smu_msg_ctl *ctl, u32 msg, u32 param)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;

	if (!(ctl->flags & SMU_MSG_CTL_DEBUG_MAILBOX))
		return -EOPNOTSUPP;

	mutex_lock(&ctl->lock);

	WREG32(cfg->debug_param_reg, param);
	WREG32(cfg->debug_msg_reg, msg);
	WREG32(cfg->debug_resp_reg, 0);

	mutex_unlock(&ctl->lock);

	return 0;
}

static int __smu_cmn_send_debug_msg(struct smu_msg_ctl *ctl,
				    u32 msg,
				    u32 param)
{
	if (!ctl->ops || !ctl->ops->send_debug_msg)
		return -EOPNOTSUPP;

	return ctl->ops->send_debug_msg(ctl, msg, param);
}

/**
 * smu_cmn_wait_for_response -- wait for response from the SMU
 * @smu: pointer to an SMU context
 *
 * Wait for status from the SMU.
 *
 * Return 0 on success, -errno on error, indicating the execution
 * status and result of the message being waited for. See
 * smu_msg_v1_decode_response() for details of the -errno.
 */
int smu_cmn_wait_for_response(struct smu_context *smu)
{
	return smu_msg_wait_response(&smu->msg_ctl, 0);
}

/**
 * smu_cmn_send_smc_msg_with_param -- send a message with parameter
 * @smu: pointer to an SMU context
 * @msg: message to send
 * @param: parameter to send to the SMU
 * @read_arg: pointer to u32 to return a value from the SMU back
 *            to the caller
 *
 * Send the message @msg with parameter @param to the SMU, wait for
 * completion of the command, and return a value from the SMU in the
 * @read_arg pointer.
 *
 * Return 0 on success, -errno when a problem is encountered sending
 * the message or receiving the reply. If there is a PCI bus recovery
 * in progress or the destination is a virtual GPU which does not allow
 * this message type, the message is simply dropped and success is also
 * returned. See smu_msg_v1_decode_response() for details of the -errno.
 *
 * If we weren't able to send the message to the SMU, we also print
 * the error to the kernel log.
 *
 * Command completion status is printed only if the -errno is
 * -EREMOTEIO, indicating that the SMU returned an
 * undefined/unknown/unspecified result. All other cases are
 * well-defined and not printed, but are instead given back to the
 * client to decide what to do next.
 *
 * The value in @read_arg is read back regardless, to give the client
 * more information; on error this would most likely be @param, but we
 * can't assume that. This also eliminates extra conditionals.
 */
int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
				    enum smu_message_type msg,
				    uint32_t param,
				    uint32_t *read_arg)
{
	struct smu_msg_ctl *ctl = &smu->msg_ctl;
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = read_arg ? 1 : 0,
		.flags = 0,
		.timeout = 0,
	};
	int ret;

	ret = ctl->ops->send_msg(ctl, &args);

	if (read_arg)
		*read_arg = args.out_args[0];

	return ret;
}

int smu_cmn_send_smc_msg(struct smu_context *smu,
			 enum smu_message_type msg,
			 uint32_t *read_arg)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       msg,
					       0,
					       read_arg);
}
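
/*
 * Illustrative sketch (editorial addition, not part of the upstream file):
 * the typical caller pattern for the two helpers above. Reading the SMU
 * firmware version back through @read_arg mirrors what
 * smu_cmn_get_smc_version() does further down in this file.
 *
 *	uint32_t smu_version;
 *	int ret;
 *
 *	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, &smu_version);
 *	if (ret)
 *		return ret;
 *	dev_dbg(smu->adev->dev, "SMU firmware version: 0x%08x\n", smu_version);
 */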

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
			       uint32_t msg)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, 0);
}

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
					  uint32_t msg, uint32_t param)
{
	return __smu_cmn_send_debug_msg(&smu->msg_ctl, msg, param);
}

static int smu_msg_v1_decode_response(u32 resp)
{
	int res;

	switch (resp) {
	case SMU_RESP_NONE:
		/* The SMU is busy--still executing your command.
		 */
		res = -ETIME;
		break;
	case SMU_RESP_OK:
		res = 0;
		break;
	case SMU_RESP_CMD_FAIL:
		/* Command completed successfully, but the command
		 * status was failure.
		 */
		res = -EIO;
		break;
	case SMU_RESP_CMD_UNKNOWN:
		/* Unknown command--ignored by the SMU.
		 */
		res = -EOPNOTSUPP;
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		/* Valid command--bad prerequisites.
		 */
		res = -EINVAL;
		break;
	case SMU_RESP_BUSY_OTHER:
		/* The SMU is busy with other commands. The client
		 * should retry in 10 us.
		 */
		res = -EBUSY;
		break;
	default:
		/* Unknown or debug response from the SMU.
		 */
		res = -EREMOTEIO;
		break;
	}

	return res;
}

static u32 __smu_msg_v1_poll_stat(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	u32 timeout = timeout_us ? timeout_us : ctl->default_timeout;
	u32 reg;

	for (; timeout > 0; timeout--) {
		reg = RREG32(cfg->resp_reg);
		if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			break;
		udelay(1);
	}

	return reg;
}

static void __smu_msg_v1_send(struct smu_msg_ctl *ctl, u16 index,
			      struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	struct smu_msg_config *cfg = &ctl->config;
	int i;

	WREG32(cfg->resp_reg, 0);
	for (i = 0; i < args->num_args; i++)
		WREG32(cfg->arg_regs[i], args->args[i]);
	WREG32(cfg->msg_reg, index);
}

static void __smu_msg_v1_read_out_args(struct smu_msg_ctl *ctl,
				       struct smu_msg_args *args)
{
	struct amdgpu_device *adev = ctl->smu->adev;
	int i;

	for (i = 0; i < args->num_out_args; i++)
		args->out_args[i] = RREG32(ctl->config.arg_regs[i]);
}

static void __smu_msg_v1_print_err_limited(struct smu_msg_ctl *ctl,
					   struct smu_msg_args *args,
					   char *err_msg)
{
	static DEFINE_RATELIMIT_STATE(_rs,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_INTERVAL,
				      SMU_MSG_V1_DEFAULT_RATELIMIT_BURST);
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;

	if (__ratelimit(&_rs)) {
		u32 in[SMU_MSG_MAX_ARGS];
		int i;

		dev_err(adev->dev, "%s msg_reg: %x resp_reg: %x", err_msg,
			RREG32(ctl->config.msg_reg),
			RREG32(ctl->config.resp_reg));
		if (args->num_args > 0) {
			for (i = 0; i < args->num_args; i++)
				in[i] = RREG32(ctl->config.arg_regs[i]);
			print_hex_dump(KERN_ERR, "in params:", DUMP_PREFIX_NONE,
				       16, 4, in, args->num_args * sizeof(u32),
				       false);
		}
	}
}

static void __smu_msg_v1_print_error(struct smu_msg_ctl *ctl,
				     u32 resp,
				     struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	int index = ctl->message_map[args->msg].map_to;

	switch (resp) {
	case SMU_RESP_NONE:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: No response");
		break;
	case SMU_RESP_OK:
		break;
	case SMU_RESP_CMD_FAIL:
		break;
	case SMU_RESP_CMD_UNKNOWN:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown command");
		break;
	case SMU_RESP_CMD_BAD_PREREQ:
		__smu_msg_v1_print_err_limited(
			ctl, args, "SMU: valid command, bad prerequisites");
		break;
	case SMU_RESP_BUSY_OTHER:
		if (args->msg != SMU_MSG_GetBadPageCount)
			__smu_msg_v1_print_err_limited(ctl, args,
						       "SMU: I'm very busy");
		break;
	case SMU_RESP_DEBUG_END:
		__smu_msg_v1_print_err_limited(ctl, args, "SMU: Debug Err");
		break;
	case SMU_RESP_UNEXP:
		if (amdgpu_device_bus_status_check(adev)) {
			dev_err(adev->dev,
				"SMU: bus error for message: %s(%d) response:0x%08X ",
				smu_get_message_name(smu, args->msg), index,
				resp);
			if (args->num_args > 0)
				print_hex_dump(KERN_ERR,
					       "in params:", DUMP_PREFIX_NONE,
					       16, 4, args->args,
					       args->num_args * sizeof(u32),
					       false);
		}
		break;
	default:
		__smu_msg_v1_print_err_limited(ctl, args,
					       "SMU: unknown response");
		break;
	}
}

static int __smu_msg_v1_ras_filter(struct smu_msg_ctl *ctl,
				   enum smu_message_type msg, u32 msg_flags,
				   bool *skip_pre_poll)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	bool fed_status;
	u32 reg;

	if (!(smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI))
		return 0;

	fed_status = amdgpu_ras_get_fed_status(adev);

	/* Block non-RAS-priority messages during RAS error */
	if (fed_status && !(msg_flags & SMU_MSG_RAS_PRI)) {
		dev_dbg(adev->dev, "RAS error detected, skip sending %s",
			smu_get_message_name(smu, msg));
		return -EACCES;
	}

	/* Skip pre-poll for priority messages or during RAS error */
	if ((msg_flags & SMU_MSG_NO_PRECHECK) || fed_status) {
		reg = RREG32(ctl->config.resp_reg);
		dev_dbg(adev->dev,
			"Sending priority message %s response status: %x",
			smu_get_message_name(smu, msg), reg);
		if (reg == 0)
			*skip_pre_poll = true;
	}

	return 0;
}

/**
 * smu_msg_v1_send_msg - complete V1 protocol with all filtering
 * @ctl: Message control block
 * @args: Message arguments
 *
 * Return: 0 on success, negative errno on failure
 */
static int smu_msg_v1_send_msg(struct smu_msg_ctl *ctl,
			       struct smu_msg_args *args)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	const struct cmn2asic_msg_mapping *mapping;
	u32 reg, msg_flags;
	int ret, index;
	bool skip_pre_poll = false;
	bool lock_held = args->flags & SMU_MSG_FLAG_LOCK_HELD;

	/* Early exit if no HW access */
	if (adev->no_hw_access)
		return 0;

	/* Message index translation */
	if (args->msg >= SMU_MSG_MAX_COUNT || !ctl->message_map)
		return -EINVAL;

	if (args->num_args > ctl->config.num_arg_regs ||
	    args->num_out_args > ctl->config.num_arg_regs)
		return -EINVAL;

	mapping = &ctl->message_map[args->msg];
	if (!mapping->valid_mapping)
		return -EINVAL;

	msg_flags = mapping->flags;
	index = mapping->map_to;

	/* VF filter - skip messages not valid for VF */
	if (amdgpu_sriov_vf(adev) && !(msg_flags & SMU_MSG_VF_FLAG))
		return 0;

	if (!lock_held)
		mutex_lock(&ctl->lock);

	/* RAS priority filter */
	ret = __smu_msg_v1_ras_filter(ctl, args->msg, msg_flags,
				      &skip_pre_poll);
	if (ret)
		goto out;

	/* FW state checks */
	if (smu->smc_fw_state == SMU_FW_HANG) {
		dev_err(adev->dev,
			"SMU is in hanged state, failed to send smu message!\n");
		ret = -EREMOTEIO;
		goto out;
	} else if (smu->smc_fw_state == SMU_FW_INIT) {
		skip_pre_poll = true;
		smu->smc_fw_state = SMU_FW_RUNTIME;
	}

	/* Pre-poll: ensure previous message completed */
	if (!skip_pre_poll) {
		reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
		ret = smu_msg_v1_decode_response(reg);
		if (reg == SMU_RESP_NONE || ret == -EREMOTEIO) {
			__smu_msg_v1_print_error(ctl, reg, args);
			goto out;
		}
	}

	/* Send message */
	__smu_msg_v1_send(ctl, (u16)index, args);

	/* Post-poll (skip if ASYNC) */
	if (args->flags & SMU_MSG_FLAG_ASYNC) {
		ret = 0;
		goto out;
	}

	reg = __smu_msg_v1_poll_stat(ctl, args->timeout);
	ret = smu_msg_v1_decode_response(reg);

	/* FW state update on fatal error */
	if (ret == -EREMOTEIO) {
		smu->smc_fw_state = SMU_FW_HANG;
		__smu_msg_v1_print_error(ctl, reg, args);
	} else if (ret != 0) {
		__smu_msg_v1_print_error(ctl, reg, args);
	}

	/* Read output args */
	if (ret == 0 && args->num_out_args > 0) {
		__smu_msg_v1_read_out_args(ctl, args);
		dev_dbg(adev->dev, "smu send message: %s(%d) resp : 0x%08x",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
		print_hex_dump_debug("out params:", DUMP_PREFIX_NONE, 16, 4,
				     args->out_args,
				     args->num_out_args * sizeof(u32), false);
	} else {
		dev_dbg(adev->dev, "smu send message: %s(%d), resp: 0x%08x\n",
			smu_get_message_name(smu, args->msg), index, reg);
		if (args->num_args > 0)
			print_hex_dump_debug("in params:", DUMP_PREFIX_NONE, 16,
					     4, args->args,
					     args->num_args * sizeof(u32),
					     false);
	}

out:
	/* Debug halt on error */
	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	if (!lock_held)
		mutex_unlock(&ctl->lock);
	return ret;
}

static int smu_msg_v1_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	struct smu_context *smu = ctl->smu;
	struct amdgpu_device *adev = smu->adev;
	u32 reg;
	int ret;

	reg = __smu_msg_v1_poll_stat(ctl, timeout_us);
	ret = smu_msg_v1_decode_response(reg);

	if (ret == -EREMOTEIO)
		smu->smc_fw_state = SMU_FW_HANG;

	if (unlikely(adev->pm.smu_debug_mask & SMU_DEBUG_HALT_ON_ERROR) &&
	    ret && (ret != -ETIME)) {
		amdgpu_device_halt(adev);
		WARN_ON(1);
	}

	return ret;
}

const struct smu_msg_ops smu_msg_v1_ops = {
	.send_msg = smu_msg_v1_send_msg,
	.wait_response = smu_msg_v1_wait_response,
	.decode_response = smu_msg_v1_decode_response,
	.send_debug_msg = smu_msg_v1_send_debug_msg,
};

int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us)
{
	return ctl->ops->wait_response(ctl, timeout_us);
}

/**
 * smu_msg_send_async_locked - Send message asynchronously, caller holds lock
 * @ctl: Message control block
 * @msg: Message type
 * @param: Message parameter
 *
 * Send an SMU message without waiting for response. Caller must hold ctl->lock
 * and call smu_msg_wait_response() later to get the result.
 *
 * Return: 0 on success, negative errno on failure
 */
int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
			      enum smu_message_type msg, u32 param)
{
	struct smu_msg_args args = {
		.msg = msg,
		.args[0] = param,
		.num_args = 1,
		.num_out_args = 0,
		.flags = SMU_MSG_FLAG_ASYNC | SMU_MSG_FLAG_LOCK_HELD,
		.timeout = 0,
	};

	return ctl->ops->send_msg(ctl, &args);
}
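
/*
 * Illustrative sketch (editorial addition): the split send/wait pattern that
 * smu_msg_send_async_locked() enables. The caller holds ctl->lock across the
 * whole sequence, as the kernel-doc above requires.
 *
 *	mutex_lock(&ctl->lock);
 *	ret = smu_msg_send_async_locked(ctl, msg, param);
 *	if (!ret)
 *		ret = smu_msg_wait_response(ctl, 0);
 *	mutex_unlock(&ctl->lock);
 */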

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
				   enum smu_cmn2asic_mapping_type type,
				   uint32_t index)
{
	struct cmn2asic_msg_mapping msg_mapping;
	struct cmn2asic_mapping mapping;

	switch (type) {
	case CMN2ASIC_MAPPING_MSG:
		if (index >= SMU_MSG_MAX_COUNT ||
		    !smu->msg_ctl.message_map)
			return -EINVAL;

		msg_mapping = smu->msg_ctl.message_map[index];
		if (!msg_mapping.valid_mapping)
			return -EINVAL;

		if (amdgpu_sriov_vf(smu->adev) &&
		    !(msg_mapping.flags & SMU_MSG_VF_FLAG))
			return -EACCES;

		return msg_mapping.map_to;

	case CMN2ASIC_MAPPING_CLK:
		if (index >= SMU_CLK_COUNT ||
		    !smu->clock_map)
			return -EINVAL;

		mapping = smu->clock_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_FEATURE:
		if (index >= SMU_FEATURE_COUNT ||
		    !smu->feature_map)
			return -EINVAL;

		mapping = smu->feature_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_TABLE:
		if (index >= SMU_TABLE_COUNT ||
		    !smu->table_map)
			return -EINVAL;

		mapping = smu->table_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_PWR:
		if (index >= SMU_POWER_SOURCE_COUNT ||
		    !smu->pwr_src_map)
			return -EINVAL;

		mapping = smu->pwr_src_map[index];
		if (!mapping.valid_mapping)
			return -EINVAL;

		return mapping.map_to;

	case CMN2ASIC_MAPPING_WORKLOAD:
		if (index >= PP_SMC_POWER_PROFILE_COUNT ||
		    !smu->workload_map)
			return -EINVAL;

		mapping = smu->workload_map[index];
		if (!mapping.valid_mapping)
			return -ENOTSUPP;

		return mapping.map_to;

	default:
		return -EINVAL;
	}
}

int smu_cmn_feature_is_supported(struct smu_context *smu,
				 enum smu_feature_mask mask)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return smu_feature_list_is_set(smu, SMU_FEATURE_LIST_SUPPORTED,
				       feature_id);
}

static int __smu_get_enabled_features(struct smu_context *smu,
				      uint64_t *enabled_features)
{
	return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}

int smu_cmn_feature_is_enabled(struct smu_context *smu,
			       enum smu_feature_mask mask)
{
	struct amdgpu_device *adev = smu->adev;
	uint64_t enabled_features;
	int feature_id;

	if (__smu_get_enabled_features(smu, &enabled_features)) {
		dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
		return 0;
	}

	/*
	 * Renoir and Cyan Skillfish are assumed to have all features
	 * enabled. Since they also have no feature_map available, this
	 * check avoids the unwanted feature_map lookup below.
	 */
	if (enabled_features == ULLONG_MAX)
		return 1;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return 0;

	return test_bit(feature_id, (unsigned long *)&enabled_features);
}

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
				enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		feature_id = SMU_FEATURE_DPM_VCLK_BIT;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		feature_id = SMU_FEATURE_DPM_DCLK_BIT;
		break;
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

int smu_cmn_get_enabled_mask(struct smu_context *smu,
			     uint64_t *feature_mask)
{
	uint32_t *feature_mask_high;
	uint32_t *feature_mask_low;
	int ret = 0, index = 0;

	if (!feature_mask)
		return -EINVAL;

	feature_mask_low = &((uint32_t *)feature_mask)[0];
	feature_mask_high = &((uint32_t *)feature_mask)[1];

	index = smu_cmn_to_asic_specific_index(smu,
					       CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GetEnabledSmuFeatures);
	if (index > 0) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      0,
						      feature_mask_low);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_GetEnabledSmuFeatures,
						      1,
						      feature_mask_high);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesHigh,
					   feature_mask_high);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_GetEnabledSmuFeaturesLow,
					   feature_mask_low);
	}

	return ret;
}

uint64_t smu_cmn_get_indep_throttler_status(
					const unsigned long dep_status,
					const uint8_t *throttler_map)
{
	uint64_t indep_status = 0;
	uint8_t dep_bit = 0;

	for_each_set_bit(dep_bit, &dep_status, 32)
		indep_status |= 1ULL << throttler_map[dep_bit];

	return indep_status;
}
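
/*
 * Worked example (editorial addition): with a hypothetical throttler_map
 * where throttler_map[0] = 16 and throttler_map[3] = 40, a dependent status
 * of 0b1001 (bits 0 and 3 set) yields an independent status of
 * (1ULL << 16) | (1ULL << 40).
 */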

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
					uint64_t feature_mask,
					bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesLow,
						      lower_32_bits(feature_mask),
						      NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_DisableSmuFeaturesHigh,
						      upper_32_bits(feature_mask),
						      NULL);
	}

	return ret;
}

int smu_cmn_feature_set_enabled(struct smu_context *smu,
				enum smu_feature_mask mask,
				bool enable)
{
	int feature_id;

	feature_id = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_FEATURE,
						    mask);
	if (feature_id < 0)
		return -EINVAL;

	return smu_cmn_feature_update_enable_state(smu,
						   1ULL << feature_id,
						   enable);
}
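
/*
 * Illustrative sketch (editorial addition): disabling a single DPM feature
 * through the wrapper above, e.g. GFXCLK DPM. Error handling mirrors the
 * callers elsewhere in the driver.
 *
 *	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT,
 *					  false);
 *	if (ret)
 *		dev_err(smu->adev->dev, "Failed to disable GFXCLK DPM!\n");
 */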

#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea) #fea
static const char *__smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

static const char *smu_get_feature_name(struct smu_context *smu,
					enum smu_feature_mask feature)
{
	if (feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";
	return __smu_feature_names[feature];
}

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
				   char *buf)
{
	int8_t sort_feature[MAX(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
	uint64_t feature_mask;
	int i, feature_index;
	uint32_t count = 0;
	size_t size = 0;

	if (__smu_get_enabled_features(smu, &feature_mask))
		return 0;

	size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
			     upper_32_bits(feature_mask), lower_32_bits(feature_mask));

	memset(sort_feature, -1, sizeof(sort_feature));

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_FEATURE,
							       i);
		if (feature_index < 0)
			continue;

		sort_feature[feature_index] = i;
	}

	size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
			      "No", "Feature", "Bit", "State");

	for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
		if (sort_feature[feature_index] < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
				      count++,
				      smu_get_feature_name(smu, sort_feature[feature_index]),
				      feature_index,
				      !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
				      "enabled" : "disabled");
	}

	return size;
}

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
				uint64_t new_mask)
{
	int ret = 0;
	uint64_t feature_mask;
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;

	ret = __smu_get_enabled_features(smu, &feature_mask);
	if (ret)
		return ret;

	feature_2_enabled = ~feature_mask & new_mask;
	feature_2_disabled = feature_mask & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_enabled,
							  true);
		if (ret)
			return ret;
	}
	if (feature_2_disabled) {
		ret = smu_cmn_feature_update_enable_state(smu,
							  feature_2_disabled,
							  false);
		if (ret)
			return ret;
	}

	return ret;
}

/**
 * smu_cmn_disable_all_features_with_exception - disable all dpm features
 *                                               except the one specified by
 *                                               @mask
 *
 * @smu:  smu_context pointer
 * @mask: the dpm feature which should not be disabled
 *        SMU_FEATURE_COUNT: no exception, disable all dpm features
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
						enum smu_feature_mask mask)
{
	uint64_t features_to_disable = U64_MAX;
	int skipped_feature_id;

	if (mask != SMU_FEATURE_COUNT) {
		skipped_feature_id = smu_cmn_to_asic_specific_index(smu,
								    CMN2ASIC_MAPPING_FEATURE,
								    mask);
		if (skipped_feature_id < 0)
			return -EINVAL;

		features_to_disable &= ~(1ULL << skipped_feature_id);
	}

	return smu_cmn_feature_update_enable_state(smu,
						   features_to_disable,
						   0);
}

int smu_cmn_get_smc_version(struct smu_context *smu,
			    uint32_t *if_version,
			    uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (smu->smc_fw_if_version && smu->smc_fw_version) {
		if (if_version)
			*if_version = smu->smc_fw_if_version;

		if (smu_version)
			*smu_version = smu->smc_fw_version;

		return 0;
	}

	if (if_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
		if (ret)
			return ret;

		smu->smc_fw_if_version = *if_version;
	}

	if (smu_version) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
		if (ret)
			return ret;

		smu->smc_fw_version = *smu_version;
	}

	return ret;
}

int smu_cmn_update_table(struct smu_context *smu,
			 enum smu_table_id table_index,
			 int argument,
			 void *table_data,
			 bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_cmn_to_asic_specific_index(smu,
						      CMN2ASIC_MAPPING_TABLE,
						      table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush HDP cache: to ensure the content seen by the
		 * GPU is consistent with the CPU.
		 */
		amdgpu_hdp_flush(adev, NULL);
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
					      SMU_MSG_TransferTableDram2Smu :
					      SMU_MSG_TransferTableSmu2Dram,
					      table_id | ((argument & 0xFFFF) << 16),
					      NULL);
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_hdp_invalidate(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return 0;
}

int smu_cmn_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_WATERMARKS,
				    0,
				    watermarks_table,
				    true);
}

int smu_cmn_write_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.driver_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_PPTABLE,
				    0,
				    pptable,
				    true);
}

int smu_cmn_get_metrics_table(struct smu_context *smu,
			      void *metrics_table,
			      bool bypass_cache)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t table_size =
		smu_table->tables[SMU_TABLE_SMU_METRICS].size;
	int ret = 0;

	if (bypass_cache ||
	    !smu_table->metrics_time ||
	    time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_SMU_METRICS,
					   0,
					   smu_table->metrics_table,
					   false);
		if (ret) {
			dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
			return ret;
		}
		smu_table->metrics_time = jiffies;
	}

	if (metrics_table)
		memcpy(metrics_table, smu_table->metrics_table, table_size);

	return 0;
}
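
/*
 * Illustrative sketch (editorial addition): how ASIC-specific code typically
 * consumes smu_cmn_get_metrics_table(). SmuMetrics_t stands in for the
 * ASIC-specific metrics structure and is an assumption here.
 *
 *	SmuMetrics_t metrics;
 *
 *	// Cached read: reuses a copy no older than 1 ms.
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
 *
 *	// Forced refresh: always transfers a fresh table from the SMU.
 *	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
 */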

int smu_cmn_get_combo_pptable(struct smu_context *smu)
{
	void *pptable = smu->smu_table.combo_pptable;

	return smu_cmn_update_table(smu,
				    SMU_TABLE_COMBO_PPTABLE,
				    0,
				    pptable,
				    false);
}

int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n");

	return ret;
}

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;
	bool snd_driver_loaded;

	/*
	 * If the ASIC comes with no audio function, we always assume
	 * it is "enabled".
	 */
	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
					adev->pdev->bus->number, 1);
	if (!p)
		return true;

	snd_driver_loaded = pci_is_enabled(p) ? true : false;

	pci_dev_put(p);

	return snd_driver_loaded;
}

static char *smu_soc_policy_get_desc(struct smu_dpm_policy *policy, int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case SOC_PSTATE_DEFAULT:
		return "soc_pstate_default";
	case SOC_PSTATE_0:
		return "soc_pstate_0";
	case SOC_PSTATE_1:
		return "soc_pstate_1";
	case SOC_PSTATE_2:
		return "soc_pstate_2";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc pstate_policy_desc = {
	.name = STR_SOC_PSTATE_POLICY,
	.get_desc = smu_soc_policy_get_desc,
};

void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &pstate_policy_desc;
}

static char *smu_xgmi_plpd_policy_get_desc(struct smu_dpm_policy *policy,
					   int level)
{
	if (level < 0 || !(policy->level_mask & BIT(level)))
		return "Invalid";

	switch (level) {
	case XGMI_PLPD_DISALLOW:
		return "plpd_disallow";
	case XGMI_PLPD_DEFAULT:
		return "plpd_default";
	case XGMI_PLPD_OPTIMIZED:
		return "plpd_optimized";
	}

	return "Invalid";
}

static struct smu_dpm_policy_desc xgmi_plpd_policy_desc = {
	.name = STR_XGMI_PLPD_POLICY,
	.get_desc = smu_xgmi_plpd_policy_get_desc,
};

void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy)
{
	policy->desc = &xgmi_plpd_policy_desc;
}

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
				       u32 workload_mask,
				       u32 *backend_workload_mask)
{
	int workload_type;
	u32 profile_mode;

	*backend_workload_mask = 0;

	for (profile_mode = 0; profile_mode < PP_SMC_POWER_PROFILE_COUNT; profile_mode++) {
		if (!(workload_mask & (1 << profile_mode)))
			continue;

		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       profile_mode);

		if (workload_type < 0)
			continue;

		*backend_workload_mask |= 1 << workload_type;
	}
}

static inline bool smu_cmn_freqs_match(uint32_t freq1, uint32_t freq2)
{
	/* Frequencies within 25 MHz are considered equal */
	return (abs((int)freq1 - (int)freq2) <= 25);
}

int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,
				 struct smu_dpm_table *dpm_table,
				 uint32_t cur_clk, char *buf, int *offset)
{
	uint32_t min_clk, max_clk, level_index, count;
	uint32_t freq_values[3];
	int size, lvl, i;
	bool is_fine_grained;
	bool is_deep_sleep;
	bool freq_match;

	if (!dpm_table || !buf)
		return -EINVAL;

	level_index = 0;
	size = *offset;
	count = dpm_table->count;
	is_fine_grained = dpm_table->flags & SMU_DPM_TABLE_FINE_GRAINED;
	min_clk = SMU_DPM_TABLE_MIN(dpm_table);
	max_clk = SMU_DPM_TABLE_MAX(dpm_table);

	/* Deep sleep - current clock < min_clock/2, TBD: cur_clk = 0 as GFXOFF */
	is_deep_sleep = cur_clk < min_clk / 2;
	if (is_deep_sleep) {
		size += sysfs_emit_at(buf, size, "S: %uMhz *\n", cur_clk);
		level_index = 1;
	}

	if (!is_fine_grained) {
		for (i = 0; i < count; i++) {
			freq_match = !is_deep_sleep &&
				     smu_cmn_freqs_match(
					     cur_clk,
					     dpm_table->dpm_levels[i].value);
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
					      level_index + i,
					      dpm_table->dpm_levels[i].value,
					      freq_match ? "*" : "");
		}
	} else {
		count = 2;
		freq_values[0] = min_clk;
		freq_values[1] = max_clk;

		if (!is_deep_sleep) {
			if (smu_cmn_freqs_match(cur_clk, min_clk)) {
				lvl = 0;
			} else if (smu_cmn_freqs_match(cur_clk, max_clk)) {
				lvl = 1;
			} else {
				/* NOTE: use index '1' to show current clock value */
				lvl = 1;
				count = 3;
				freq_values[1] = cur_clk;
				freq_values[2] = max_clk;
			}
		}

		for (i = 0; i < count; i++) {
			size += sysfs_emit_at(
				buf, size, "%d: %uMhz %s\n", level_index + i,
				freq_values[i],
				(!is_deep_sleep && i == lvl) ? "*" : "");
		}
	}

	*offset = size;

	return 0;
}

int smu_cmn_print_pcie_levels(struct smu_context *smu,
			      struct smu_pcie_table *pcie_table,
			      uint32_t cur_gen, uint32_t cur_lane, char *buf,
			      int *offset)
{
	int size, i;

	if (!pcie_table || !buf)
		return -EINVAL;

	size = *offset;

	for (i = 0; i < pcie_table->lclk_levels; i++) {
		size += sysfs_emit_at(
			buf, size, "%d: %s %s %dMhz %s\n", i,
			(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s," :
			(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s," :
			(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s," :
			(pcie_table->pcie_gen[i] == 3) ? "16.0GT/s," :
			(pcie_table->pcie_gen[i] == 4) ? "32.0GT/s," :
			(pcie_table->pcie_gen[i] == 5) ? "64.0GT/s," :
			"",
			(pcie_table->pcie_lane[i] == 1) ? "x1" :
			(pcie_table->pcie_lane[i] == 2) ? "x2" :
			(pcie_table->pcie_lane[i] == 3) ? "x4" :
			(pcie_table->pcie_lane[i] == 4) ? "x8" :
			(pcie_table->pcie_lane[i] == 5) ? "x12" :
			(pcie_table->pcie_lane[i] == 6) ? "x16" :
			(pcie_table->pcie_lane[i] == 7) ? "x32" :
			"",
			pcie_table->lclk_freq[i],
			(cur_gen == pcie_table->pcie_gen[i]) &&
			(cur_lane == pcie_table->pcie_lane[i]) ?
			"*" :
			"");
	}

	*offset = size;

	return 0;
}

int smu_cmn_dpm_pcie_gen_idx(int gen)
{
	int ret;

	switch (gen) {
	case 1 ... 5:
		ret = gen - 1;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}

int smu_cmn_dpm_pcie_width_idx(int width)
{
	int ret;

	switch (width) {
	case 1:
		ret = 1;
		break;
	case 2:
		ret = 2;
		break;
	case 4:
		ret = 3;
		break;
	case 8:
		ret = 4;
		break;
	case 12:
		ret = 5;
		break;
	case 16:
		ret = 6;
		break;
	case 32:
		ret = 7;
		break;
	default:
		ret = -1;
		break;
	}

	return ret;
}