GitHub Repository: torvalds/linux
Path: drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#define SWSMU_CODE_LAYER_L1

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/power_supply.h>
#include <linux/reboot.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atom.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "sienna_cichlid_ppt.h"
#include "renoir_ppt.h"
#include "vangogh_ppt.h"
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
#include "smu_v13_0_0_ppt.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_5_ppt.h"
#include "smu_v13_0_6_ppt.h"
#include "smu_v13_0_7_ppt.h"
#include "smu_v14_0_0_ppt.h"
#include "smu_v14_0_2_ppt.h"
#include "amd_pcie.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

static const struct amd_pm_funcs swsmu_pm_funcs;
static int smu_force_smuclk_levels(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
			   enum amd_dpm_forced_level level,
			   enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static void smu_power_profile_mode_get(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static void smu_power_profile_mode_put(struct smu_context *smu,
				       enum PP_SMC_POWER_PROFILE profile_mode);
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type);
static int smu_od_edit_dpm_table(void *handle,
				 enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size);

static int smu_sys_get_pp_feature_mask(void *handle,
				       char *buf)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_get_pp_feature_mask(smu, buf);
}

static int smu_sys_set_pp_feature_mask(void *handle,
				       uint64_t new_mask)
{
	struct smu_context *smu = handle;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	return smu_set_pp_feature_mask(smu, new_mask);
}

int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
{
	if (!smu->ppt_funcs->set_gfx_off_residency)
		return -EINVAL;

	return smu_set_gfx_off_residency(smu, value);
}

int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_residency)
		return -EINVAL;

	return smu_get_gfx_off_residency(smu, value);
}

int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
{
	if (!smu->ppt_funcs->get_gfx_off_entrycount)
		return -EINVAL;

	return smu_get_gfx_off_entrycount(smu, value);
}

int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
	if (!smu->ppt_funcs->get_gfx_off_status)
		return -EINVAL;

	*value = smu_get_gfx_off_status(smu);

	return 0;
}

int smu_set_soft_freq_range(struct smu_context *smu,
			    enum pp_clock_type type,
			    uint32_t min,
			    uint32_t max)
{
	enum smu_clk_type clk_type;
	int ret = 0;

	clk_type = smu_convert_to_smuclk(type);
	if (clk_type == SMU_CLK_COUNT)
		return -EINVAL;

	if (smu->ppt_funcs->set_soft_freq_limited_range)
		ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
								  clk_type,
								  min,
								  max,
								  false);

	return ret;
}

int smu_get_dpm_freq_range(struct smu_context *smu,
			   enum smu_clk_type clk_type,
			   uint32_t *min,
			   uint32_t *max)
{
	int ret = -ENOTSUPP;

	if (!min && !max)
		return -EINVAL;

	if (smu->ppt_funcs->get_dpm_ultimate_freq)
		ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
							    clk_type,
							    min,
							    max);

	return ret;
}

int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
		ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
		if (ret)
			dev_err(adev->dev, "Failed to enable gfx imu!\n");
	}
	return ret;
}

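/*
 * Legacy get_mclk/get_sclk callbacks: report the minimum (low == true) or
 * maximum UCLK/GFXCLK, converted from the MHz DPM range to 10 kHz units.
 */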
static u32 smu_get_mclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

static u32 smu_get_sclk(void *handle, bool low)
{
	struct smu_context *smu = handle;
	uint32_t clk_freq;
	int ret = 0;

	ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
				     low ? &clk_freq : NULL,
				     !low ? &clk_freq : NULL);
	if (ret)
		return 0;
	return clk_freq * 100;
}

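/*
 * Power up GFX via the IMU only when the firmware is PSP-loaded and the
 * device is not in reset or S0ix.
 */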
static int smu_set_gfx_imu_enable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
		return 0;

	return smu_set_gfx_power_up_by_imu(smu);
}

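/*
 * Return false if any VCN or JPEG IP block instance was skipped (left
 * invalid) during IP block init.
 */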
static bool is_vcn_enabled(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_VCN ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_JPEG) &&
		    !adev->ip_blocks[i].status.valid)
			return false;
	}

	return true;
}

static int smu_dpm_set_vcn_enable(struct smu_context *smu,
				  bool enable,
				  int inst)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	/*
	 * don't poweron vcn/jpeg when they are skipped.
	 */
	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_vcn_enable)
		return 0;

	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
	if (!ret)
		atomic_set(&power_gate->vcn_gated[inst], !enable);

	return ret;
}

static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
				   bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!is_vcn_enabled(smu->adev))
		return 0;

	if (!smu->ppt_funcs->dpm_set_jpeg_enable)
		return 0;

	if (atomic_read(&power_gate->jpeg_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->jpeg_gated, !enable);

	return ret;
}

static int smu_dpm_set_vpe_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->ppt_funcs->dpm_set_vpe_enable)
		return 0;

	if (atomic_read(&power_gate->vpe_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->vpe_gated, !enable);

	return ret;
}

static int smu_dpm_set_isp_enable(struct smu_context *smu,
				  bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret;

	if (!smu->ppt_funcs->dpm_set_isp_enable)
		return 0;

	if (atomic_read(&power_gate->isp_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->isp_gated, !enable);

	return ret;
}

static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
				       bool enable)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!smu->adev->enable_umsch_mm)
		return 0;

	if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
		return 0;

	if (atomic_read(&power_gate->umsch_mm_gated) ^ enable)
		return 0;

	ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
	if (!ret)
		atomic_set(&power_gate->umsch_mm_gated, !enable);

	return ret;
}

static int smu_set_mall_enable(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->ppt_funcs->set_mall_enable)
		return 0;

	ret = smu->ppt_funcs->set_mall_enable(smu);

	return ret;
}

/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @handle: smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate: to power gate if true, ungate otherwise
 * @inst: the instance of the IP block to power gate/ungate
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    where the caller guarantees the call is race condition free.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level, in which case the smu->mutex
 *    protection is already enforced by the parent API
 *    smu_force_performance_level in the call path.
 */
static int smu_dpm_set_power_gate(void *handle,
				  uint32_t block_type,
				  bool gate,
				  int inst)
{
	struct smu_context *smu = handle;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
		dev_WARN(smu->adev->dev,
			 "SMU uninitialized but power %s requested for %u!\n",
			 gate ? "gate" : "ungate", block_type);
		return -EOPNOTSUPP;
	}

	switch (block_type) {
	/*
	 * Some legacy code of amdgpu_vcn.c and vcn_v2*.c still uses
	 * AMD_IP_BLOCK_TYPE_UVD for VCN. So, here both of them are kept.
	 */
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		ret = smu_dpm_set_vcn_enable(smu, !gate, inst);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
				gate ? "gate" : "ungate", inst);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
				gate ? "enable" : "disable");
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_VPE:
		ret = smu_dpm_set_vpe_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
				gate ? "gate" : "ungate");
		break;
	case AMD_IP_BLOCK_TYPE_ISP:
		ret = smu_dpm_set_isp_enable(smu, !gate);
		if (ret)
			dev_err(smu->adev->dev, "Failed to power %s ISP!\n",
				gate ? "gate" : "ungate");
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported block type!\n");
		return -EINVAL;
	}

	return ret;
}

/**
 * smu_set_user_clk_dependencies - set user profile clock dependencies
 *
 * @smu: smu_context pointer
 * @clk: enum smu_clk_type type
 *
 * Enable/Disable the clock dependency for the @clk type.
 */
static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
{
	if (smu->adev->in_suspend)
		return;

	if (clk == SMU_MCLK) {
		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_FCLK) {
		/* MCLK takes precedence over FCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
	} else if (clk == SMU_SOCCLK) {
		/* MCLK takes precedence over SOCCLK */
		if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
			return;

		smu->user_dpm_profile.clk_dependency = 0;
		smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
	} else
		/* Add clk dependencies here, if any */
		return;
}

/**
 * smu_restore_dpm_user_profile - reinstate user dpm profile
 *
 * @smu: smu_context pointer
 *
 * Restore the saved user power configurations, including the power limit,
 * clock frequencies, fan control mode and fan speed.
 */
static void smu_restore_dpm_user_profile(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->adev->in_suspend)
		return;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return;

	/* Enable restore flag */
	smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;

	/* set the user dpm power limit */
	if (smu->user_dpm_profile.power_limit) {
		ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
		if (ret)
			dev_err(smu->adev->dev, "Failed to set power limit value\n");
	}

	/* set the user dpm clock configurations */
	if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
		enum smu_clk_type clk_type;

		for (clk_type = 0; clk_type < SMU_CLK_COUNT; clk_type++) {
			/*
			 * Iterate over smu clk type and force the saved user clk
			 * configs, skip if clock dependency is enabled
			 */
			if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
			    smu->user_dpm_profile.clk_mask[clk_type]) {
				ret = smu_force_smuclk_levels(smu, clk_type,
						smu->user_dpm_profile.clk_mask[clk_type]);
				if (ret)
					dev_err(smu->adev->dev,
						"Failed to set clock type = %d\n", clk_type);
			}
		}
	}

	/* set the user dpm fan configurations */
	if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
	    smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
		ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
		if (ret != -EOPNOTSUPP) {
			smu->user_dpm_profile.fan_speed_pwm = 0;
			smu->user_dpm_profile.fan_speed_rpm = 0;
			smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
			dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
		}

		if (smu->user_dpm_profile.fan_speed_pwm) {
			ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
		}

		if (smu->user_dpm_profile.fan_speed_rpm) {
			ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
			if (ret != -EOPNOTSUPP)
				dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
		}
	}

	/* Restore user customized OD settings */
	if (smu->user_dpm_profile.user_od) {
		if (smu->ppt_funcs->restore_user_od_settings) {
			ret = smu->ppt_funcs->restore_user_od_settings(smu);
			if (ret)
				dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
		}
	}

	/* Disable restore flag */
	smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
}

static int smu_get_power_num_states(void *handle,
				    struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* not support power state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}

bool is_support_sw_smu(struct amdgpu_device *adev)
{
	/* vega20 is 11.0.2, but it's supported via the powerplay code */
	if (adev->asic_type == CHIP_VEGA20)
		return false;

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) >= IP_VERSION(11, 0, 0)) &&
	    amdgpu_device_ip_is_valid(adev, AMD_IP_BLOCK_TYPE_SMC))
		return true;

	return false;
}

bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
		return false;

	return true;
}


static int smu_sys_get_pp_table(void *handle,
				char **table)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	return smu_table->power_play_table_size;
}

static int smu_sys_set_pp_table(void *handle,
				const char *buf,
				size_t size)
{
	struct smu_context *smu = handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
		return -EOPNOTSUPP;

	if (header->usStructureSize != size) {
		dev_err(smu->adev->dev, "pp table size not matched !\n");
		return -EIO;
	}

	if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
		kfree(smu_table->hardcode_pptable);
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
		if (!smu_table->hardcode_pptable)
			return -ENOMEM;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action(for Navi1x, the DPMs disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

	return ret;
}

static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
	int ret = 0;

	/*
	 * With SCPM enabled, the allowed featuremasks setting (via
	 * PPSMC_MSG_SetAllowedFeaturesMaskLow/High) is not permitted.
	 * That means there is no way to let PMFW know the settings below.
	 * Thus, we just assume all the features are allowed under
	 * such a scenario.
	 */
	if (smu->adev->scpm_enabled) {
		bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
		return 0;
	}

	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	bitmap_or(feature->allowed, feature->allowed,
		  (unsigned long *)allowed_feature_mask,
		  feature->feature_num);

	return ret;
}

static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = adev->powerplay.pp_handle;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
		navi10_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
		sienna_cichlid_set_ppt_funcs(smu);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		renoir_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
		vangogh_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 8):
		yellow_carp_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 4):
	case IP_VERSION(13, 0, 11):
		smu_v13_0_4_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 5):
		smu_v13_0_5_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 8):
		cyan_skillfish_set_ppt_funcs(smu);
		break;
	case IP_VERSION(11, 0, 2):
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case IP_VERSION(13, 0, 2):
		aldebaran_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):
		smu_v13_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 14):
	case IP_VERSION(13, 0, 12):
		smu_v13_0_6_set_ppt_funcs(smu);
		/* Enable pp_od_clk_voltage node */
		smu->od_enabled = true;
		break;
	case IP_VERSION(13, 0, 7):
		smu_v13_0_7_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 0):
	case IP_VERSION(14, 0, 1):
	case IP_VERSION(14, 0, 4):
	case IP_VERSION(14, 0, 5):
		smu_v14_0_0_set_ppt_funcs(smu);
		break;
	case IP_VERSION(14, 0, 2):
	case IP_VERSION(14, 0, 3):
		smu_v14_0_2_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int smu_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu;
	int r;

	smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
	if (!smu)
		return -ENOMEM;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	smu->smu_baco.state = SMU_BACO_STATE_NONE;
	smu->smu_baco.platform_support = false;
	smu->smu_baco.maco_support = false;
	smu->user_dpm_profile.fan_mode = -1;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_UNKNOWN;

	mutex_init(&smu->message_lock);

	adev->powerplay.pp_handle = smu;
	adev->powerplay.pp_funcs = &swsmu_pm_funcs;

	r = smu_set_funcs(adev);
	if (r)
		return r;
	return smu_init_microcode(smu);
}

static int smu_set_default_dpm_table(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
	int ret = 0;

	if (!smu->ppt_funcs->set_default_dpm_table)
		return 0;

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
	}
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		jpeg_gate = atomic_read(&power_gate->jpeg_gated);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			ret = smu_dpm_set_vcn_enable(smu, true, i);
			if (ret)
				return ret;
		}
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		ret = smu_dpm_set_jpeg_enable(smu, true);
		if (ret)
			goto err_out;
	}

	ret = smu->ppt_funcs->set_default_dpm_table(smu);
	if (ret)
		dev_err(smu->adev->dev,
			"Failed to setup default dpm clock tables!\n");

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
err_out:
	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
	}

	return ret;
}

static int smu_apply_default_config_table_settings(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_get_default_config_table_settings(smu,
						    &adev->pm.config_table);
	if (ret)
		return ret;

	return smu_set_config_table(smu, &adev->pm.config_table);
}

static int smu_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret = 0;

	smu_set_fine_grain_gfx_freq_parameters(smu);

	if (!smu->pm_enabled)
		return 0;

	ret = smu_post_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to post smu init!\n");
		return ret;
	}

	/*
	 * Explicitly notify PMFW of the power mode the system is in, since
	 * PMFW may boot the ASIC with a different mode.
	 * For those supporting ACDC switch via gpio, PMFW will
	 * handle the switch automatically. Driver involvement
	 * is unnecessary.
	 */
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
	smu_set_ac_dc(smu);

	if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
	    (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
		return 0;

	if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
		ret = smu_set_default_od_settings(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup default OD settings!\n");
			return ret;
		}
	}

	ret = smu_populate_umd_state_clk(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
		return ret;
	}

	ret = smu_get_asic_power_limits(smu,
					&smu->current_power_limit,
					&smu->default_power_limit,
					&smu->max_power_limit,
					&smu->min_power_limit);
	if (ret) {
		dev_err(adev->dev, "Failed to get asic power limits!\n");
		return ret;
	}

	if (!amdgpu_sriov_vf(adev))
		smu_get_unique_id(smu);

	smu_get_fan_parameters(smu);

	smu_handle_task(smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT);

	ret = smu_apply_default_config_table_settings(smu);
	if (ret && (ret != -EOPNOTSUPP)) {
		dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
		return ret;
	}

	smu_restore_dpm_user_profile(smu);

	return 0;
}

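/*
 * Allocate the VRAM/GTT buffers backing the SMU driver table and the
 * optional tool (PMSTATUSLOG) table.
 */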
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			dev_err(adev->dev, "VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT;
	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		/* If one of the tables has VRAM domain restriction, keep it in
		 * VRAM
		 */
		if ((tables[i].domain &
		    (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) ==
			    AMDGPU_GEM_DOMAIN_VRAM)
			driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		dev_err(adev->dev, "VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}

static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}

static void smu_update_gpu_addresses(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG;
	struct smu_table *driver_table = &(smu_table->driver_table);
	struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table;

	if (pm_status_table->bo)
		pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo);
	if (driver_table->bo)
		driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo);
	if (dummy_read_1_table->bo)
		dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo);
}

/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is for SMC use; its location is reported to the firmware
 * through the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain =
		(adev->pm.smu_debug_mask & SMU_DEBUG_POOL_USE_VRAM) ?
		AMDGPU_GEM_DOMAIN_VRAM :
		AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		if (ret)
			dev_err(adev->dev, "VRAM allocation for dramlog failed!\n");
		break;
	default:
		break;
	}

	return ret;
}

static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}

static int smu_alloc_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!dummy_read_1_table->size)
		return 0;

	ret = amdgpu_bo_create_kernel(adev,
				      dummy_read_1_table->size,
				      dummy_read_1_table->align,
				      dummy_read_1_table->domain,
				      &dummy_read_1_table->bo,
				      &dummy_read_1_table->mc_address,
				      &dummy_read_1_table->cpu_addr);
	if (ret)
		dev_err(adev->dev, "VRAM allocation for dummy read table failed!\n");

	return ret;
}

static void smu_free_dummy_read_table(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *dummy_read_1_table =
			&smu_table->dummy_read_1_table;


	amdgpu_bo_free_kernel(&dummy_read_1_table->bo,
			      &dummy_read_1_table->mc_address,
			      &dummy_read_1_table->cpu_addr);

	memset(dummy_read_1_table, 0, sizeof(struct smu_table));
}

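/*
 * Software-side setup: SMC tables, power context, table buffers, memory
 * pool, dummy read table and the SMU I2C buses.
 */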
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	/**
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, and etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smc tables!\n");
		return ret;
	}

	/**
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
		return ret;
	}

	/*
	 * allocate vram bos to store smc table contents.
	 */
	ret = smu_init_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_alloc_dummy_read_table(smu);
	if (ret)
		return ret;

	ret = smu_i2c_init(smu);
	if (ret)
		return ret;

	return 0;
}

static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	smu_i2c_fini(smu);

	smu_free_dummy_read_table(smu);

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_fini_power(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to init smu_fini_power!\n");
		return ret;
	}

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}

static void smu_throttling_logging_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       throttling_logging_work);

	smu_log_thermal_throttling(smu);
}

static void smu_interrupt_work_fn(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context,
					       interrupt_work);

	if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
		smu->ppt_funcs->interrupt_work(smu);
}

static void smu_swctf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu =
		container_of(work, struct smu_context, swctf_delayed_work.work);
	struct smu_temperature_range *range =
				&smu->thermal_range;
	struct amdgpu_device *adev = smu->adev;
	uint32_t hotspot_tmp, size;

	/*
	 * If the hotspot temperature is confirmed as below SW CTF setting point
	 * after the delay enforced, nothing will be done.
	 * Otherwise, a graceful shutdown will be performed to prevent further damage.
	 */
	if (range->software_shutdown_temp &&
	    smu->ppt_funcs->read_sensor &&
	    !smu->ppt_funcs->read_sensor(smu,
					 AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					 &hotspot_tmp,
					 &size) &&
	    hotspot_tmp / 1000 < range->software_shutdown_temp)
		return;

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
{
	struct smu_dpm_context *dpm_ctxt = &(smu->smu_dpm);
	struct smu_dpm_policy_ctxt *policy_ctxt;
	struct smu_dpm_policy *policy;

	policy = smu_get_pm_policy(smu, PP_PM_POLICY_XGMI_PLPD);
	if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
		return;
	}

	/* PMFW put PLPD into default policy after enabling the feature */
	if (smu_feature_is_enabled(smu,
				   SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT)) {
		if (policy)
			policy->current_level = XGMI_PLPD_DEFAULT;
	} else {
		policy_ctxt = dpm_ctxt->dpm_policies;
		if (policy_ctxt)
			policy_ctxt->policy_mask &=
				~BIT(PP_PM_POLICY_XGMI_PLPD);
	}
}

static void smu_init_power_profile(struct smu_context *smu)
{
	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_UNKNOWN)
		smu->power_profile_mode =
			PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu_power_profile_mode_get(smu, smu->power_profile_mode);
}

static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int i, ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
	atomic64_set(&smu->throttle_int_counter, 0);
	smu->watermarks_bitmap = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
	atomic_set(&smu->smu_power.power_gate.isp_gated, 1);
	atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);

	smu_init_power_profile(smu);
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;

	INIT_DELAYED_WORK(&smu->swctf_delayed_work,
			  smu_swctf_delayed_work_handler);

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw init smc table!\n");
		return ret;
	}

	/* get boot_values from vbios to set revision, gfxclk, and etc. */
	ret = smu_get_vbios_bootup_values(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get VBIOS boot clock values!\n");
		return ret;
	}

	ret = smu_init_pptable_microcode(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup pptable firmware!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to register smc irq handler!\n");
		return ret;
	}

	/* If there is no way to query fan control mode, fan control is not supported */
	if (!smu->ppt_funcs->get_fan_control_mode)
		smu->adev->pm.no_fan = true;

	return 0;
}

static int smu_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	int ret;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to sw fini smc table!\n");
		return ret;
	}

	if (smu->custom_profile_params) {
		kfree(smu->custom_profile_params);
		smu->custom_profile_params = NULL;
	}

	smu_fini_microcode(smu);

	return 0;
}

static int smu_get_thermal_temperature_range(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_temperature_range *range =
				&smu->thermal_range;
	int ret = 0;

	if (!smu->ppt_funcs->get_thermal_temperature_range)
		return 0;

	ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
	if (ret)
		return ret;

	adev->pm.dpm.thermal.min_temp = range->min;
	adev->pm.dpm.thermal.max_temp = range->max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range->edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range->hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range->hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range->hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range->mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range->mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range->mem_emergency_max;

	return ret;
}

/**
 * smu_wbrf_handle_exclusion_ranges - consume the wbrf exclusion ranges
 *
 * @smu: smu_context pointer
 *
 * Retrieve the wbrf exclusion ranges and send them to PMFW for proper handling.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
{
	struct wbrf_ranges_in_out wbrf_exclusion = {0};
	struct freq_band_range *wifi_bands = wbrf_exclusion.band_list;
	struct amdgpu_device *adev = smu->adev;
	uint32_t num_of_wbrf_ranges = MAX_NUM_OF_WBRF_RANGES;
	uint64_t start, end;
	int ret, i, j;

	ret = amd_wbrf_retrieve_freq_band(adev->dev, &wbrf_exclusion);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve exclusion ranges!\n");
		return ret;
	}

	/*
	 * The exclusion ranges array we got might be filled with holes and duplicate
	 * entries. For example:
	 * {(2400, 2500), (0, 0), (6882, 6962), (2400, 2500), (0, 0), (6117, 6189), (0, 0)...}
	 * We need to do some sorting to eliminate those holes and duplicate entries.
	 * Expected output: {(2400, 2500), (6117, 6189), (6882, 6962), (0, 0)...}
	 */
	for (i = 0; i < num_of_wbrf_ranges; i++) {
		start = wifi_bands[i].start;
		end = wifi_bands[i].end;

		/* get the last valid entry to fill the intermediate hole */
		if (!start && !end) {
			for (j = num_of_wbrf_ranges - 1; j > i; j--)
				if (wifi_bands[j].start && wifi_bands[j].end)
					break;

			/* no valid entry left */
			if (j <= i)
				break;

			start = wifi_bands[i].start = wifi_bands[j].start;
			end = wifi_bands[i].end = wifi_bands[j].end;
			wifi_bands[j].start = 0;
			wifi_bands[j].end = 0;
			num_of_wbrf_ranges = j;
		}

		/* eliminate duplicate entries */
		for (j = i + 1; j < num_of_wbrf_ranges; j++) {
			if ((wifi_bands[j].start == start) && (wifi_bands[j].end == end)) {
				wifi_bands[j].start = 0;
				wifi_bands[j].end = 0;
			}
		}
	}

	/* Send the sorted wifi_bands to PMFW */
	ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	/* Try to set the wifi_bands again */
	if (unlikely(ret == -EBUSY)) {
		mdelay(5);
		ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
	}

	return ret;
}

/**
 * smu_wbrf_event_handler - handle notify events
 *
 * @nb: notifier block
 * @action: event type
 * @_arg: event data
 *
 * Calls relevant amdgpu function in response to wbrf event
 * notification from kernel.
 */
static int smu_wbrf_event_handler(struct notifier_block *nb,
				  unsigned long action, void *_arg)
{
	struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);

	switch (action) {
	case WBRF_CHANGED:
		schedule_delayed_work(&smu->wbrf_delayed_work,
				      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

/**
 * smu_wbrf_delayed_work_handler - callback on delayed work timer expired
 *
 * @work: struct work_struct pointer
 *
 * Flood is over and driver will consume the latest exclusion ranges.
 */
static void smu_wbrf_delayed_work_handler(struct work_struct *work)
{
	struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);

	smu_wbrf_handle_exclusion_ranges(smu);
}

/**
 * smu_wbrf_support_check - check wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the ACPI interface whether wbrf is supported.
 */
static void smu_wbrf_support_check(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
		acpi_amd_wbrf_supported_consumer(adev->dev);

	if (smu->wbrf_supported)
		dev_info(adev->dev, "RF interference mitigation is supported\n");
}

/**
 * smu_wbrf_init - init driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Verifies the AMD ACPI interfaces and registers with the wbrf
 * notifier chain if wbrf feature is supported.
 * Returns 0 on success, error on failure.
 */
static int smu_wbrf_init(struct smu_context *smu)
{
	int ret;

	if (!smu->wbrf_supported)
		return 0;

	INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);

	smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
	ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
	if (ret)
		return ret;

	/*
	 * Some wifi band exclusion ranges may already be in place before
	 * our driver loads. Make sure our driver is aware of those
	 * exclusion ranges.
	 */
	schedule_delayed_work(&smu->wbrf_delayed_work,
			      msecs_to_jiffies(SMU_WBRF_EVENT_HANDLING_PACE));

	return 0;
}

/**
 * smu_wbrf_fini - tear down driver wbrf support
 *
 * @smu: smu_context pointer
 *
 * Unregisters with the wbrf notifier chain.
 */
static void smu_wbrf_fini(struct smu_context *smu)
{
	if (!smu->wbrf_supported)
		return;

	amd_wbrf_unregister_notifier(&smu->wbrf_notifier);

	cancel_delayed_work_sync(&smu->wbrf_delayed_work);
}

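/*
 * Program the SMC for runtime operation: table/pool locations, pptable
 * upload, allowed features, PCIe parameters, DPM enablement, thermal
 * ranges and wbrf.
 */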
static int smu_smc_hw_setup(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	struct amdgpu_device *adev = smu->adev;
	uint8_t pcie_gen = 0, pcie_width = 0;
	uint64_t features_supported;
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 5, 0):
	case IP_VERSION(11, 5, 2):
	case IP_VERSION(11, 0, 12):
		if (adev->in_suspend && smu_is_dpm_running(smu)) {
			dev_info(adev->dev, "dpm has been enabled\n");
			ret = smu_system_features_control(smu, true);
			if (ret)
				dev_err(adev->dev, "Failed system features control!\n");
			return ret;
		}
		break;
	default:
		break;
	}

	ret = smu_init_display_count(smu, 0);
	if (ret) {
		dev_info(adev->dev, "Failed to pre-set display count as 0!\n");
		return ret;
	}

	ret = smu_set_driver_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDriverDramAddr!\n");
		return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	ret = smu_set_tool_table_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetToolsDramAddr!\n");
		return ret;
	}

	/*
	 * Use msg SetSystemVirtualDramAddr and DramLogSetDramAddr can notify
	 * pool location.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to SetDramLogDramAddr!\n");
		return ret;
	}

	/*
	 * It is assumed the pptable used before runpm is same as
	 * the one used afterwards. Thus, we can reuse the stored
	 * copy and do not need to resetup the pptable again.
	 */
	if (!adev->in_runpm) {
		ret = smu_setup_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to setup pptable!\n");
			return ret;
		}
	}

	/* smu_dump_pptable(smu); */

	/*
	 * With SCPM enabled, PSP is responsible for the PPTable transferring
	 * (to SMU). Driver involvement is neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to transfer pptable to SMC!\n");
			return ret;
		}
	}

	/* issue Run*Btc msg */
	ret = smu_run_btc(smu);
	if (ret)
		return ret;

	/* Enable UclkShadow on wbrf supported */
	if (smu->wbrf_supported) {
		ret = smu_enable_uclk_shadow(smu, true);
		if (ret) {
			dev_err(adev->dev, "Failed to enable UclkShadow feature to support wbrf!\n");
			return ret;
		}
	}

	/*
	 * With SCPM enabled, these actions (and relevant messages) are
	 * neither needed nor permitted.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_feature_set_allowed_mask(smu);
		if (ret) {
			dev_err(adev->dev, "Failed to set driver allowed features mask!\n");
			return ret;
		}
	}

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
		pcie_gen = 4;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32)
		pcie_width = 7;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;
	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
	if (ret) {
		dev_err(adev->dev, "Attempt to override pcie params failed!\n");
		return ret;
	}

	ret = smu_system_features_control(smu, true);
	if (ret) {
		dev_err(adev->dev, "Failed to enable requested dpm features!\n");
		return ret;
	}

	smu_init_xgmi_plpd_mode(smu);

	ret = smu_feature_get_enabled_mask(smu, &features_supported);
	if (ret) {
		dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
		return ret;
	}
	bitmap_copy(feature->supported,
		    (unsigned long *)&features_supported,
		    feature->feature_num);

	if (!smu_is_dpm_running(smu))
		dev_info(adev->dev, "dpm has been disabled\n");

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * type of clks.
	 */
	ret = smu_set_default_dpm_table(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
		return ret;
	}

	ret = smu_get_thermal_temperature_range(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to get thermal temperature ranges!\n");
		return ret;
	}

	ret = smu_enable_thermal_alert(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to enable thermal alert!\n");
		return ret;
	}

	ret = smu_notify_display_change(smu);
	if (ret) {
		dev_err(adev->dev, "Failed to notify display change!\n");
		return ret;
	}

	/*
	 * Set min deep sleep dce fclk with bootup value from vbios via
	 * SetMinDeepSleepDcefclk MSG.
	 */
	ret = smu_set_min_dcef_deep_sleep(smu,
					  smu->smu_table.boot_values.dcefclk / 100);
	if (ret) {
		dev_err(adev->dev, "Error setting min deepsleep dcefclk\n");
		return ret;
	}

	/* Init wbrf support. Properly setup the notifier */
	ret = smu_wbrf_init(smu);
	if (ret)
		dev_err(adev->dev, "Error during wbrf init call\n");

	return ret;
}

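/*
 * Bring up the SMC: load microcode when it is not PSP-loaded, verify
 * firmware status and check the driver interface version.
 */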
static int smu_start_smc_engine(struct smu_context *smu)
1830
{
1831
struct amdgpu_device *adev = smu->adev;
1832
int ret = 0;
1833
1834
if (amdgpu_virt_xgmi_migrate_enabled(adev))
1835
smu_update_gpu_addresses(smu);
1836
1837
smu->smc_fw_state = SMU_FW_INIT;
1838
1839
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1840
if (amdgpu_ip_version(adev, MP1_HWIP, 0) < IP_VERSION(11, 0, 0)) {
1841
if (smu->ppt_funcs->load_microcode) {
1842
ret = smu->ppt_funcs->load_microcode(smu);
1843
if (ret)
1844
return ret;
1845
}
1846
}
1847
}
1848
1849
if (smu->ppt_funcs->check_fw_status) {
1850
ret = smu->ppt_funcs->check_fw_status(smu);
1851
if (ret) {
1852
dev_err(adev->dev, "SMC is not ready\n");
1853
return ret;
1854
}
1855
}
1856
1857
/*
1858
* Send msg GetDriverIfVersion to check if the return value is equal
1859
* with DRIVER_IF_VERSION of smc header.
1860
*/
1861
ret = smu_check_fw_version(smu);
1862
if (ret)
1863
return ret;
1864
1865
return ret;
1866
}
1867
1868
static int smu_hw_init(struct amdgpu_ip_block *ip_block)
1869
{
1870
int i, ret;
1871
struct amdgpu_device *adev = ip_block->adev;
1872
struct smu_context *smu = adev->powerplay.pp_handle;
1873
1874
if (amdgpu_sriov_multi_vf_mode(adev)) {
1875
smu->pm_enabled = false;
1876
return 0;
1877
}
1878
1879
ret = smu_start_smc_engine(smu);
1880
if (ret) {
1881
dev_err(adev->dev, "SMC engine is not correctly up!\n");
1882
return ret;
1883
}
1884
1885
/*
1886
* Check whether wbrf is supported. This needs to be done
1887
* before SMU setup starts since part of SMU configuration
1888
* relies on this.
1889
*/
1890
smu_wbrf_support_check(smu);
1891
1892
if (smu->is_apu) {
1893
ret = smu_set_gfx_imu_enable(smu);
1894
if (ret)
1895
return ret;
1896
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
1897
smu_dpm_set_vcn_enable(smu, true, i);
1898
smu_dpm_set_jpeg_enable(smu, true);
1899
smu_dpm_set_vpe_enable(smu, true);
1900
smu_dpm_set_umsch_mm_enable(smu, true);
1901
smu_set_mall_enable(smu);
1902
smu_set_gfx_cgpg(smu, true);
1903
}
1904
1905
if (!smu->pm_enabled)
1906
return 0;
1907
1908
ret = smu_get_driver_allowed_feature_mask(smu);
1909
if (ret)
1910
return ret;
1911
1912
ret = smu_smc_hw_setup(smu);
1913
if (ret) {
1914
dev_err(adev->dev, "Failed to setup smc hw!\n");
1915
return ret;
1916
}
1917
1918
/*
1919
* Retrieve the maximum sustainable clocks here, considering that
* 1. It is not needed on resume (from S3).
* 2. DAL settings come between .hw_init and .late_init of SMU,
* and DAL needs to know the maximum sustainable clocks. Thus
* it cannot be put in .late_init().
1924
*/
1925
ret = smu_init_max_sustainable_clocks(smu);
1926
if (ret) {
1927
dev_err(adev->dev, "Failed to init max sustainable clocks!\n");
1928
return ret;
1929
}
1930
1931
adev->pm.dpm_enabled = true;
1932
1933
dev_info(adev->dev, "SMU is initialized successfully!\n");
1934
1935
return 0;
1936
}
1937
1938
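/*
* Disable the DPM features on the SMU side before hw fini/suspend/reset,
* except for the cases (handled below) where PMFW already performs the
* disablement itself or where the BACO feature must stay enabled.
*/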
static int smu_disable_dpms(struct smu_context *smu)
1939
{
1940
struct amdgpu_device *adev = smu->adev;
1941
int ret = 0;
1942
bool use_baco = !smu->is_apu &&
1943
((amdgpu_in_reset(adev) &&
1944
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
1945
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
1946
1947
/*
1948
* For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
* properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
1950
*/
1951
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1952
case IP_VERSION(13, 0, 0):
1953
case IP_VERSION(13, 0, 7):
1954
case IP_VERSION(13, 0, 10):
1955
case IP_VERSION(14, 0, 2):
1956
case IP_VERSION(14, 0, 3):
1957
return 0;
1958
default:
1959
break;
1960
}
1961
1962
/*
1963
* For custom pptable uploading, skip the DPM features
1964
* disable process on Navi1x ASICs.
1965
* - The gfx related features are under control of
* RLC on those ASICs. RLC reinitialization would be
* needed to re-enable them, which would take much
* more effort.
1969
*
1970
* - SMU firmware can handle the DPM reenablement
1971
* properly.
1972
*/
1973
if (smu->uploading_custom_pp_table) {
1974
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1975
case IP_VERSION(11, 0, 0):
1976
case IP_VERSION(11, 0, 5):
1977
case IP_VERSION(11, 0, 9):
1978
case IP_VERSION(11, 0, 7):
1979
case IP_VERSION(11, 0, 11):
1980
case IP_VERSION(11, 5, 0):
1981
case IP_VERSION(11, 5, 2):
1982
case IP_VERSION(11, 0, 12):
1983
case IP_VERSION(11, 0, 13):
1984
return 0;
1985
default:
1986
break;
1987
}
1988
}
1989
1990
/*
1991
* For Sienna_Cichlid (and the other ASICs listed below), PMFW will handle the
* features disablement properly on BACO entry. Driver involvement is unnecessary.
1993
*/
1994
if (use_baco) {
1995
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1996
case IP_VERSION(11, 0, 7):
1997
case IP_VERSION(11, 0, 0):
1998
case IP_VERSION(11, 0, 5):
1999
case IP_VERSION(11, 0, 9):
2000
case IP_VERSION(13, 0, 7):
2001
return 0;
2002
default:
2003
break;
2004
}
2005
}
2006
2007
/*
2008
* For GFX11 and subsequent APUs, PMFW will handle the features disablement properly
2009
* for gpu reset and S0i3 cases. Driver involvement is unnecessary.
2010
*/
2011
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) >= 11 &&
2012
smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
2013
return 0;
2014
2015
/*
2016
* For gpu reset, runpm and hibernation through BACO,
2017
* BACO feature has to be kept enabled.
2018
*/
2019
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
2020
ret = smu_disable_all_features_with_exception(smu,
2021
SMU_FEATURE_BACO_BIT);
2022
if (ret)
2023
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
2024
} else {
2025
/* DisableAllSmuFeatures message is not permitted with SCPM enabled */
2026
if (!adev->scpm_enabled) {
2027
ret = smu_system_features_control(smu, false);
2028
if (ret)
2029
dev_err(adev->dev, "Failed to disable smu features.\n");
2030
}
2031
}
2032
2033
/* Notify SMU that RLC is going to be off and stop RLC/SMU interaction,
* otherwise SMU will hang while interacting with RLC if RLC is halted.
* This is a workaround for the Vangogh ASIC to fix the SMU hang issue.
2036
*/
2037
ret = smu_notify_rlc_state(smu, false);
2038
if (ret) {
2039
dev_err(adev->dev, "Fail to notify rlc status!\n");
2040
return ret;
2041
}
2042
2043
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
2044
!((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
2045
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
2046
adev->gfx.rlc.funcs->stop(adev);
2047
2048
return ret;
2049
}
2050
2051
static int smu_smc_hw_cleanup(struct smu_context *smu)
2052
{
2053
struct amdgpu_device *adev = smu->adev;
2054
int ret = 0;
2055
2056
smu_wbrf_fini(smu);
2057
2058
cancel_work_sync(&smu->throttling_logging_work);
2059
cancel_work_sync(&smu->interrupt_work);
2060
2061
ret = smu_disable_thermal_alert(smu);
2062
if (ret) {
2063
dev_err(adev->dev, "Fail to disable thermal alert!\n");
2064
return ret;
2065
}
2066
2067
cancel_delayed_work_sync(&smu->swctf_delayed_work);
2068
2069
ret = smu_disable_dpms(smu);
2070
if (ret) {
2071
dev_err(adev->dev, "Fail to disable dpm features!\n");
2072
return ret;
2073
}
2074
2075
return 0;
2076
}
2077
2078
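/*
* On SMU 13.0.10 boards without display hardware, move MP1 to the UNLOAD
* state on a plain driver unload (i.e. not during runtime pm, suspend or
* gpu reset).
*/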
static int smu_reset_mp1_state(struct smu_context *smu)
2079
{
2080
struct amdgpu_device *adev = smu->adev;
2081
int ret = 0;
2082
2083
if ((!adev->in_runpm) && (!adev->in_suspend) &&
2084
(!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
2085
IP_VERSION(13, 0, 10) &&
2086
!amdgpu_device_has_display_hardware(adev))
2087
ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
2088
2089
return ret;
2090
}
2091
2092
static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
2093
{
2094
struct amdgpu_device *adev = ip_block->adev;
2095
struct smu_context *smu = adev->powerplay.pp_handle;
2096
int i, ret;
2097
2098
if (amdgpu_sriov_multi_vf_mode(adev))
2099
return 0;
2100
2101
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
2102
smu_dpm_set_vcn_enable(smu, false, i);
2103
adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
2104
}
2105
smu_dpm_set_jpeg_enable(smu, false);
2106
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
2107
smu_dpm_set_vpe_enable(smu, false);
2108
smu_dpm_set_umsch_mm_enable(smu, false);
2109
2110
if (!smu->pm_enabled)
2111
return 0;
2112
2113
adev->pm.dpm_enabled = false;
2114
2115
ret = smu_smc_hw_cleanup(smu);
2116
if (ret)
2117
return ret;
2118
2119
ret = smu_reset_mp1_state(smu);
2120
if (ret)
2121
return ret;
2122
2123
return 0;
2124
}
2125
2126
static void smu_late_fini(struct amdgpu_ip_block *ip_block)
2127
{
2128
struct amdgpu_device *adev = ip_block->adev;
2129
struct smu_context *smu = adev->powerplay.pp_handle;
2130
2131
kfree(smu);
2132
}
2133
2134
static int smu_reset(struct smu_context *smu)
2135
{
2136
struct amdgpu_device *adev = smu->adev;
2137
struct amdgpu_ip_block *ip_block;
2138
int ret;
2139
2140
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_SMC);
2141
if (!ip_block)
2142
return -EINVAL;
2143
2144
ret = smu_hw_fini(ip_block);
2145
if (ret)
2146
return ret;
2147
2148
ret = smu_hw_init(ip_block);
2149
if (ret)
2150
return ret;
2151
2152
ret = smu_late_init(ip_block);
2153
if (ret)
2154
return ret;
2155
2156
return 0;
2157
}
2158
2159
static int smu_suspend(struct amdgpu_ip_block *ip_block)
2160
{
2161
struct amdgpu_device *adev = ip_block->adev;
2162
struct smu_context *smu = adev->powerplay.pp_handle;
2163
int ret;
2164
uint64_t count;
2165
2166
if (amdgpu_sriov_multi_vf_mode(adev))
2167
return 0;
2168
2169
if (!smu->pm_enabled)
2170
return 0;
2171
2172
adev->pm.dpm_enabled = false;
2173
2174
ret = smu_smc_hw_cleanup(smu);
2175
if (ret)
2176
return ret;
2177
2178
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2179
2180
smu_set_gfx_cgpg(smu, false);
2181
2182
/*
2183
* pmfw resets the entrycount when the device is suspended, so we save the
* last value here to be restored on resume and keep it consistent
2185
*/
2186
ret = smu_get_entrycount_gfxoff(smu, &count);
2187
if (!ret)
2188
adev->gfx.gfx_off_entrycount = count;
2189
2190
/* clear this on suspend so it will get reprogrammed on resume */
2191
smu->workload_mask = 0;
2192
2193
return 0;
2194
}
2195
2196
static int smu_resume(struct amdgpu_ip_block *ip_block)
2197
{
2198
int ret;
2199
struct amdgpu_device *adev = ip_block->adev;
2200
struct smu_context *smu = adev->powerplay.pp_handle;
2201
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2202
2203
if (amdgpu_sriov_multi_vf_mode(adev))
2204
return 0;
2205
2206
if (!smu->pm_enabled)
2207
return 0;
2208
2209
dev_info(adev->dev, "SMU is resuming...\n");
2210
2211
ret = smu_start_smc_engine(smu);
2212
if (ret) {
2213
dev_err(adev->dev, "SMC engine is not correctly up!\n");
2214
return ret;
2215
}
2216
2217
ret = smu_smc_hw_setup(smu);
2218
if (ret) {
2219
dev_err(adev->dev, "Failed to setup smc hw!\n");
2220
return ret;
2221
}
2222
2223
ret = smu_set_gfx_imu_enable(smu);
2224
if (ret)
2225
return ret;
2226
2227
smu_set_gfx_cgpg(smu, true);
2228
2229
smu->disable_uclk_switch = 0;
2230
2231
adev->pm.dpm_enabled = true;
2232
2233
if (smu->current_power_limit) {
2234
ret = smu_set_power_limit(smu, smu->current_power_limit);
2235
if (ret && ret != -EOPNOTSUPP)
2236
return ret;
2237
}
2238
2239
if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
2240
ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
2241
if (ret)
2242
return ret;
2243
}
2244
2245
dev_info(adev->dev, "SMU is resumed successfully!\n");
2246
2247
return 0;
2248
}
2249
2250
static int smu_display_configuration_change(void *handle,
2251
const struct amd_pp_display_configuration *display_config)
2252
{
2253
struct smu_context *smu = handle;
2254
2255
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2256
return -EOPNOTSUPP;
2257
2258
if (!display_config)
2259
return -EINVAL;
2260
2261
smu_set_min_dcef_deep_sleep(smu,
2262
display_config->min_dcef_deep_sleep_set_clk / 100);
2263
2264
return 0;
2265
}
2266
2267
static int smu_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2268
enum amd_clockgating_state state)
2269
{
2270
return 0;
2271
}
2272
2273
static int smu_set_powergating_state(struct amdgpu_ip_block *ip_block,
2274
enum amd_powergating_state state)
2275
{
2276
return 0;
2277
}
2278
2279
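/*
* Entering a profiling (UMD pstate) level saves the current level and
* disables GPO, GFX ULV and deep sleep so clocks stay stable; leaving
* the profiling levels restores them along with the saved level.
*/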
static int smu_enable_umd_pstate(void *handle,
2280
enum amd_dpm_forced_level *level)
2281
{
2282
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
2283
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
2284
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
2285
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
2286
2287
struct smu_context *smu = (struct smu_context*)(handle);
2288
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2289
2290
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2291
return -EINVAL;
2292
2293
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
2294
/* enter umd pstate, save current level, disable gfx cg */
2295
if (*level & profile_mode_mask) {
2296
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
2297
smu_gpo_control(smu, false);
2298
smu_gfx_ulv_control(smu, false);
2299
smu_deep_sleep_control(smu, false);
2300
amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2301
}
2302
} else {
2303
/* exit umd pstate, restore level, enable gfx cg */
2304
if (!(*level & profile_mode_mask)) {
2305
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
2306
*level = smu_dpm_ctx->saved_dpm_level;
2307
amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2308
smu_deep_sleep_control(smu, true);
2309
smu_gfx_ulv_control(smu, true);
2310
smu_gpo_control(smu, true);
2311
}
2312
}
2313
2314
return 0;
2315
}
2316
2317
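/*
* Rebuild the aggregate workload mask from the per-profile refcounts and
* hand it to the ppt layer only when the mask actually changes.
*/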
static int smu_bump_power_profile_mode(struct smu_context *smu,
2318
long *custom_params,
2319
u32 custom_params_max_idx)
2320
{
2321
u32 workload_mask = 0;
2322
int i, ret = 0;
2323
2324
for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
2325
if (smu->workload_refcount[i])
2326
workload_mask |= 1 << i;
2327
}
2328
2329
if (smu->workload_mask == workload_mask)
2330
return 0;
2331
2332
if (smu->ppt_funcs->set_power_profile_mode)
2333
ret = smu->ppt_funcs->set_power_profile_mode(smu, workload_mask,
2334
custom_params,
2335
custom_params_max_idx);
2336
2337
if (!ret)
2338
smu->workload_mask = workload_mask;
2339
2340
return ret;
2341
}
2342
2343
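/* refcount the requested power profile so multiple users can share it */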
static void smu_power_profile_mode_get(struct smu_context *smu,
2344
enum PP_SMC_POWER_PROFILE profile_mode)
2345
{
2346
smu->workload_refcount[profile_mode]++;
2347
}
2348
2349
static void smu_power_profile_mode_put(struct smu_context *smu,
2350
enum PP_SMC_POWER_PROFILE profile_mode)
2351
{
2352
if (smu->workload_refcount[profile_mode])
2353
smu->workload_refcount[profile_mode]--;
2354
}
2355
2356
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2357
enum amd_dpm_forced_level level,
2358
bool skip_display_settings)
2359
{
2360
int ret = 0;
2361
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2362
2363
if (!skip_display_settings) {
2364
ret = smu_display_config_changed(smu);
2365
if (ret) {
2366
dev_err(smu->adev->dev, "Failed to change display config!");
2367
return ret;
2368
}
2369
}
2370
2371
ret = smu_apply_clocks_adjust_rules(smu);
2372
if (ret) {
2373
dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2374
return ret;
2375
}
2376
2377
if (!skip_display_settings) {
2378
ret = smu_notify_smc_display_config(smu);
2379
if (ret) {
2380
dev_err(smu->adev->dev, "Failed to notify smc display config!");
2381
return ret;
2382
}
2383
}
2384
2385
if (smu_dpm_ctx->dpm_level != level) {
2386
ret = smu_asic_set_performance_level(smu, level);
2387
if (ret) {
2388
if (ret == -EOPNOTSUPP)
2389
dev_info(smu->adev->dev, "set performance level %d not supported",
2390
level);
2391
else
2392
dev_err(smu->adev->dev, "Failed to set performance level %d",
2393
level);
2394
return ret;
2395
}
2396
2397
/* update the saved copy */
2398
smu_dpm_ctx->dpm_level = level;
2399
}
2400
2401
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2402
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
2403
smu_bump_power_profile_mode(smu, NULL, 0);
2404
2405
return ret;
2406
}
2407
2408
static int smu_handle_task(struct smu_context *smu,
2409
enum amd_dpm_forced_level level,
2410
enum amd_pp_task task_id)
2411
{
2412
int ret = 0;
2413
2414
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2415
return -EOPNOTSUPP;
2416
2417
switch (task_id) {
2418
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
2419
ret = smu_pre_display_config_changed(smu);
2420
if (ret)
2421
return ret;
2422
ret = smu_adjust_power_state_dynamic(smu, level, false);
2423
break;
2424
case AMD_PP_TASK_COMPLETE_INIT:
2425
ret = smu_adjust_power_state_dynamic(smu, level, true);
2426
break;
2427
case AMD_PP_TASK_READJUST_POWER_STATE:
2428
ret = smu_adjust_power_state_dynamic(smu, level, true);
2429
break;
2430
default:
2431
break;
2432
}
2433
2434
return ret;
2435
}
2436
2437
static int smu_handle_dpm_task(void *handle,
2438
enum amd_pp_task task_id,
2439
enum amd_pm_state_type *user_state)
2440
{
2441
struct smu_context *smu = handle;
2442
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2443
2444
return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2445
2446
}
2447
2448
static int smu_switch_power_profile(void *handle,
2449
enum PP_SMC_POWER_PROFILE type,
2450
bool enable)
2451
{
2452
struct smu_context *smu = handle;
2453
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2454
int ret;
2455
2456
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2457
return -EOPNOTSUPP;
2458
2459
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
2460
return -EINVAL;
2461
2462
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2463
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2464
if (enable)
2465
smu_power_profile_mode_get(smu, type);
2466
else
2467
smu_power_profile_mode_put(smu, type);
2468
/* don't switch the active workload when paused */
2469
if (smu->pause_workload)
2470
ret = 0;
2471
else
2472
ret = smu_bump_power_profile_mode(smu, NULL, 0);
2473
if (ret) {
2474
if (enable)
2475
smu_power_profile_mode_put(smu, type);
2476
else
2477
smu_power_profile_mode_get(smu, type);
2478
return ret;
2479
}
2480
}
2481
2482
return 0;
2483
}
2484
2485
static int smu_pause_power_profile(void *handle,
2486
bool pause)
2487
{
2488
struct smu_context *smu = handle;
2489
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2490
u32 workload_mask = 1 << PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
2491
int ret;
2492
2493
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2494
return -EOPNOTSUPP;
2495
2496
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
2497
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
2498
smu->pause_workload = pause;
2499
2500
/* force to bootup default profile */
2501
if (smu->pause_workload && smu->ppt_funcs->set_power_profile_mode)
2502
ret = smu->ppt_funcs->set_power_profile_mode(smu,
2503
workload_mask,
2504
NULL,
2505
0);
2506
else
2507
ret = smu_bump_power_profile_mode(smu, NULL, 0);
2508
return ret;
2509
}
2510
2511
return 0;
2512
}
2513
2514
static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
2515
{
2516
struct smu_context *smu = handle;
2517
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2518
2519
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2520
return -EOPNOTSUPP;
2521
2522
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2523
return -EINVAL;
2524
2525
return smu_dpm_ctx->dpm_level;
2526
}
2527
2528
static int smu_force_performance_level(void *handle,
2529
enum amd_dpm_forced_level level)
2530
{
2531
struct smu_context *smu = handle;
2532
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2533
int ret = 0;
2534
2535
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2536
return -EOPNOTSUPP;
2537
2538
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2539
return -EINVAL;
2540
2541
ret = smu_enable_umd_pstate(smu, &level);
2542
if (ret)
2543
return ret;
2544
2545
ret = smu_handle_task(smu, level,
2546
AMD_PP_TASK_READJUST_POWER_STATE);
2547
2548
/* reset user dpm clock state */
2549
if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2550
memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2551
smu->user_dpm_profile.clk_dependency = 0;
2552
}
2553
2554
return ret;
2555
}
2556
2557
static int smu_set_display_count(void *handle, uint32_t count)
2558
{
2559
struct smu_context *smu = handle;
2560
2561
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2562
return -EOPNOTSUPP;
2563
2564
return smu_init_display_count(smu, count);
2565
}
2566
2567
static int smu_force_smuclk_levels(struct smu_context *smu,
2568
enum smu_clk_type clk_type,
2569
uint32_t mask)
2570
{
2571
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2572
int ret = 0;
2573
2574
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2575
return -EOPNOTSUPP;
2576
2577
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
2578
dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2579
return -EINVAL;
2580
}
2581
2582
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2583
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2584
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2585
smu->user_dpm_profile.clk_mask[clk_type] = mask;
2586
smu_set_user_clk_dependencies(smu, clk_type);
2587
}
2588
}
2589
2590
return ret;
2591
}
2592
2593
static int smu_force_ppclk_levels(void *handle,
2594
enum pp_clock_type type,
2595
uint32_t mask)
2596
{
2597
struct smu_context *smu = handle;
2598
enum smu_clk_type clk_type;
2599
2600
switch (type) {
2601
case PP_SCLK:
2602
clk_type = SMU_SCLK; break;
2603
case PP_MCLK:
2604
clk_type = SMU_MCLK; break;
2605
case PP_PCIE:
2606
clk_type = SMU_PCIE; break;
2607
case PP_SOCCLK:
2608
clk_type = SMU_SOCCLK; break;
2609
case PP_FCLK:
2610
clk_type = SMU_FCLK; break;
2611
case PP_DCEFCLK:
2612
clk_type = SMU_DCEFCLK; break;
2613
case PP_VCLK:
2614
clk_type = SMU_VCLK; break;
2615
case PP_VCLK1:
2616
clk_type = SMU_VCLK1; break;
2617
case PP_DCLK:
2618
clk_type = SMU_DCLK; break;
2619
case PP_DCLK1:
2620
clk_type = SMU_DCLK1; break;
2621
case OD_SCLK:
2622
clk_type = SMU_OD_SCLK; break;
2623
case OD_MCLK:
2624
clk_type = SMU_OD_MCLK; break;
2625
case OD_VDDC_CURVE:
2626
clk_type = SMU_OD_VDDC_CURVE; break;
2627
case OD_RANGE:
2628
clk_type = SMU_OD_RANGE; break;
2629
default:
2630
return -EINVAL;
2631
}
2632
2633
return smu_force_smuclk_levels(smu, clk_type, mask);
2634
}
2635
2636
/*
2637
* On system suspend or reset, the dpm_enabled
* flag is cleared so that the SMU services which
* are no longer supported get gated.
* However, the mp1 state setting should still be granted
* even if dpm_enabled is cleared.
2642
*/
2643
static int smu_set_mp1_state(void *handle,
2644
enum pp_mp1_state mp1_state)
2645
{
2646
struct smu_context *smu = handle;
2647
int ret = 0;
2648
2649
if (!smu->pm_enabled)
2650
return -EOPNOTSUPP;
2651
2652
if (smu->ppt_funcs &&
2653
smu->ppt_funcs->set_mp1_state)
2654
ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2655
2656
return ret;
2657
}
2658
2659
static int smu_set_df_cstate(void *handle,
2660
enum pp_df_cstate state)
2661
{
2662
struct smu_context *smu = handle;
2663
int ret = 0;
2664
2665
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2666
return -EOPNOTSUPP;
2667
2668
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2669
return 0;
2670
2671
ret = smu->ppt_funcs->set_df_cstate(smu, state);
2672
if (ret)
2673
dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2674
2675
return ret;
2676
}
2677
2678
int smu_write_watermarks_table(struct smu_context *smu)
2679
{
2680
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2681
return -EOPNOTSUPP;
2682
2683
return smu_set_watermarks_table(smu, NULL);
2684
}
2685
2686
static int smu_set_watermarks_for_clock_ranges(void *handle,
2687
struct pp_smu_wm_range_sets *clock_ranges)
2688
{
2689
struct smu_context *smu = handle;
2690
2691
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2692
return -EOPNOTSUPP;
2693
2694
if (smu->disable_watermark)
2695
return 0;
2696
2697
return smu_set_watermarks_table(smu, clock_ranges);
2698
}
2699
2700
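/*
* Notify the SMU of an AC<->DC power source change, unless the
* transition is signalled to the SMU directly via GPIO.
*/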
int smu_set_ac_dc(struct smu_context *smu)
2701
{
2702
int ret = 0;
2703
2704
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2705
return -EOPNOTSUPP;
2706
2707
/* controlled by firmware */
2708
if (smu->dc_controlled_by_gpio)
2709
return 0;
2710
2711
ret = smu_set_power_source(smu,
2712
smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2713
SMU_POWER_SOURCE_DC);
2714
if (ret)
2715
dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2716
smu->adev->pm.ac_power ? "AC" : "DC");
2717
2718
return ret;
2719
}
2720
2721
const struct amd_ip_funcs smu_ip_funcs = {
2722
.name = "smu",
2723
.early_init = smu_early_init,
2724
.late_init = smu_late_init,
2725
.sw_init = smu_sw_init,
2726
.sw_fini = smu_sw_fini,
2727
.hw_init = smu_hw_init,
2728
.hw_fini = smu_hw_fini,
2729
.late_fini = smu_late_fini,
2730
.suspend = smu_suspend,
2731
.resume = smu_resume,
2732
.is_idle = NULL,
2733
.check_soft_reset = NULL,
2734
.wait_for_idle = NULL,
2735
.soft_reset = NULL,
2736
.set_clockgating_state = smu_set_clockgating_state,
2737
.set_powergating_state = smu_set_powergating_state,
2738
};
2739
2740
const struct amdgpu_ip_block_version smu_v11_0_ip_block = {
2741
.type = AMD_IP_BLOCK_TYPE_SMC,
2742
.major = 11,
2743
.minor = 0,
2744
.rev = 0,
2745
.funcs = &smu_ip_funcs,
2746
};
2747
2748
const struct amdgpu_ip_block_version smu_v12_0_ip_block = {
2749
.type = AMD_IP_BLOCK_TYPE_SMC,
2750
.major = 12,
2751
.minor = 0,
2752
.rev = 0,
2753
.funcs = &smu_ip_funcs,
2754
};
2755
2756
const struct amdgpu_ip_block_version smu_v13_0_ip_block = {
2757
.type = AMD_IP_BLOCK_TYPE_SMC,
2758
.major = 13,
2759
.minor = 0,
2760
.rev = 0,
2761
.funcs = &smu_ip_funcs,
2762
};
2763
2764
const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
2765
.type = AMD_IP_BLOCK_TYPE_SMC,
2766
.major = 14,
2767
.minor = 0,
2768
.rev = 0,
2769
.funcs = &smu_ip_funcs,
2770
};
2771
2772
static int smu_load_microcode(void *handle)
2773
{
2774
struct smu_context *smu = handle;
2775
struct amdgpu_device *adev = smu->adev;
2776
int ret = 0;
2777
2778
if (!smu->pm_enabled)
2779
return -EOPNOTSUPP;
2780
2781
/* This should be used for non-PSP firmware loading */
2782
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
2783
return 0;
2784
2785
if (smu->ppt_funcs->load_microcode) {
2786
ret = smu->ppt_funcs->load_microcode(smu);
2787
if (ret) {
2788
dev_err(adev->dev, "Load microcode failed\n");
2789
return ret;
2790
}
2791
}
2792
2793
if (smu->ppt_funcs->check_fw_status) {
2794
ret = smu->ppt_funcs->check_fw_status(smu);
2795
if (ret) {
2796
dev_err(adev->dev, "SMC is not ready\n");
2797
return ret;
2798
}
2799
}
2800
2801
return ret;
2802
}
2803
2804
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2805
{
2806
int ret = 0;
2807
2808
if (smu->ppt_funcs->set_gfx_cgpg)
2809
ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2810
2811
return ret;
2812
}
2813
2814
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
2815
{
2816
struct smu_context *smu = handle;
2817
int ret = 0;
2818
2819
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2820
return -EOPNOTSUPP;
2821
2822
if (!smu->ppt_funcs->set_fan_speed_rpm)
2823
return -EOPNOTSUPP;
2824
2825
if (speed == U32_MAX)
2826
return -EINVAL;
2827
2828
ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2829
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2830
smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2831
smu->user_dpm_profile.fan_speed_rpm = speed;
2832
2833
/* Override the custom PWM setting as the two cannot co-exist */
2834
smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2835
smu->user_dpm_profile.fan_speed_pwm = 0;
2836
}
2837
2838
return ret;
2839
}
2840
2841
/**
2842
* smu_get_power_limit - Request one of the SMU Power Limits
2843
*
2844
* @handle: pointer to smu context
2845
* @limit: requested limit is written back to this variable
2846
* @pp_limit_level: &pp_power_limit_level selecting which power limit to return
* @pp_power_type: &pp_power_type selecting the type of power limit
2848
* Return: 0 on success, <0 on error
2849
*
2850
*/
2851
int smu_get_power_limit(void *handle,
2852
uint32_t *limit,
2853
enum pp_power_limit_level pp_limit_level,
2854
enum pp_power_type pp_power_type)
2855
{
2856
struct smu_context *smu = handle;
2857
struct amdgpu_device *adev = smu->adev;
2858
enum smu_ppt_limit_level limit_level;
2859
uint32_t limit_type;
2860
int ret = 0;
2861
2862
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2863
return -EOPNOTSUPP;
2864
2865
switch (pp_power_type) {
2866
case PP_PWR_TYPE_SUSTAINED:
2867
limit_type = SMU_DEFAULT_PPT_LIMIT;
2868
break;
2869
case PP_PWR_TYPE_FAST:
2870
limit_type = SMU_FAST_PPT_LIMIT;
2871
break;
2872
default:
2873
return -EOPNOTSUPP;
2874
}
2875
2876
switch (pp_limit_level) {
2877
case PP_PWR_LIMIT_CURRENT:
2878
limit_level = SMU_PPT_LIMIT_CURRENT;
2879
break;
2880
case PP_PWR_LIMIT_DEFAULT:
2881
limit_level = SMU_PPT_LIMIT_DEFAULT;
2882
break;
2883
case PP_PWR_LIMIT_MAX:
2884
limit_level = SMU_PPT_LIMIT_MAX;
2885
break;
2886
case PP_PWR_LIMIT_MIN:
2887
limit_level = SMU_PPT_LIMIT_MIN;
2888
break;
2889
default:
2890
return -EOPNOTSUPP;
2891
}
2892
2893
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
2894
if (smu->ppt_funcs->get_ppt_limit)
2895
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2896
} else {
2897
switch (limit_level) {
2898
case SMU_PPT_LIMIT_CURRENT:
2899
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
2900
case IP_VERSION(13, 0, 2):
2901
case IP_VERSION(13, 0, 6):
2902
case IP_VERSION(13, 0, 12):
2903
case IP_VERSION(13, 0, 14):
2904
case IP_VERSION(11, 0, 7):
2905
case IP_VERSION(11, 0, 11):
2906
case IP_VERSION(11, 0, 12):
2907
case IP_VERSION(11, 0, 13):
2908
ret = smu_get_asic_power_limits(smu,
2909
&smu->current_power_limit,
2910
NULL, NULL, NULL);
2911
break;
2912
default:
2913
break;
2914
}
2915
*limit = smu->current_power_limit;
2916
break;
2917
case SMU_PPT_LIMIT_DEFAULT:
2918
*limit = smu->default_power_limit;
2919
break;
2920
case SMU_PPT_LIMIT_MAX:
2921
*limit = smu->max_power_limit;
2922
break;
2923
case SMU_PPT_LIMIT_MIN:
2924
*limit = smu->min_power_limit;
2925
break;
2926
default:
2927
return -EINVAL;
2928
}
2929
}
2930
2931
return ret;
2932
}
2933
2934
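/*
* The limit argument packs the PPT limit type in bits 31:24 and the
* power limit value itself in bits 23:0 (see the masking below).
*/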
static int smu_set_power_limit(void *handle, uint32_t limit)
2935
{
2936
struct smu_context *smu = handle;
2937
uint32_t limit_type = limit >> 24;
2938
int ret = 0;
2939
2940
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2941
return -EOPNOTSUPP;
2942
2943
limit &= (1<<24)-1;
2944
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
2945
if (smu->ppt_funcs->set_power_limit)
2946
return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2947
2948
if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2949
dev_err(smu->adev->dev,
2950
"New power limit (%d) is out of range [%d,%d]\n",
2951
limit, smu->min_power_limit, smu->max_power_limit);
2952
return -EINVAL;
2953
}
2954
2955
if (!limit)
2956
limit = smu->current_power_limit;
2957
2958
if (smu->ppt_funcs->set_power_limit) {
2959
ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2960
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2961
smu->user_dpm_profile.power_limit = limit;
2962
}
2963
2964
return ret;
2965
}
2966
2967
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2968
{
2969
int ret = 0;
2970
2971
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2972
return -EOPNOTSUPP;
2973
2974
if (smu->ppt_funcs->print_clk_levels)
2975
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2976
2977
return ret;
2978
}
2979
2980
static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
2981
{
2982
enum smu_clk_type clk_type;
2983
2984
switch (type) {
2985
case PP_SCLK:
2986
clk_type = SMU_SCLK; break;
2987
case PP_MCLK:
2988
clk_type = SMU_MCLK; break;
2989
case PP_PCIE:
2990
clk_type = SMU_PCIE; break;
2991
case PP_SOCCLK:
2992
clk_type = SMU_SOCCLK; break;
2993
case PP_FCLK:
2994
clk_type = SMU_FCLK; break;
2995
case PP_DCEFCLK:
2996
clk_type = SMU_DCEFCLK; break;
2997
case PP_VCLK:
2998
clk_type = SMU_VCLK; break;
2999
case PP_VCLK1:
3000
clk_type = SMU_VCLK1; break;
3001
case PP_DCLK:
3002
clk_type = SMU_DCLK; break;
3003
case PP_DCLK1:
3004
clk_type = SMU_DCLK1; break;
3005
case PP_ISPICLK:
3006
clk_type = SMU_ISPICLK;
3007
break;
3008
case PP_ISPXCLK:
3009
clk_type = SMU_ISPXCLK;
3010
break;
3011
case OD_SCLK:
3012
clk_type = SMU_OD_SCLK; break;
3013
case OD_MCLK:
3014
clk_type = SMU_OD_MCLK; break;
3015
case OD_VDDC_CURVE:
3016
clk_type = SMU_OD_VDDC_CURVE; break;
3017
case OD_RANGE:
3018
clk_type = SMU_OD_RANGE; break;
3019
case OD_VDDGFX_OFFSET:
3020
clk_type = SMU_OD_VDDGFX_OFFSET; break;
3021
case OD_CCLK:
3022
clk_type = SMU_OD_CCLK; break;
3023
case OD_FAN_CURVE:
3024
clk_type = SMU_OD_FAN_CURVE; break;
3025
case OD_ACOUSTIC_LIMIT:
3026
clk_type = SMU_OD_ACOUSTIC_LIMIT; break;
3027
case OD_ACOUSTIC_TARGET:
3028
clk_type = SMU_OD_ACOUSTIC_TARGET; break;
3029
case OD_FAN_TARGET_TEMPERATURE:
3030
clk_type = SMU_OD_FAN_TARGET_TEMPERATURE; break;
3031
case OD_FAN_MINIMUM_PWM:
3032
clk_type = SMU_OD_FAN_MINIMUM_PWM; break;
3033
case OD_FAN_ZERO_RPM_ENABLE:
3034
clk_type = SMU_OD_FAN_ZERO_RPM_ENABLE; break;
3035
case OD_FAN_ZERO_RPM_STOP_TEMP:
3036
clk_type = SMU_OD_FAN_ZERO_RPM_STOP_TEMP; break;
3037
default:
3038
clk_type = SMU_CLK_COUNT; break;
3039
}
3040
3041
return clk_type;
3042
}
3043
3044
static int smu_print_ppclk_levels(void *handle,
3045
enum pp_clock_type type,
3046
char *buf)
3047
{
3048
struct smu_context *smu = handle;
3049
enum smu_clk_type clk_type;
3050
3051
clk_type = smu_convert_to_smuclk(type);
3052
if (clk_type == SMU_CLK_COUNT)
3053
return -EINVAL;
3054
3055
return smu_print_smuclk_levels(smu, clk_type, buf);
3056
}
3057
3058
static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
3059
{
3060
struct smu_context *smu = handle;
3061
enum smu_clk_type clk_type;
3062
3063
clk_type = smu_convert_to_smuclk(type);
3064
if (clk_type == SMU_CLK_COUNT)
3065
return -EINVAL;
3066
3067
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3068
return -EOPNOTSUPP;
3069
3070
if (!smu->ppt_funcs->emit_clk_levels)
3071
return -ENOENT;
3072
3073
return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
3074
3075
}
3076
3077
static int smu_od_edit_dpm_table(void *handle,
3078
enum PP_OD_DPM_TABLE_COMMAND type,
3079
long *input, uint32_t size)
3080
{
3081
struct smu_context *smu = handle;
3082
int ret = 0;
3083
3084
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3085
return -EOPNOTSUPP;
3086
3087
if (smu->ppt_funcs->od_edit_dpm_table) {
3088
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
3089
}
3090
3091
return ret;
3092
}
3093
3094
static int smu_read_sensor(void *handle,
3095
int sensor,
3096
void *data,
3097
int *size_arg)
3098
{
3099
struct smu_context *smu = handle;
3100
struct amdgpu_device *adev = smu->adev;
3101
struct smu_umd_pstate_table *pstate_table =
3102
&smu->pstate_table;
3103
int i, ret = 0;
3104
uint32_t *size, size_val;
3105
3106
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3107
return -EOPNOTSUPP;
3108
3109
if (!data || !size_arg)
3110
return -EINVAL;
3111
3112
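/* ppt read_sensor callbacks expect a uint32_t size, so go through a local copy */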
size_val = *size_arg;
3113
size = &size_val;
3114
3115
if (smu->ppt_funcs->read_sensor)
3116
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
3117
goto unlock;
3118
3119
switch (sensor) {
3120
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
3121
*((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100;
3122
*size = 4;
3123
break;
3124
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
3125
*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
3126
*size = 4;
3127
break;
3128
case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
3129
*((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
3130
*size = 4;
3131
break;
3132
case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
3133
*((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
3134
*size = 4;
3135
break;
3136
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
3137
ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
3138
*size = 8;
3139
break;
3140
case AMDGPU_PP_SENSOR_UVD_POWER:
3141
*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
3142
*size = 4;
3143
break;
3144
case AMDGPU_PP_SENSOR_VCE_POWER:
3145
*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
3146
*size = 4;
3147
break;
3148
case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
3149
*(uint32_t *)data = 0;
3150
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
3151
if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
3152
*(uint32_t *)data = 1;
3153
break;
3154
}
3155
}
3156
*size = 4;
3157
break;
3158
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
3159
*(uint32_t *)data = 0;
3160
*size = 4;
3161
break;
3162
default:
3163
*size = 0;
3164
ret = -EOPNOTSUPP;
3165
break;
3166
}
3167
3168
unlock:
3169
/* copy the possibly updated uint32_t size back into the int-typed argument */
3170
*size_arg = size_val;
3171
3172
return ret;
3173
}
3174
3175
static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
3176
{
3177
int ret = -EOPNOTSUPP;
3178
struct smu_context *smu = handle;
3179
3180
if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
3181
ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
3182
3183
return ret;
3184
}
3185
3186
static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
3187
{
3188
int ret = -EOPNOTSUPP;
3189
struct smu_context *smu = handle;
3190
3191
if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
3192
ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
3193
3194
return ret;
3195
}
3196
3197
static int smu_get_power_profile_mode(void *handle, char *buf)
3198
{
3199
struct smu_context *smu = handle;
3200
3201
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3202
!smu->ppt_funcs->get_power_profile_mode)
3203
return -EOPNOTSUPP;
3204
if (!buf)
3205
return -EINVAL;
3206
3207
return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3208
}
3209
3210
static int smu_set_power_profile_mode(void *handle,
3211
long *param,
3212
uint32_t param_size)
3213
{
3214
struct smu_context *smu = handle;
3215
bool custom = false;
3216
int ret = 0;
3217
3218
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3219
!smu->ppt_funcs->set_power_profile_mode)
3220
return -EOPNOTSUPP;
3221
3222
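/* the requested profile mode is carried in the last element of @param */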
if (param[param_size] == PP_SMC_POWER_PROFILE_CUSTOM) {
3223
custom = true;
3224
/* clear frontend mask so custom changes propagate */
3225
smu->workload_mask = 0;
3226
}
3227
3228
if ((param[param_size] != smu->power_profile_mode) || custom) {
3229
/* clear the old user preference */
3230
smu_power_profile_mode_put(smu, smu->power_profile_mode);
3231
/* set the new user preference */
3232
smu_power_profile_mode_get(smu, param[param_size]);
3233
ret = smu_bump_power_profile_mode(smu,
3234
custom ? param : NULL,
3235
custom ? param_size : 0);
3236
if (ret)
3237
smu_power_profile_mode_put(smu, param[param_size]);
3238
else
3239
/* store the user's preference */
3240
smu->power_profile_mode = param[param_size];
3241
}
3242
3243
return ret;
3244
}
3245
3246
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
3247
{
3248
struct smu_context *smu = handle;
3249
3250
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3251
return -EOPNOTSUPP;
3252
3253
if (!smu->ppt_funcs->get_fan_control_mode)
3254
return -EOPNOTSUPP;
3255
3256
if (!fan_mode)
3257
return -EINVAL;
3258
3259
*fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3260
3261
return 0;
3262
}
3263
3264
static int smu_set_fan_control_mode(void *handle, u32 value)
3265
{
3266
struct smu_context *smu = handle;
3267
int ret = 0;
3268
3269
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3270
return -EOPNOTSUPP;
3271
3272
if (!smu->ppt_funcs->set_fan_control_mode)
3273
return -EOPNOTSUPP;
3274
3275
if (value == U32_MAX)
3276
return -EINVAL;
3277
3278
ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3279
if (ret)
3280
goto out;
3281
3282
if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3283
smu->user_dpm_profile.fan_mode = value;
3284
3285
/* reset user dpm fan speed */
3286
if (value != AMD_FAN_CTRL_MANUAL) {
3287
smu->user_dpm_profile.fan_speed_pwm = 0;
3288
smu->user_dpm_profile.fan_speed_rpm = 0;
3289
smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3290
}
3291
}
3292
3293
out:
3294
return ret;
3295
}
3296
3297
static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
3298
{
3299
struct smu_context *smu = handle;
3300
int ret = 0;
3301
3302
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3303
return -EOPNOTSUPP;
3304
3305
if (!smu->ppt_funcs->get_fan_speed_pwm)
3306
return -EOPNOTSUPP;
3307
3308
if (!speed)
3309
return -EINVAL;
3310
3311
ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3312
3313
return ret;
3314
}
3315
3316
static int smu_set_fan_speed_pwm(void *handle, u32 speed)
3317
{
3318
struct smu_context *smu = handle;
3319
int ret = 0;
3320
3321
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3322
return -EOPNOTSUPP;
3323
3324
if (!smu->ppt_funcs->set_fan_speed_pwm)
3325
return -EOPNOTSUPP;
3326
3327
if (speed == U32_MAX)
3328
return -EINVAL;
3329
3330
ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3331
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3332
smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3333
smu->user_dpm_profile.fan_speed_pwm = speed;
3334
3335
/* Override the custom RPM setting as the two cannot co-exist */
3336
smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3337
smu->user_dpm_profile.fan_speed_rpm = 0;
3338
}
3339
3340
return ret;
3341
}
3342
3343
static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
3344
{
3345
struct smu_context *smu = handle;
3346
int ret = 0;
3347
3348
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3349
return -EOPNOTSUPP;
3350
3351
if (!smu->ppt_funcs->get_fan_speed_rpm)
3352
return -EOPNOTSUPP;
3353
3354
if (!speed)
3355
return -EINVAL;
3356
3357
ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3358
3359
return ret;
3360
}
3361
3362
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
3363
{
3364
struct smu_context *smu = handle;
3365
3366
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3367
return -EOPNOTSUPP;
3368
3369
return smu_set_min_dcef_deep_sleep(smu, clk);
3370
}
3371
3372
static int smu_get_clock_by_type_with_latency(void *handle,
3373
enum amd_pp_clock_type type,
3374
struct pp_clock_levels_with_latency *clocks)
3375
{
3376
struct smu_context *smu = handle;
3377
enum smu_clk_type clk_type;
3378
int ret = 0;
3379
3380
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3381
return -EOPNOTSUPP;
3382
3383
if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3384
switch (type) {
3385
case amd_pp_sys_clock:
3386
clk_type = SMU_GFXCLK;
3387
break;
3388
case amd_pp_mem_clock:
3389
clk_type = SMU_MCLK;
3390
break;
3391
case amd_pp_dcef_clock:
3392
clk_type = SMU_DCEFCLK;
3393
break;
3394
case amd_pp_disp_clock:
3395
clk_type = SMU_DISPCLK;
3396
break;
3397
default:
3398
dev_err(smu->adev->dev, "Invalid clock type!\n");
3399
return -EINVAL;
3400
}
3401
3402
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3403
}
3404
3405
return ret;
3406
}
3407
3408
static int smu_display_clock_voltage_request(void *handle,
3409
struct pp_display_clock_request *clock_req)
3410
{
3411
struct smu_context *smu = handle;
3412
int ret = 0;
3413
3414
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3415
return -EOPNOTSUPP;
3416
3417
if (smu->ppt_funcs->display_clock_voltage_request)
3418
ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3419
3420
return ret;
3421
}
3422
3423
3424
static int smu_display_disable_memory_clock_switch(void *handle,
3425
bool disable_memory_clock_switch)
3426
{
3427
struct smu_context *smu = handle;
3428
int ret = -EINVAL;
3429
3430
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3431
return -EOPNOTSUPP;
3432
3433
if (smu->ppt_funcs->display_disable_memory_clock_switch)
3434
ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3435
3436
return ret;
3437
}
3438
3439
static int smu_set_xgmi_pstate(void *handle,
3440
uint32_t pstate)
3441
{
3442
struct smu_context *smu = handle;
3443
int ret = 0;
3444
3445
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3446
return -EOPNOTSUPP;
3447
3448
if (smu->ppt_funcs->set_xgmi_pstate)
3449
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3450
3451
if (ret)
3452
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3453
3454
return ret;
3455
}
3456
3457
static int smu_get_baco_capability(void *handle)
3458
{
3459
struct smu_context *smu = handle;
3460
3461
if (!smu->pm_enabled)
3462
return false;
3463
3464
if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support)
3465
return false;
3466
3467
return smu->ppt_funcs->get_bamaco_support(smu);
3468
}
3469
3470
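/* state: 1 requests BACO entry, 0 requests BACO exit */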
static int smu_baco_set_state(void *handle, int state)
3471
{
3472
struct smu_context *smu = handle;
3473
int ret = 0;
3474
3475
if (!smu->pm_enabled)
3476
return -EOPNOTSUPP;
3477
3478
if (state == 0) {
3479
if (smu->ppt_funcs->baco_exit)
3480
ret = smu->ppt_funcs->baco_exit(smu);
3481
} else if (state == 1) {
3482
if (smu->ppt_funcs->baco_enter)
3483
ret = smu->ppt_funcs->baco_enter(smu);
3484
} else {
3485
return -EINVAL;
3486
}
3487
3488
if (ret)
3489
dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3490
(state)?"enter":"exit");
3491
3492
return ret;
3493
}
3494
3495
bool smu_mode1_reset_is_support(struct smu_context *smu)
3496
{
3497
bool ret = false;
3498
3499
if (!smu->pm_enabled)
3500
return false;
3501
3502
if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3503
ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3504
3505
return ret;
3506
}
3507
3508
bool smu_link_reset_is_support(struct smu_context *smu)
3509
{
3510
bool ret = false;
3511
3512
if (!smu->pm_enabled)
3513
return false;
3514
3515
if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
3516
ret = smu->ppt_funcs->link_reset_is_support(smu);
3517
3518
return ret;
3519
}
3520
3521
int smu_mode1_reset(struct smu_context *smu)
3522
{
3523
int ret = 0;
3524
3525
if (!smu->pm_enabled)
3526
return -EOPNOTSUPP;
3527
3528
if (smu->ppt_funcs->mode1_reset)
3529
ret = smu->ppt_funcs->mode1_reset(smu);
3530
3531
return ret;
3532
}
3533
3534
static int smu_mode2_reset(void *handle)
3535
{
3536
struct smu_context *smu = handle;
3537
int ret = 0;
3538
3539
if (!smu->pm_enabled)
3540
return -EOPNOTSUPP;
3541
3542
if (smu->ppt_funcs->mode2_reset)
3543
ret = smu->ppt_funcs->mode2_reset(smu);
3544
3545
if (ret)
3546
dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3547
3548
return ret;
3549
}
3550
3551
int smu_link_reset(struct smu_context *smu)
3552
{
3553
int ret = 0;
3554
3555
if (!smu->pm_enabled)
3556
return -EOPNOTSUPP;
3557
3558
if (smu->ppt_funcs->link_reset)
3559
ret = smu->ppt_funcs->link_reset(smu);
3560
3561
return ret;
3562
}
3563
3564
static int smu_enable_gfx_features(void *handle)
3565
{
3566
struct smu_context *smu = handle;
3567
int ret = 0;
3568
3569
if (!smu->pm_enabled)
3570
return -EOPNOTSUPP;
3571
3572
if (smu->ppt_funcs->enable_gfx_features)
3573
ret = smu->ppt_funcs->enable_gfx_features(smu);
3574
3575
if (ret)
3576
dev_err(smu->adev->dev, "enable gfx features failed!\n");
3577
3578
return ret;
3579
}
3580
3581
static int smu_get_max_sustainable_clocks_by_dc(void *handle,
3582
struct pp_smu_nv_clock_table *max_clocks)
3583
{
3584
struct smu_context *smu = handle;
3585
int ret = 0;
3586
3587
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3588
return -EOPNOTSUPP;
3589
3590
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3591
ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3592
3593
return ret;
3594
}
3595
3596
static int smu_get_uclk_dpm_states(void *handle,
3597
unsigned int *clock_values_in_khz,
3598
unsigned int *num_states)
3599
{
3600
struct smu_context *smu = handle;
3601
int ret = 0;
3602
3603
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3604
return -EOPNOTSUPP;
3605
3606
if (smu->ppt_funcs->get_uclk_dpm_states)
3607
ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3608
3609
return ret;
3610
}
3611
3612
static enum amd_pm_state_type smu_get_current_power_state(void *handle)
3613
{
3614
struct smu_context *smu = handle;
3615
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
3616
3617
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3618
return -EOPNOTSUPP;
3619
3620
if (smu->ppt_funcs->get_current_power_state)
3621
pm_state = smu->ppt_funcs->get_current_power_state(smu);
3622
3623
return pm_state;
3624
}
3625
3626
static int smu_get_dpm_clock_table(void *handle,
3627
struct dpm_clocks *clock_table)
3628
{
3629
struct smu_context *smu = handle;
3630
int ret = 0;
3631
3632
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3633
return -EOPNOTSUPP;
3634
3635
if (smu->ppt_funcs->get_dpm_clock_table)
3636
ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3637
3638
return ret;
3639
}
3640
3641
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
3642
{
3643
struct smu_context *smu = handle;
3644
3645
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3646
return -EOPNOTSUPP;
3647
3648
if (!smu->ppt_funcs->get_gpu_metrics)
3649
return -EOPNOTSUPP;
3650
3651
return smu->ppt_funcs->get_gpu_metrics(smu, table);
3652
}
3653
3654
static ssize_t smu_sys_get_pm_metrics(void *handle, void *pm_metrics,
3655
size_t size)
3656
{
3657
struct smu_context *smu = handle;
3658
3659
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3660
return -EOPNOTSUPP;
3661
3662
if (!smu->ppt_funcs->get_pm_metrics)
3663
return -EOPNOTSUPP;
3664
3665
return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3666
}
3667
3668
static int smu_enable_mgpu_fan_boost(void *handle)
3669
{
3670
struct smu_context *smu = handle;
3671
int ret = 0;
3672
3673
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3674
return -EOPNOTSUPP;
3675
3676
if (smu->ppt_funcs->enable_mgpu_fan_boost)
3677
ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3678
3679
return ret;
3680
}
3681
3682
static int smu_gfx_state_change_set(void *handle,
3683
uint32_t state)
3684
{
3685
struct smu_context *smu = handle;
3686
int ret = 0;
3687
3688
if (smu->ppt_funcs->gfx_state_change_set)
3689
ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3690
3691
return ret;
3692
}
3693
3694
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3695
{
3696
int ret = 0;
3697
3698
if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3699
ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3700
3701
return ret;
3702
}
3703
3704
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3705
{
3706
int ret = -EOPNOTSUPP;
3707
3708
if (smu->ppt_funcs &&
3709
smu->ppt_funcs->get_ecc_info)
3710
ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3711
3712
return ret;
3713
3714
}
3715
3716
static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
3717
{
3718
struct smu_context *smu = handle;
3719
struct smu_table_context *smu_table = &smu->smu_table;
3720
struct smu_table *memory_pool = &smu_table->memory_pool;
3721
3722
if (!addr || !size)
3723
return -EINVAL;
3724
3725
*addr = NULL;
3726
*size = 0;
3727
if (memory_pool->bo) {
3728
*addr = memory_pool->cpu_addr;
3729
*size = memory_pool->size;
3730
}
3731
3732
return 0;
3733
}
3734
3735
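/*
* Emit each supported policy level into the sysfs buffer, marking the
* currently active level with a trailing '*'.
*/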
static void smu_print_dpm_policy(struct smu_dpm_policy *policy, char *sysbuf,
3736
size_t *size)
3737
{
3738
size_t offset = *size;
3739
int level;
3740
3741
for_each_set_bit(level, &policy->level_mask, PP_POLICY_MAX_LEVELS) {
3742
if (level == policy->current_level)
3743
offset += sysfs_emit_at(sysbuf, offset,
3744
"%d : %s*\n", level,
3745
policy->desc->get_desc(policy, level));
3746
else
3747
offset += sysfs_emit_at(sysbuf, offset,
3748
"%d : %s\n", level,
3749
policy->desc->get_desc(policy, level));
3750
}
3751
3752
*size = offset;
3753
}
3754
3755
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
3756
enum pp_pm_policy p_type, char *sysbuf)
3757
{
3758
struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3759
struct smu_dpm_policy_ctxt *policy_ctxt;
3760
struct smu_dpm_policy *dpm_policy;
3761
size_t offset = 0;
3762
3763
policy_ctxt = dpm_ctxt->dpm_policies;
3764
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3765
!policy_ctxt->policy_mask)
3766
return -EOPNOTSUPP;
3767
3768
if (p_type == PP_PM_POLICY_NONE)
3769
return -EINVAL;
3770
3771
dpm_policy = smu_get_pm_policy(smu, p_type);
3772
if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->desc)
3773
return -ENOENT;
3774
3775
if (!sysbuf)
3776
return -EINVAL;
3777
3778
smu_print_dpm_policy(dpm_policy, sysbuf, &offset);
3779
3780
return offset;
3781
}
3782
3783
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
3784
enum pp_pm_policy p_type)
3785
{
3786
struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3787
struct smu_dpm_policy_ctxt *policy_ctxt;
3788
int i;
3789
3790
policy_ctxt = dpm_ctxt->dpm_policies;
3791
if (!policy_ctxt)
3792
return NULL;
3793
3794
for (i = 0; i < hweight32(policy_ctxt->policy_mask); ++i) {
3795
if (policy_ctxt->policies[i].policy_type == p_type)
3796
return &policy_ctxt->policies[i];
3797
}
3798
3799
return NULL;
3800
}
3801
3802
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
3803
int level)
3804
{
3805
struct smu_dpm_context *dpm_ctxt = &smu->smu_dpm;
3806
struct smu_dpm_policy *dpm_policy = NULL;
3807
struct smu_dpm_policy_ctxt *policy_ctxt;
3808
int ret = -EOPNOTSUPP;
3809
3810
policy_ctxt = dpm_ctxt->dpm_policies;
3811
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !policy_ctxt ||
3812
!policy_ctxt->policy_mask)
3813
return ret;
3814
3815
if (level < 0 || level >= PP_POLICY_MAX_LEVELS)
3816
return -EINVAL;
3817
3818
dpm_policy = smu_get_pm_policy(smu, p_type);
3819
3820
if (!dpm_policy || !dpm_policy->level_mask || !dpm_policy->set_policy)
3821
return ret;
3822
3823
if (dpm_policy->current_level == level)
3824
return 0;
3825
3826
ret = dpm_policy->set_policy(smu, level);
3827
3828
if (!ret)
3829
dpm_policy->current_level = level;
3830
3831
return ret;
3832
}
3833
3834
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
3835
{
3836
struct smu_context *smu = handle;
3837
3838
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3839
return -EOPNOTSUPP;
3840
3841
if (!smu->adev->xcp_mgr || !smu->ppt_funcs->get_xcp_metrics)
3842
return -EOPNOTSUPP;
3843
3844
return smu->ppt_funcs->get_xcp_metrics(smu, xcp_id, table);
3845
}
3846
3847
static const struct amd_pm_funcs swsmu_pm_funcs = {
3848
/* export for sysfs */
3849
.set_fan_control_mode = smu_set_fan_control_mode,
3850
.get_fan_control_mode = smu_get_fan_control_mode,
3851
.set_fan_speed_pwm = smu_set_fan_speed_pwm,
3852
.get_fan_speed_pwm = smu_get_fan_speed_pwm,
3853
.force_clock_level = smu_force_ppclk_levels,
3854
.print_clock_levels = smu_print_ppclk_levels,
3855
.emit_clock_levels = smu_emit_ppclk_levels,
3856
.force_performance_level = smu_force_performance_level,
3857
.read_sensor = smu_read_sensor,
3858
.get_apu_thermal_limit = smu_get_apu_thermal_limit,
3859
.set_apu_thermal_limit = smu_set_apu_thermal_limit,
3860
.get_performance_level = smu_get_performance_level,
3861
.get_current_power_state = smu_get_current_power_state,
3862
.get_fan_speed_rpm = smu_get_fan_speed_rpm,
3863
.set_fan_speed_rpm = smu_set_fan_speed_rpm,
3864
.get_pp_num_states = smu_get_power_num_states,
3865
.get_pp_table = smu_sys_get_pp_table,
3866
.set_pp_table = smu_sys_set_pp_table,
3867
.switch_power_profile = smu_switch_power_profile,
3868
.pause_power_profile = smu_pause_power_profile,
3869
/* export to amdgpu */
3870
.dispatch_tasks = smu_handle_dpm_task,
3871
.load_firmware = smu_load_microcode,
3872
.set_powergating_by_smu = smu_dpm_set_power_gate,
3873
.set_power_limit = smu_set_power_limit,
3874
.get_power_limit = smu_get_power_limit,
3875
.get_power_profile_mode = smu_get_power_profile_mode,
3876
.set_power_profile_mode = smu_set_power_profile_mode,
3877
.odn_edit_dpm_table = smu_od_edit_dpm_table,
3878
.set_mp1_state = smu_set_mp1_state,
3879
.gfx_state_change_set = smu_gfx_state_change_set,
3880
/* export to DC */
3881
.get_sclk = smu_get_sclk,
3882
.get_mclk = smu_get_mclk,
3883
.display_configuration_change = smu_display_configuration_change,
3884
.get_clock_by_type_with_latency = smu_get_clock_by_type_with_latency,
3885
.display_clock_voltage_request = smu_display_clock_voltage_request,
3886
.enable_mgpu_fan_boost = smu_enable_mgpu_fan_boost,
3887
.set_active_display_count = smu_set_display_count,
3888
.set_min_deep_sleep_dcefclk = smu_set_deep_sleep_dcefclk,
3889
.get_asic_baco_capability = smu_get_baco_capability,
3890
.set_asic_baco_state = smu_baco_set_state,
3891
.get_ppfeature_status = smu_sys_get_pp_feature_mask,
3892
.set_ppfeature_status = smu_sys_set_pp_feature_mask,
3893
.asic_reset_mode_2 = smu_mode2_reset,
3894
.asic_reset_enable_gfx_features = smu_enable_gfx_features,
3895
.set_df_cstate = smu_set_df_cstate,
3896
.set_xgmi_pstate = smu_set_xgmi_pstate,
3897
.get_gpu_metrics = smu_sys_get_gpu_metrics,
3898
.get_pm_metrics = smu_sys_get_pm_metrics,
3899
.set_watermarks_for_clock_ranges = smu_set_watermarks_for_clock_ranges,
3900
.display_disable_memory_clock_switch = smu_display_disable_memory_clock_switch,
3901
.get_max_sustainable_clocks_by_dc = smu_get_max_sustainable_clocks_by_dc,
3902
.get_uclk_dpm_states = smu_get_uclk_dpm_states,
3903
.get_dpm_clock_table = smu_get_dpm_clock_table,
3904
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
3905
.get_xcp_metrics = smu_sys_get_xcp_metrics,
3906
};
3907
3908
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3909
uint64_t event_arg)
3910
{
3911
int ret = -EINVAL;
3912
3913
if (smu->ppt_funcs->wait_for_event)
3914
ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3915
3916
return ret;
3917
}
3918
3919
int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3920
{
3921
3922
if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3923
return -EOPNOTSUPP;
3924
3925
/* Confirm the allocated buffer is of the correct size */
3926
if (size != smu->stb_context.stb_buf_size)
3927
return -EINVAL;
3928
3929
/*
3930
* No need to lock the smu mutex as we access STB directly through MMIO
* rather than through the SMU messaging route (for now at least).
* For register access, rely on the implementation's internal locking.
3933
*/
3934
return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3935
}
3936
3937
#if defined(CONFIG_DEBUG_FS)
3938
3939
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
3940
{
3941
struct amdgpu_device *adev = filp->f_inode->i_private;
3942
struct smu_context *smu = adev->powerplay.pp_handle;
3943
unsigned char *buf;
3944
int r;
3945
3946
buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3947
if (!buf)
3948
return -ENOMEM;
3949
3950
r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3951
if (r)
3952
goto out;
3953
3954
filp->private_data = buf;
3955
3956
return 0;
3957
3958
out:
3959
kvfree(buf);
3960
return r;
3961
}
3962
3963
static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size,
3964
loff_t *pos)
3965
{
3966
struct amdgpu_device *adev = filp->f_inode->i_private;
3967
struct smu_context *smu = adev->powerplay.pp_handle;
3968
3969
3970
if (!filp->private_data)
3971
return -EINVAL;
3972
3973
return simple_read_from_buffer(buf,
3974
size,
3975
pos, filp->private_data,
3976
smu->stb_context.stb_buf_size);
3977
}
3978
3979
static int smu_stb_debugfs_release(struct inode *inode, struct file *filp)
3980
{
3981
kvfree(filp->private_data);
3982
filp->private_data = NULL;
3983
3984
return 0;
3985
}
3986
3987
/*
3988
* We have to define not only the read method but also
* open and release, because .read takes up to PAGE_SIZE of
* data each time and so is invoked multiple times.
3991
* We allocate the STB buffer in .open and release it
3992
* in .release
3993
*/
3994
static const struct file_operations smu_stb_debugfs_fops = {
3995
.owner = THIS_MODULE,
3996
.open = smu_stb_debugfs_open,
3997
.read = smu_stb_debugfs_read,
3998
.release = smu_stb_debugfs_release,
3999
.llseek = default_llseek,
4000
};
4001
4002
#endif
4003
4004
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
4005
{
4006
#if defined(CONFIG_DEBUG_FS)
4007
4008
struct smu_context *smu = adev->powerplay.pp_handle;
4009
4010
if (!smu || (!smu->stb_context.stb_buf_size))
4011
return;
4012
4013
debugfs_create_file_size("amdgpu_smu_stb_dump",
4014
S_IRUSR,
4015
adev_to_drm(adev)->primary->debugfs_root,
4016
adev,
4017
&smu_stb_debugfs_fops,
4018
smu->stb_context.stb_buf_size);
4019
#endif
4020
}
4021
4022
int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
4023
{
4024
int ret = 0;
4025
4026
if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
4027
ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
4028
4029
return ret;
4030
}
4031
4032
int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
4033
{
4034
int ret = 0;
4035
4036
if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
4037
ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
4038
4039
return ret;
4040
}
4041
4042
int smu_send_rma_reason(struct smu_context *smu)
4043
{
4044
int ret = 0;
4045
4046
if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
4047
ret = smu->ppt_funcs->send_rma_reason(smu);
4048
4049
return ret;
4050
}
4051
4052
/**
4053
* smu_reset_sdma_is_supported - Check if SDMA reset is supported by SMU
4054
* @smu: smu_context pointer
4055
*
4056
* This function checks if the SMU supports resetting the SDMA engine.
4057
* It returns true if supported, false otherwise.
4058
*/
4059
bool smu_reset_sdma_is_supported(struct smu_context *smu)
4060
{
4061
bool ret = false;
4062
4063
if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
4064
ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
4065
4066
return ret;
4067
}
4068
4069
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
4070
{
4071
int ret = 0;
4072
4073
if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma)
4074
ret = smu->ppt_funcs->reset_sdma(smu, inst_mask);
4075
4076
return ret;
4077
}
4078
4079
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
4080
{
4081
if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
4082
smu->ppt_funcs->dpm_reset_vcn(smu, inst_mask);
4083
4084
return 0;
4085
}
4086
4087