GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"

static const struct amd_pm_funcs pp_dpm_funcs;

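/*
 * Allocate and set up the powerplay hardware manager (hwmgr): create the
 * CGS device wrapper, record the chip family/id and the requested feature
 * mask, and publish the handle and dispatch table via adev->powerplay.
 */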
static int amd_powerplay_create(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr;

	if (adev == NULL)
		return -EINVAL;

	hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
	if (hwmgr == NULL)
		return -ENOMEM;

	hwmgr->adev = adev;
	hwmgr->not_vf = !amdgpu_sriov_vf(adev);
	hwmgr->device = amdgpu_cgs_create_device(adev);
	if (!hwmgr->device) {
		kfree(hwmgr);
		return -ENOMEM;
	}

	mutex_init(&hwmgr->msg_lock);
	hwmgr->chip_family = adev->family;
	hwmgr->chip_id = adev->asic_type;
	hwmgr->feature_mask = adev->pm.pp_feature;
	hwmgr->display_config = &adev->pm.pm_display_cfg;
	adev->powerplay.pp_handle = hwmgr;
	adev->powerplay.pp_funcs = &pp_dpm_funcs;
	return 0;
}

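/*
 * Undo amd_powerplay_create(): release any user-supplied (hardcoded) pp
 * table still held by the hwmgr, then the hwmgr itself.
 */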
static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	mutex_destroy(&hwmgr->msg_lock);

	kfree(hwmgr->hardcode_pp_table);
	hwmgr->hardcode_pp_table = NULL;

	kfree(hwmgr);
	hwmgr = NULL;
}

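/*
 * IP-block .early_init callback: create the hwmgr and run its early,
 * pre-sw/hw-init setup.
 */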
static int pp_early_init(struct amdgpu_ip_block *ip_block)
{
	int ret;
	struct amdgpu_device *adev = ip_block->adev;
	ret = amd_powerplay_create(adev);

	if (ret != 0)
		return ret;

	ret = hwmgr_early_init(adev->powerplay.pp_handle);
	if (ret)
		return -EINVAL;

	return 0;
}

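/*
 * Delayed work for the software CTF (critical temperature) check: once the
 * enforced delay has elapsed, re-read the hotspot (or, on ASICs that lack
 * it, the edge) temperature and power the system off in an orderly way if
 * it is still at or above the SW CTF threshold.
 */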
static void pp_swctf_delayed_work_handler(struct work_struct *work)
{
	struct pp_hwmgr *hwmgr =
		container_of(work, struct pp_hwmgr, swctf_delayed_work.work);
	struct amdgpu_device *adev = hwmgr->adev;
	struct amdgpu_dpm_thermal *range =
			&adev->pm.dpm.thermal;
	uint32_t gpu_temperature, size = sizeof(gpu_temperature);
	int ret;

	/*
	 * If the hotspot/edge temperature is confirmed to be below the SW CTF
	 * setting point after the enforced delay, nothing needs to be done.
	 * Otherwise, a graceful shutdown is performed to prevent further damage.
	 */
	if (range->sw_ctf_threshold &&
	    hwmgr->hwmgr_func->read_sensor) {
		ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
						     AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						     &gpu_temperature,
						     &size);
		/*
		 * Some legacy ASICs may not support retrieving the hotspot
		 * temperature. Check the edge temperature instead then.
		 */
		if (ret == -EOPNOTSUPP)
			ret = hwmgr->hwmgr_func->read_sensor(hwmgr,
							     AMDGPU_PP_SENSOR_EDGE_TEMP,
							     &gpu_temperature,
							     &size);
		if (!ret && gpu_temperature / 1000 < range->sw_ctf_threshold)
			return;
	}

	dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
	dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
	orderly_poweroff(true);
}

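/*
 * Remaining IP-block callbacks. Note that the SW CTF delayed work set up
 * in pp_sw_init() must be cancelled in pp_hw_fini() and pp_suspend()
 * before the hardware state goes away.
 */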
static int pp_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	int ret = 0;

	ret = hwmgr_sw_init(hwmgr);

	pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");

	if (!ret)
		INIT_DELAYED_WORK(&hwmgr->swctf_delayed_work,
				  pp_swctf_delayed_work_handler);

	return ret;
}

static int pp_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	hwmgr_sw_fini(hwmgr);

	amdgpu_ucode_release(&adev->pm.fw);

	return 0;
}

static int pp_hw_init(struct amdgpu_ip_block *ip_block)
{
	int ret = 0;
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	ret = hwmgr_hw_init(hwmgr);

	if (ret)
		pr_err("powerplay hw init failed\n");

	return ret;
}

static int pp_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	hwmgr_hw_fini(hwmgr);

	return 0;
}

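/*
 * Reserve a kernel GTT buffer for the SMU's private use and report its CPU
 * and GPU addresses to the SMU via notify_cac_buffer_info. On failure the
 * buffer is released again; this feature is strictly optional.
 */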
static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
{
	int r = -EINVAL;
	void *cpu_ptr = NULL;
	uint64_t gpu_addr;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
				    &adev->pm.smu_prv_buffer,
				    &gpu_addr,
				    &cpu_ptr)) {
		drm_err(adev_to_drm(adev), "failed to create smu prv buffer\n");
		return;
	}

	if (hwmgr->hwmgr_func->notify_cac_buffer_info)
		r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
					lower_32_bits((unsigned long)cpu_ptr),
					upper_32_bits((unsigned long)cpu_ptr),
					lower_32_bits(gpu_addr),
					upper_32_bits(gpu_addr),
					adev->pm.smu_prv_buffer_size);

	if (r) {
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
		adev->pm.smu_prv_buffer = NULL;
		drm_err(adev_to_drm(adev), "failed to notify SMU buffer address\n");
	}
}

static int pp_late_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	if (hwmgr && hwmgr->pm_en)
		hwmgr_handle_task(hwmgr,
				  AMD_PP_TASK_COMPLETE_INIT, NULL);
	if (adev->pm.smu_prv_buffer_size != 0)
		pp_reserve_vram_for_smu(adev);

	return 0;
}

static void pp_late_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	if (adev->pm.smu_prv_buffer)
		amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
	amd_powerplay_destroy(adev);
}

static bool pp_is_idle(struct amdgpu_ip_block *ip_block)
{
	return false;
}

static int pp_set_powergating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_powergating_state state)
{
	return 0;
}

static int pp_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	cancel_delayed_work_sync(&hwmgr->swctf_delayed_work);

	return hwmgr_suspend(hwmgr);
}

static int pp_resume(struct amdgpu_ip_block *ip_block)
{
	struct pp_hwmgr *hwmgr = ip_block->adev->powerplay.pp_handle;

	return hwmgr_resume(hwmgr);
}

static int pp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
				    enum amd_clockgating_state state)
{
	return 0;
}

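/*
 * IP-block glue: the callbacks above, registered with the amdgpu core as
 * the SMC IP block through pp_smu_ip_block below.
 */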
static const struct amd_ip_funcs pp_ip_funcs = {
	.name = "powerplay",
	.early_init = pp_early_init,
	.late_init = pp_late_init,
	.sw_init = pp_sw_init,
	.sw_fini = pp_sw_fini,
	.hw_init = pp_hw_init,
	.hw_fini = pp_hw_fini,
	.late_fini = pp_late_fini,
	.suspend = pp_suspend,
	.resume = pp_resume,
	.is_idle = pp_is_idle,
	.set_clockgating_state = pp_set_clockgating_state,
	.set_powergating_state = pp_set_powergating_state,
};

const struct amdgpu_ip_block_version pp_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &pp_ip_funcs,
};

/* This interface is only supported on VI,
 * because only smu7/8 can help to load the gfx/sdma fw.
 * The SMU needs to be enabled before the other IPs' fw is loaded,
 * so start the SMU to load the SMU fw first, then the other IPs' fw.
 */
static int pp_dpm_load_fw(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
		return -EINVAL;

	if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
		pr_err("fw load failed\n");
		return -EINVAL;
	}

	return 0;
}

static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}

static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
}

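/*
 * Enter or leave the UMD "stable pstate" mode. When switching into one of
 * the profiling levels, the current dpm level is saved so that
 * AMD_DPM_FORCED_LEVEL_PROFILE_EXIT can restore it later.
 */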
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	if (!(hwmgr->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg */
		if (*level & profile_mode_mask) {
			hwmgr->saved_dpm_level = hwmgr->dpm_level;
			hwmgr->en_umd_pstate = true;
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg */
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = hwmgr->saved_dpm_level;
			hwmgr->en_umd_pstate = false;
		}
	}
}

static int pp_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (level == hwmgr->dpm_level)
		return 0;

	pp_dpm_en_umd_pstate(hwmgr, &level);
	hwmgr->request_dpm_level = level;
	hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);

	return 0;
}

static enum amd_dpm_forced_level pp_dpm_get_performance_level(
								void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr->dpm_level;
}

static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_sclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}

static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->get_mclk == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}

static void pp_dpm_powergate_vce(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
}

static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}
	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
}

static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
				 enum amd_pm_state_type *user_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return hwmgr_handle_task(hwmgr, task_id, user_state);
}

static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct pp_power_state *state;
	enum amd_pm_state_type pm_type;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
		return -EINVAL;

	state = hwmgr->current_ps;

	switch (state->classification.ui_label) {
	case PP_StateUILabel_Battery:
		pm_type = POWER_STATE_TYPE_BATTERY;
		break;
	case PP_StateUILabel_Balanced:
		pm_type = POWER_STATE_TYPE_BALANCED;
		break;
	case PP_StateUILabel_Performance:
		pm_type = POWER_STATE_TYPE_PERFORMANCE;
		break;
	default:
		if (state->classification.flags & PP_StateClassificationFlag_Boot)
			pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
		else
			pm_type = POWER_STATE_TYPE_DEFAULT;
		break;
	}

	return pm_type;
}

static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (mode == U32_MAX)
		return -EINVAL;

	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);

	return 0;
}

static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
		return -EOPNOTSUPP;

	if (!fan_mode)
		return -EINVAL;

	*fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
	return 0;
}

static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (speed == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
		return -EOPNOTSUPP;

	if (!speed)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}

static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (!rpm)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
		return -EOPNOTSUPP;

	if (rpm == U32_MAX)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}

static int pp_dpm_get_pp_num_states(void *handle,
				    struct pp_states_info *data)
{
	struct pp_hwmgr *hwmgr = handle;
	int i;

	memset(data, 0, sizeof(*data));

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->ps)
		return -EINVAL;

	data->nums = hwmgr->num_ps;

	for (i = 0; i < hwmgr->num_ps; i++) {
		struct pp_power_state *state = (struct pp_power_state *)
				((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
		switch (state->classification.ui_label) {
		case PP_StateUILabel_Battery:
			data->states[i] = POWER_STATE_TYPE_BATTERY;
			break;
		case PP_StateUILabel_Balanced:
			data->states[i] = POWER_STATE_TYPE_BALANCED;
			break;
		case PP_StateUILabel_Performance:
			data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
			break;
		default:
			if (state->classification.flags & PP_StateClassificationFlag_Boot)
				data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
			else
				data->states[i] = POWER_STATE_TYPE_DEFAULT;
		}
	}
	return 0;
}

static int pp_dpm_get_pp_table(void *handle, char **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !table)
		return -EINVAL;

	if (!hwmgr->soft_pp_table)
		return -EOPNOTSUPP;

	*table = (char *)hwmgr->soft_pp_table;
	return hwmgr->soft_pp_table_size;
}

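/*
 * Full powerplay reset: tear down the hardware state, bring it back up and
 * replay the COMPLETE_INIT task. Used by pp_dpm_set_pp_table() below to
 * apply a newly provided pp table.
 */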
static int amd_powerplay_reset(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret;

	ret = hwmgr_hw_fini(hwmgr);
	if (ret)
		return ret;

	ret = hwmgr_hw_init(hwmgr);
	if (ret)
		return ret;

	return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
}

static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = -ENOMEM;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (!hwmgr->hardcode_pp_table) {
		hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
						   hwmgr->soft_pp_table_size,
						   GFP_KERNEL);
		if (!hwmgr->hardcode_pp_table)
			return ret;
	}

	memcpy(hwmgr->hardcode_pp_table, buf, size);

	hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;

	ret = amd_powerplay_reset(handle);
	if (ret)
		return ret;

	if (hwmgr->hwmgr_func->avfs_control)
		ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);

	return ret;
}

static int pp_dpm_force_clock_level(void *handle,
				    enum pp_clock_type type, uint32_t mask)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->force_clock_level == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}

static int pp_dpm_emit_clock_levels(void *handle,
				    enum pp_clock_type type,
				    char *buf,
				    int *offset)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EOPNOTSUPP;

	if (!hwmgr->hwmgr_func->emit_clock_levels)
		return -ENOENT;

	return hwmgr->hwmgr_func->emit_clock_levels(hwmgr, type, buf, offset);
}

static int pp_dpm_get_sclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}

static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}

static int pp_dpm_get_mclk_od(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}

static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}
	return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}

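/*
 * Sensor reads that can be answered from values cached in the hwmgr
 * (stable/peak pstate clocks, fan RPM limits) are handled here directly;
 * anything else is forwarded to the ASIC-specific read_sensor callback.
 */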
static int pp_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !value)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
		*((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
		*((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
		return 0;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
		return 0;
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
		return 0;
	default:
		return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
	}
}

static struct amd_vce_state*
pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return NULL;

	if (idx < hwmgr->num_vce_state_tables)
		return &hwmgr->vce_states[idx];
	return NULL;
}

static int pp_get_power_profile_mode(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
		return -EOPNOTSUPP;
	if (!buf)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}

static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("power profile setting is for manual dpm mode only.\n");
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}

static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
		return 0;

	return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
}

static int pp_odn_edit_dpm_table(void *handle, enum PP_OD_DPM_TABLE_COMMAND type,
				 long *input, uint32_t size)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
}

static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->set_mp1_state)
		return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);

	return 0;
}

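/*
 * Toggle a power profile (e.g. compute) in the workload mask. fls() picks
 * the highest set bit, i.e. what appears to be the highest-priority
 * workload still enabled, which is then applied unless dpm is in manual
 * mode.
 */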
static int pp_dpm_switch_power_profile(void *handle,
				       enum PP_SMC_POWER_PROFILE type, bool en)
{
	struct pp_hwmgr *hwmgr = handle;
	long workload[1];
	uint32_t index;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	if (!en) {
		hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	} else {
		hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
		index = fls(hwmgr->workload_mask);
		index = index <= Workload_Policy_Max ? index - 1 : 0;
		workload[0] = hwmgr->workload_setting[index];
	}

	if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
	    hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
		if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
			return -EINVAL;
	}

	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);

	return 0;
}

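/*
 * Set the sustained power limit. A limit of 0 selects the default; the
 * accepted ceiling is the default limit, scaled up by the overdrive TDP
 * percentage when OD is enabled.
 */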
static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
	struct pp_hwmgr *hwmgr = handle;
	uint32_t max_power_limit;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_power_limit == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	if (limit == 0)
		limit = hwmgr->default_power_limit;

	max_power_limit = hwmgr->default_power_limit;
	if (hwmgr->od_enabled) {
		max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
		max_power_limit /= 100;
	}

	if (limit > max_power_limit)
		return -EINVAL;

	hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
	hwmgr->power_limit = limit;
	return 0;
}

static int pp_get_power_limit(void *handle, uint32_t *limit,
			      enum pp_power_limit_level pp_limit_level,
			      enum pp_power_type power_type)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !limit)
		return -EINVAL;

	if (power_type != PP_PWR_TYPE_SUSTAINED)
		return -EOPNOTSUPP;

	switch (pp_limit_level) {
	case PP_PWR_LIMIT_CURRENT:
		*limit = hwmgr->power_limit;
		break;
	case PP_PWR_LIMIT_DEFAULT:
		*limit = hwmgr->default_power_limit;
		break;
	case PP_PWR_LIMIT_MAX:
		*limit = hwmgr->default_power_limit;
		if (hwmgr->od_enabled) {
			*limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
			*limit /= 100;
		}
		break;
	case PP_PWR_LIMIT_MIN:
		*limit = 0;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int pp_display_configuration_change(void *handle,
					   const struct amd_pp_display_configuration *display_config)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_store_dal_configuration_data(hwmgr, display_config);
	return 0;
}

static int pp_get_display_power_level(void *handle,
				      struct amd_pp_simple_clock_info *output)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !output)
		return -EINVAL;

	return phm_get_dal_power_level(hwmgr, output);
}

static int pp_get_current_clocks(void *handle,
				 struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = { 0 };
	struct pp_clock_info hw_clocks;
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	phm_get_dal_power_level(hwmgr, &simple_clocks);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			    PHM_PlatformCaps_PowerContainment))
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
	else
		ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
					 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);

	if (ret) {
		drm_err(adev_to_drm(hwmgr->adev),
			"Error in phm_get_clock_info\n");
		return -EINVAL;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;

	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}
	return 0;
}

static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (clocks == NULL)
		return -EINVAL;

	return phm_get_clock_by_type(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_latency(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_latency *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}

static int pp_get_clock_by_type_with_voltage(void *handle,
					     enum amd_pp_clock_type type,
					     struct pp_clock_levels_with_voltage *clocks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}

static int pp_set_watermarks_for_clocks_ranges(void *handle,
					       void *clock_ranges)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
		return -EINVAL;

	return phm_set_watermarks_for_clocks_ranges(hwmgr,
						    clock_ranges);
}

static int pp_display_clock_voltage_request(void *handle,
					    struct pp_display_clock_request *clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !clock)
		return -EINVAL;

	return phm_display_clock_voltage_request(hwmgr, clock);
}

static int pp_get_display_mode_validation_clocks(void *handle,
						 struct amd_pp_simple_clock_info *clocks)
{
	struct pp_hwmgr *hwmgr = handle;
	int ret = 0;

	if (!hwmgr || !hwmgr->pm_en || !clocks)
		return -EINVAL;

	clocks->level = PP_DAL_POWERLEVEL_7;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
		ret = phm_get_max_high_clocks(hwmgr, clocks);

	return ret;
}

static int pp_dpm_powergate_mmhub(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
}

static int pp_dpm_powergate_gfx(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return 0;

	if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return 0;
	}

	return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
}

static void pp_dpm_powergate_acp(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return;

	if (hwmgr->hwmgr_func->powergate_acp == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
}

static void pp_dpm_powergate_sdma(void *handle, bool gate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return;

	if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return;
	}

	hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
}

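/*
 * Route powergating requests from the amdgpu core to the per-IP helpers
 * above, keyed on the IP block type.
 */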
static int pp_set_powergating_by_smu(void *handle,
				     uint32_t block_type,
				     bool gate,
				     int inst)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCN:
		pp_dpm_powergate_uvd(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		pp_dpm_powergate_vce(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
		/*
		 * For now, this is only used on PICASSO.
		 * And only "gate" operation is supported.
		 */
		if (gate)
			pp_dpm_powergate_mmhub(handle);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = pp_dpm_powergate_gfx(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_ACP:
		pp_dpm_powergate_acp(handle, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		pp_dpm_powergate_sdma(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}

static int pp_notify_smu_enable_pwe(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);

	return 0;
}

static int pp_enable_mgpu_fan_boost(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en ||
	    hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
		return 0;

	hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);

	return 0;
}

static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
		pr_debug("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);

	return 0;
}

static int pp_set_active_display_count(void *handle, uint32_t count)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	return phm_set_active_display_count(hwmgr, count);
}

static int pp_get_asic_baco_capability(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return false;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->get_bamaco_support)
		return false;

	return hwmgr->hwmgr_func->get_bamaco_support(hwmgr);
}

static int pp_get_asic_baco_state(void *handle, int *state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);

	return 0;
}

static int pp_set_asic_baco_state(void *handle, int state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!(hwmgr->not_vf && amdgpu_dpm) ||
	    !hwmgr->hwmgr_func->set_asic_baco_state)
		return 0;

	hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);

	return 0;
}

static int pp_get_ppfeature_status(void *handle, char *buf)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en || !buf)
		return -EINVAL;

	if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}

static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}

static int pp_asic_reset_mode_2(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->asic_reset == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}

static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}

static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
		return 0;

	hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);

	return 0;
}

static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
		return 0;

	hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);

	return 0;
}

static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr)
		return -EINVAL;

	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
		return -EOPNOTSUPP;

	return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}

static int pp_gfx_state_change_set(void *handle, uint32_t state)
{
	struct pp_hwmgr *hwmgr = handle;

	if (!hwmgr || !hwmgr->pm_en)
		return -EINVAL;

	if (hwmgr->hwmgr_func->gfx_state_change == NULL) {
		pr_info_ratelimited("%s was not implemented.\n", __func__);
		return -EINVAL;
	}

	hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
	return 0;
}

static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;
	int err;

	if (!addr || !size)
		return -EINVAL;

	*addr = NULL;
	*size = 0;
	if (adev->pm.smu_prv_buffer) {
		err = amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
		if (err)
			return err;
		*size = adev->pm.smu_prv_buffer_size;
	}

	return 0;
}

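/*
 * Recompute clocks for the current display configuration. Without DC, the
 * display configuration is gathered first; the actual work happens by
 * dispatching the DISPLAY_CONFIG_CHANGE task.
 */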
static void pp_pm_compute_clocks(void *handle)
{
	struct pp_hwmgr *hwmgr = handle;
	struct amdgpu_device *adev = hwmgr->adev;

	if (!adev->dc_enabled) {
		amdgpu_dpm_get_display_cfg(adev);
		pp_display_configuration_change(handle,
						&adev->pm.pm_display_cfg);
	}

	pp_dpm_dispatch_tasks(handle,
			      AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
			      NULL);
}

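/*
 * The amd_pm_funcs table exported to the rest of the driver; the entries
 * below the "export to DC" marker are used by the display core.
 */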
static const struct amd_pm_funcs pp_dpm_funcs = {
	.load_firmware = pp_dpm_load_fw,
	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
	.force_performance_level = pp_dpm_force_performance_level,
	.get_performance_level = pp_dpm_get_performance_level,
	.get_current_power_state = pp_dpm_get_current_power_state,
	.dispatch_tasks = pp_dpm_dispatch_tasks,
	.set_fan_control_mode = pp_dpm_set_fan_control_mode,
	.get_fan_control_mode = pp_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = pp_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = pp_dpm_get_fan_speed_pwm,
	.get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
	.set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
	.get_pp_num_states = pp_dpm_get_pp_num_states,
	.get_pp_table = pp_dpm_get_pp_table,
	.set_pp_table = pp_dpm_set_pp_table,
	.force_clock_level = pp_dpm_force_clock_level,
	.emit_clock_levels = pp_dpm_emit_clock_levels,
	.get_sclk_od = pp_dpm_get_sclk_od,
	.set_sclk_od = pp_dpm_set_sclk_od,
	.get_mclk_od = pp_dpm_get_mclk_od,
	.set_mclk_od = pp_dpm_set_mclk_od,
	.read_sensor = pp_dpm_read_sensor,
	.get_vce_clock_state = pp_dpm_get_vce_clock_state,
	.switch_power_profile = pp_dpm_switch_power_profile,
	.set_clockgating_by_smu = pp_set_clockgating_by_smu,
	.set_powergating_by_smu = pp_set_powergating_by_smu,
	.get_power_profile_mode = pp_get_power_profile_mode,
	.set_power_profile_mode = pp_set_power_profile_mode,
	.set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
	.odn_edit_dpm_table = pp_odn_edit_dpm_table,
	.set_mp1_state = pp_dpm_set_mp1_state,
	.set_power_limit = pp_set_power_limit,
	.get_power_limit = pp_get_power_limit,
	/* export to DC */
	.get_sclk = pp_dpm_get_sclk,
	.get_mclk = pp_dpm_get_mclk,
	.display_configuration_change = pp_display_configuration_change,
	.get_display_power_level = pp_get_display_power_level,
	.get_current_clocks = pp_get_current_clocks,
	.get_clock_by_type = pp_get_clock_by_type,
	.get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = pp_display_clock_voltage_request,
	.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
	.notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
	.enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
	.set_active_display_count = pp_set_active_display_count,
	.set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
	.set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
	.set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
	.get_asic_baco_capability = pp_get_asic_baco_capability,
	.get_asic_baco_state = pp_get_asic_baco_state,
	.set_asic_baco_state = pp_set_asic_baco_state,
	.get_ppfeature_status = pp_get_ppfeature_status,
	.set_ppfeature_status = pp_set_ppfeature_status,
	.asic_reset_mode_2 = pp_asic_reset_mode_2,
	.smu_i2c_bus_access = pp_smu_i2c_bus_access,
	.set_df_cstate = pp_set_df_cstate,
	.set_xgmi_pstate = pp_set_xgmi_pstate,
	.get_gpu_metrics = pp_get_gpu_metrics,
	.gfx_state_change_set = pp_gfx_state_change_set,
	.get_smu_prv_buf_details = pp_get_prv_buffer_details,
	.pm_compute_clocks = pp_pm_compute_clocks,
};