/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "amdgpu_atombios.h"
#include "atom.h"
#include "amd_pcie.h"
#include "legacy_dpm.h"
#include "amdgpu_dpm_internal.h"
#include "amdgpu_display.h"

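/*
 * Thin wrappers around the powerplay function table. Callers are expected
 * to check the corresponding pp_funcs pointer before invoking these, as
 * amdgpu_pm_print_power_states() below does for print_power_state.
 */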
#define amdgpu_dpm_pre_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_post_set_power_state(adev) \
		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))

#define amdgpu_dpm_display_configuration_changed(adev) \
		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))

#define amdgpu_dpm_print_power_state(adev, ps) \
		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))

#define amdgpu_dpm_vblank_too_short(adev) \
		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))

#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))

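/*
 * Decode the ATOM_PPLIB classification bitmasks of a power state into
 * human-readable debug output: the UI class (none/battery/balanced/
 * performance) plus any internal classification flags in class and class2.
 */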
void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s);
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		drm_dbg(adev_to_drm(adev), "\tinternal class: none\n");
	else
		drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
			(class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "",
			(class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "",
			(class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : "");
}

void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps)
{
	drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n",
		(caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "",
		(caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "",
		(caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : "");
}

void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev,
				    struct amdgpu_ps *rps)
{
	drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n",
		rps == adev->pm.dpm.current_ps ? " c" : "",
		rps == adev->pm.dpm.requested_ps ? " r" : "",
		rps == adev->pm.dpm.boot_ps ? " b" : "");
}

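/*
 * Dump every parsed power state via the backend's print_power_state hook;
 * a NULL hook simply means the backend does not implement state printing.
 */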
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

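/*
 * The PowerPlayInfo data table has shipped in several revisions; the
 * unions below overlay the known layouts so a single pointer into the
 * BIOS image can be interpreted according to the reported table size.
 */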
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

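/*
 * Read the platform capability flags and response times from the
 * PowerPlayInfo table header. Returns -EINVAL when the table cannot be
 * located in the ATOM BIOS.
 */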
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

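/*
 * Convert an ATOM clock/voltage dependency table into the driver's
 * representation. Clocks are stored in the BIOS as a split 24-bit value,
 * reassembled here as usClockLow | (ucClockHigh << 16), and records are
 * walked by sizeof() since the table is packed. Returns -ENOMEM on
 * allocation failure; the entries are later released by
 * amdgpu_free_extended_power_table().
 */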
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

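/*
 * Parse the optional tables that later PowerPlayInfo revisions append:
 * fan, clock/voltage dependency, CAC, and the extended-header tables
 * (VCE/UVD/SAMU/ACP/PowerTune/SclkVddgfx). Each table is only read when
 * the reported table size or extended-header size says it is present.
 * On -ENOMEM the partially filled tables are left for
 * amdgpu_free_extended_power_table() to release.
 */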
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, phase shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
				return -ENOMEM;

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries)
				return -ENOMEM;
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));

			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
				return -ENOMEM;
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table)
				return -ENOMEM;
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret)
				return ret;
		}
	}

	return 0;
}

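/*
 * Release everything amdgpu_parse_extended_power_table() may have
 * allocated. kfree(NULL) is a no-op, so tables that were never parsed
 * are safely skipped.
 */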
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

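/* Indexed by the ATOM_PP_THERMALCONTROLLER_* type codes. */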
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

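/*
 * Identify the thermal controller described in the PowerPlayInfo table
 * and record its fan parameters. Internal controllers just set
 * adev->pm.int_thermal_type; for recognized external I2C controllers an
 * i2c client device is registered on the matching bus so a hwmon driver
 * can bind to it.
 */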
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];

				info.addr = controller->ucI2cAddress >> 1;
				strscpy(info.type, name, sizeof(info.type));
				i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

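/*
 * Return the VCE clock state for the given index, or NULL when the index
 * is out of range of the states parsed from the VCE state table.
 */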
struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

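/*
 * Select the best matching power state for the requested state type.
 * States limited to a single display are only eligible when at most one
 * CRTC is active and the vblank period is long enough to re-clock mclk.
 * If nothing matches, the request is progressively downgraded (e.g.
 * UVD SD -> UVD HD -> generic UVD -> performance) and the search restarts.
 */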
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = adev->pm.dpm.new_active_crtc_count < 2;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older ASICs have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}

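/*
 * Transition to the power state appropriate for adev->pm.dpm.state:
 * pick a state, notify the backend of display changes, pre-set, skip
 * the switch when the backend reports the states as equal, set,
 * post-set, then reapply the forced performance level (forcing low
 * while a thermal event is active). The _locked suffix suggests the
 * caller is expected to hold adev->pm.mutex, as
 * amdgpu_dpm_thermal_work_handler() below does.
 */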
static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return 0;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return -EINVAL;

	if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
		drm_dbg(adev_to_drm(adev), "switching from power state\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		drm_dbg(adev_to_drm(adev), "switching to power state\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return ret;

	if (pp_funcs->check_state_equal) {
		if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
						 adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return 0;

	if (pp_funcs->set_power_state)
		pp_funcs->set_power_state(adev->powerplay.pp_handle);

	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;

			/* force low perf level for thermal */
			pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}

	return 0;
}

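/*
 * Legacy hook for recomputing clocks after the display configuration
 * changes: refresh the active display info, then re-evaluate and apply
 * the power state.
 */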
void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_get_active_displays(adev);

	amdgpu_dpm_change_power_state_locked(adev);
}

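/*
 * Deferred work triggered by a thermal interrupt. Reads the current GPU
 * temperature and switches into (or back out of) the internal thermal
 * state; amdgpu_legacy_dpm_compute_clocks() then performs the actual
 * transition, all under adev->pm.mutex.
 */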
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	mutex_lock(&adev->pm.mutex);

	if (!adev->pm.dpm_enabled) {
		mutex_unlock(&adev->pm.mutex);
		return;
	}
	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
				   AMDGPU_PP_SENSOR_GPU_TEMP,
				   (void *)&temp,
				   &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}

	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;

	adev->pm.dpm.state = dpm_state;

	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
	mutex_unlock(&adev->pm.mutex);
}