GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/amdgpu_pm.c
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <[email protected]>
 *          Alex Deucher <[email protected]>
 */

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>

#define MAX_NUM_OF_FEATURES_PER_SUBSET 8
#define MAX_NUM_OF_SUBSETS 8

#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name)

struct od_attribute {
        struct kobj_attribute attribute;
        struct list_head entry;
};

struct od_kobj {
        struct kobject kobj;
        struct list_head entry;
        struct list_head attribute;
        void *priv;
};

struct od_feature_ops {
        umode_t (*is_visible)(struct amdgpu_device *adev);
        ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf);
        ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count);
};

struct od_feature_item {
        const char *name;
        struct od_feature_ops ops;
};

struct od_feature_container {
        char *name;
        struct od_feature_ops ops;
        struct od_feature_item sub_feature[MAX_NUM_OF_FEATURES_PER_SUBSET];
};

struct od_feature_set {
        struct od_feature_container containers[MAX_NUM_OF_SUBSETS];
};

static const struct hwmon_temp_label {
        enum PP_HWMON_TEMP channel;
        const char *label;
} temp_label[] = {
        {PP_TEMP_EDGE, "edge"},
        {PP_TEMP_JUNCTION, "junction"},
        {PP_TEMP_MEM, "mem"},
};

const char * const amdgpu_pp_profile_name[] = {
        "BOOTUP_DEFAULT",
        "3D_FULL_SCREEN",
        "POWER_SAVING",
        "VIDEO",
        "VR",
        "COMPUTE",
        "CUSTOM",
        "WINDOW_3D",
        "CAPPED",
        "UNCAPPED",
};

/**
 * amdgpu_pm_dev_state_check - Check if device can be accessed.
 * @adev: Target device.
 * @runpm: Check runpm status for suspend state checks.
 *
 * Checks the state of the @adev for access. Return 0 if the device is
 * accessible or a negative error code otherwise.
 */
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
        bool runpm_check = runpm ? adev->in_runpm : false;

        if (amdgpu_in_reset(adev))
                return -EPERM;
        if (adev->in_suspend && !runpm_check)
                return -EPERM;

        return 0;
}

/**
 * amdgpu_pm_get_access - Check if device can be accessed, resume if needed.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to resume if
 * needed. Return 0 if the device is accessible or a negative error code
 * otherwise.
 */
static int amdgpu_pm_get_access(struct amdgpu_device *adev)
{
        int ret;

        ret = amdgpu_pm_dev_state_check(adev, true);
        if (ret)
                return ret;

        return pm_runtime_resume_and_get(adev->dev);
}

/**
 * amdgpu_pm_get_access_if_active - Check if device is active for access.
 * @adev: Target device.
 *
 * Checks the state of the @adev for access. Use runtime pm API to determine
 * if the device is active. Allow access only if the device is active. Return
 * 0 if the device is accessible or a negative error code otherwise.
 */
static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
{
        int ret;

        /* Ignore runpm status. If device is in suspended state, deny access */
        ret = amdgpu_pm_dev_state_check(adev, false);
        if (ret)
                return ret;

        /*
         * Allow access only if the device is active. If runtime pm is also
         * disabled, as in kernels without CONFIG_PM, allow access.
         */
        ret = pm_runtime_get_if_active(adev->dev);
        if (!ret)
                return -EPERM;

        return 0;
}

/**
 * amdgpu_pm_put_access - Put the device back into autosuspend mode after an access.
 * @adev: Target device.
 *
 * Should be paired with amdgpu_pm_get_access* calls.
 */
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
        pm_runtime_mark_last_busy(adev->dev);
        pm_runtime_put_autosuspend(adev->dev);
}

/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 */
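
/*
 * Example usage (editor's sketch, not part of the upstream docs): reading
 * and writing the node from the shell. The card0 path is an assumption;
 * the card index varies per system, and writes require root.
 *
 *     $ cat /sys/class/drm/card0/device/power_dpm_state
 *     performance
 *     # echo battery > /sys/class/drm/card0/device/power_dpm_state
 */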

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type pm;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        amdgpu_dpm_get_current_power_state(adev, &pm);

        amdgpu_pm_put_access(adev);

        return sysfs_emit(buf, "%s\n",
                          (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
                          (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}

static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type state;
        int ret;

        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
        else if (strncmp("balanced", buf, strlen("balanced")) == 0)
                state = POWER_STATE_TYPE_BALANCED;
        else if (strncmp("performance", buf, strlen("performance")) == 0)
                state = POWER_STATE_TYPE_PERFORMANCE;
        else
                return -EINVAL;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        amdgpu_dpm_set_power_state(adev, state);

        amdgpu_pm_put_access(adev);

        return count;
}


/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for the current conditions.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating or clock fluctuations to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
 *
 */
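
/*
 * Example usage (editor's sketch; the card0 path is an assumption):
 * switch to manual mode before editing per-clock levels, then restore auto.
 *
 *     # echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *     # echo "2 3" > /sys/class/drm/card0/device/pp_dpm_sclk
 *     # echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */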

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
                                                            struct device_attribute *attr,
                                                            char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_dpm_forced_level level = 0xff;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        level = amdgpu_dpm_get_performance_level(adev);

        amdgpu_pm_put_access(adev);

        return sysfs_emit(buf, "%s\n",
                          (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
                          (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
                          (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
                          (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
                          (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
                          (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
                          "unknown");
}

static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                                                            struct device_attribute *attr,
                                                            const char *buf,
                                                            size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_dpm_forced_level level;
        int ret = 0;

        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
        } else if (strncmp("high", buf, strlen("high")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_HIGH;
        } else if (strncmp("auto", buf, strlen("auto")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_AUTO;
        } else if (strncmp("manual", buf, strlen("manual")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_MANUAL;
        } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
        } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
        } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
        } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
        } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
        } else {
                return -EINVAL;
        }

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        mutex_lock(&adev->pm.stable_pstate_ctx_lock);
        if (amdgpu_dpm_force_performance_level(adev, level)) {
                amdgpu_pm_put_access(adev);
                mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
                return -EINVAL;
        }
        /* override whatever a user ctx may have set */
        adev->pm.stable_pstate_ctx = NULL;
        mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

        amdgpu_pm_put_access(adev);

        return count;
}

static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct pp_states_info data;
        uint32_t i;
        int buf_len, ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        if (amdgpu_dpm_get_pp_num_states(adev, &data))
                memset(&data, 0, sizeof(data));

        amdgpu_pm_put_access(adev);

        buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
        for (i = 0; i < data.nums; i++)
                buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
                                (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
                                (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
                                (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
                                (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

        return buf_len;
}

static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        struct pp_states_info data = {0};
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        amdgpu_dpm_get_current_power_state(adev, &pm);

        ret = amdgpu_dpm_get_pp_num_states(adev, &data);

        amdgpu_pm_put_access(adev);

        if (ret)
                return ret;

        for (i = 0; i < data.nums; i++) {
                if (pm == data.states[i])
                        break;
        }

        if (i == data.nums)
                i = -EINVAL;

        return sysfs_emit(buf, "%d\n", i);
}

static ssize_t amdgpu_get_pp_force_state(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        if (adev->pm.pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
                return sysfs_emit(buf, "\n");
}

static ssize_t amdgpu_set_pp_force_state(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        enum amd_pm_state_type state = 0;
        struct pp_states_info data;
        unsigned long idx;
        int ret;

        adev->pm.pp_force_state_enabled = false;

        if (strlen(buf) == 1)
                return count;

        ret = kstrtoul(buf, 0, &idx);
        if (ret || idx >= ARRAY_SIZE(data.states))
                return -EINVAL;

        idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_get_pp_num_states(adev, &data);
        if (ret)
                goto err_out;

        state = data.states[idx];

        /* only set user selected power states */
        if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
            state != POWER_STATE_TYPE_DEFAULT) {
                ret = amdgpu_dpm_dispatch_task(adev,
                                AMD_PP_TASK_ENABLE_USER_STATE, &state);
                if (ret)
                        goto err_out;

                adev->pm.pp_force_state_enabled = true;
        }

        amdgpu_pm_put_access(adev);

        return count;

err_out:
        amdgpu_pm_put_access(adev);

        return ret;
}

/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current powerplay table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
 *
 */
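
/*
 * Example usage (editor's sketch; the paths are assumptions and the table
 * is binary): save the current table, then upload a modified copy.
 *
 *     # cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bin
 *     ... edit /tmp/pp_table.bin with a vendor-specific tool ...
 *     # cat /tmp/pp_table.bin > /sys/class/drm/card0/device/pp_table
 */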

static ssize_t amdgpu_get_pp_table(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        char *table = NULL;
        int size, ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        size = amdgpu_dpm_get_pp_table(adev, &table);

        amdgpu_pm_put_access(adev);

        if (size <= 0)
                return size;

        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;

        memcpy(buf, table, size);

        return size;
}

static ssize_t amdgpu_set_pp_table(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf,
                                   size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret = 0;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_set_pp_table(adev, buf, count);

        amdgpu_pm_put_access(adev);

        if (ret)
                return ret;

        return count;
}

/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The file pp_od_clk_voltage is used
 * for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data_rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data_rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve. This is
 *   available for Vega20 and NV1X.
 *
 * - voltage offset (in mV) applied on target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs. For these ASICs, the target
 *   voltage calculation can be illustrated by "voltage = voltage
 *   calculated from v/f curve + overdrive vddgfx offset"
 *
 * - a list of valid ranges for sclk, mclk, voltage curve points
 *   or voltage offset labeled OD_RANGE
 *
 * < For APUs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - a list of valid ranges for sclk labeled OD_RANGE
 *
 * < For VanGogh >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 *
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz;
 *   "m 1 800" will update the maximum mclk to be 800 MHz. For core
 *   clocks on VanGogh, the string contains "p core index clock".
 *   E.g., "p 2 0 800" would set the minimum core clock on core
 *   2 to 800 MHz.
 *
 * - For the sclk voltage curve supported by Vega20 and NV1X, enter the new
 *   values by writing a string that contains "vc point clock voltage"
 *   to the file. The points are indexed by 0, 1 and 2. E.g., "vc 0 300
 *   600" will update point 1 with the clock set to 300 MHz and the voltage
 *   to 600 mV; "vc 2 1000 1000" will update point 3 with the clock set to
 *   1000 MHz and the voltage to 1000 mV.
 *
 * - For the voltage offset supported by Sienna Cichlid, Navy Flounder, Dimgrey
 *   Cavefish and some later SMU13 ASICs, enter the new value by writing a
 *   string that contains "vo offset". E.g., "vo -10" will update the extra
 *   voltage offset applied to the whole v/f curve line to -10 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
 *
 */
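
/*
 * Example session (editor's sketch using the commands documented above;
 * the values are made up and the card0 path is an assumption): raise sclk
 * level 1 on a pre-Vega20 ASIC and commit, then reset to defaults.
 *
 *     # echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *     # echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *     # echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *     # echo "r" > /sys/class/drm/card0/device/pp_od_clk_voltage
 */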

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf,
                                            size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        uint32_t parameter_size = 0;
        long parameter[64];
        char buf_cpy[128];
        char *tmp_str;
        char *sub_str;
        const char delimiter[3] = {' ', '\n', '\0'};
        uint32_t type;

        if (count > 127 || count == 0)
                return -EINVAL;

        if (*buf == 's')
                type = PP_OD_EDIT_SCLK_VDDC_TABLE;
        else if (*buf == 'p')
                type = PP_OD_EDIT_CCLK_VDDC_TABLE;
        else if (*buf == 'm')
                type = PP_OD_EDIT_MCLK_VDDC_TABLE;
        else if (*buf == 'r')
                type = PP_OD_RESTORE_DEFAULT_TABLE;
        else if (*buf == 'c')
                type = PP_OD_COMMIT_DPM_TABLE;
        else if (!strncmp(buf, "vc", 2))
                type = PP_OD_EDIT_VDDC_CURVE;
        else if (!strncmp(buf, "vo", 2))
                type = PP_OD_EDIT_VDDGFX_OFFSET;
        else
                return -EINVAL;

        memcpy(buf_cpy, buf, count);
        buf_cpy[count] = 0;

        tmp_str = buf_cpy;

        if ((type == PP_OD_EDIT_VDDC_CURVE) ||
            (type == PP_OD_EDIT_VDDGFX_OFFSET))
                tmp_str++;
        while (isspace(*++tmp_str));

        while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                if (strlen(sub_str) == 0)
                        continue;
                ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                if (ret)
                        return -EINVAL;
                parameter_size++;

                if (!tmp_str)
                        break;

                while (isspace(*tmp_str))
                        tmp_str++;
        }

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
                                              type,
                                              parameter,
                                              parameter_size))
                goto err_out;

        if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                          parameter, parameter_size))
                goto err_out;

        if (type == PP_OD_COMMIT_DPM_TABLE) {
                if (amdgpu_dpm_dispatch_task(adev,
                                             AMD_PP_TASK_READJUST_POWER_STATE,
                                             NULL))
                        goto err_out;
        }

        amdgpu_pm_put_access(adev);

        return count;

err_out:
        amdgpu_pm_put_access(adev);

        return -EINVAL;
}

static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int size = 0;
        int ret;
        enum pp_clock_type od_clocks[6] = {
                OD_SCLK,
                OD_MCLK,
                OD_VDDC_CURVE,
                OD_RANGE,
                OD_VDDGFX_OFFSET,
                OD_CCLK,
        };
        uint clk_index;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
                ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
                if (ret)
                        break;
        }
        if (ret == -ENOENT) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
        }

        if (size == 0)
                size = sysfs_emit(buf, "\n");

        amdgpu_pm_put_access(adev);

        return size;
}

/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature masks and input the
 * new ppfeature masks.
 */
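/*
 * Example (editor's sketch; the mask value below is made up and the card0
 * path is an assumption): read the current masks, clear the bit for the
 * feature to disable, and write the resulting 64-bit mask back.
 *
 *     $ cat /sys/class/drm/card0/device/pp_features
 *     # echo 0x00000623b3cdaff3 > /sys/class/drm/card0/device/pp_features
 */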
static ssize_t amdgpu_set_pp_features(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t featuremask;
        int ret;

        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
                return -EINVAL;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);

        amdgpu_pm_put_access(adev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_features(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        ssize_t size;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        size = amdgpu_dpm_get_ppfeature_status(adev, buf);
        if (size <= 0)
                size = sysfs_emit(buf, "\n");

        amdgpu_pm_put_access(adev);

        return size;
}

/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels. If deep sleep is
 * applied to a clock, the level will be denoted by a special level 'S:'
 * E.g., ::
 *
 *  S: 19Mhz *
 *  0: 615Mhz
 *  1: 800Mhz
 *  2: 888Mhz
 *  3: 1000Mhz
 *
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by inputting a string that
 * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie".
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not supported currently
 */
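
/*
 * Example (editor's sketch reusing the documented command; card0 is an
 * assumption): restrict sclk to levels 4-6 while in manual mode, then
 * read back -- the currently active level is marked with '*'.
 *
 *     # echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *     # echo "4 5 6" > /sys/class/drm/card0/device/pp_dpm_sclk
 *     $ cat /sys/class/drm/card0/device/pp_dpm_sclk
 */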

static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
                                       enum pp_clock_type type,
                                       char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int size = 0;
        int ret = 0;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
        if (ret == -ENOENT)
                size = amdgpu_dpm_print_clock_levels(adev, type, buf);

        if (size == 0)
                size = sysfs_emit(buf, "\n");

        amdgpu_pm_put_access(adev);

        return size;
}

/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX (32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
        int ret;
        unsigned long level;
        char *sub_str = NULL;
        char *tmp;
        char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
        const char delimiter[3] = {' ', '\n', '\0'};
        size_t bytes;

        *mask = 0;

        bytes = min(count, sizeof(buf_cpy) - 1);
        memcpy(buf_cpy, buf, bytes);
        buf_cpy[bytes] = '\0';
        tmp = buf_cpy;
        while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
                if (strlen(sub_str)) {
                        ret = kstrtoul(sub_str, 0, &level);
                        if (ret || level > 31)
                                return -EINVAL;
                        *mask |= 1 << level;
                } else
                        break;
        }

        return 0;
}
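
/*
 * Worked example for amdgpu_read_mask() (annotation added for clarity):
 * the input "4 5 6" parses into levels 4, 5 and 6, so the resulting mask
 * is (1 << 4) | (1 << 5) | (1 << 6) = 0x70. Any level above 31 is rejected
 * with -EINVAL since the mask is a 32-bit value.
 */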

static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
                                       enum pp_clock_type type,
                                       const char *buf,
                                       size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        uint32_t mask = 0;

        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
                return ret;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_force_clock_level(adev, type, mask);

        amdgpu_pm_put_access(adev);

        if (ret)
                return -EINVAL;

        return count;
}

static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buf,
                                        size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
                                       size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
}

static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf,
                                       size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf,
                                         size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf,
                                      size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
}

static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t value = 0;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        value = amdgpu_dpm_get_sclk_od(adev);

        amdgpu_pm_put_access(adev);

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        long int value;

        ret = kstrtol(buf, 0, &value);

        if (ret)
                return -EINVAL;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

        amdgpu_pm_put_access(adev);

        return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t value = 0;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        value = amdgpu_dpm_get_mclk_od(adev);

        amdgpu_pm_put_access(adev);

        return sysfs_emit(buf, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf,
                                     size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int ret;
        long int value;

        ret = kstrtol(buf, 0, &value);

        if (ret)
                return -EINVAL;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

        amdgpu_pm_put_access(adev);

        return count;
}

/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family. Additionally,
 * you can apply the custom heuristics to different clock domains. Each
 * clock domain is considered a distinct operation, so if you modify the
 * gfxclk heuristics and then the memclk heuristics, all of the
 * custom heuristics will be retained until you switch to another profile.
 *
 */
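
/*
 * Example (editor's sketch; the profile index and parameter values are
 * made up -- the custom profile number and its parameter list are
 * ASIC-family specific, so consult the file's read-back output first):
 *
 *     $ cat /sys/class/drm/card0/device/pp_power_profile_mode
 *     # echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode
 *     # echo "6 70 90 0 0" > /sys/class/drm/card0/device/pp_power_profile_mode
 */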

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        ssize_t size;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        size = amdgpu_dpm_get_power_profile_mode(adev, buf);
        if (size <= 0)
                size = sysfs_emit(buf, "\n");

        amdgpu_pm_put_access(adev);

        return size;
}


static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                                                struct device_attribute *attr,
                                                const char *buf,
                                                size_t count)
{
        int ret;
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t parameter_size = 0;
        long parameter[64];
        char *sub_str, buf_cpy[128];
        char *tmp_str;
        uint32_t i = 0;
        char tmp[2];
        long int profile_mode = 0;
        const char delimiter[3] = {' ', '\n', '\0'};

        tmp[0] = *(buf);
        tmp[1] = '\0';
        ret = kstrtol(tmp, 0, &profile_mode);
        if (ret)
                return -EINVAL;

        if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
                if (count < 2 || count > 127)
                        return -EINVAL;
                while (isspace(*++buf))
                        i++;
                memcpy(buf_cpy, buf, count-i);
                tmp_str = buf_cpy;
                while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
                        if (strlen(sub_str) == 0)
                                continue;
                        ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
                        if (ret)
                                return -EINVAL;
                        parameter_size++;
                        if (!tmp_str)
                                break;
                        while (isspace(*tmp_str))
                                tmp_str++;
                }
        }
        parameter[parameter_size] = profile_mode;

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

        amdgpu_pm_put_access(adev);

        if (!ret)
                return count;

        return -EINVAL;
}

static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
                                           enum amd_pp_sensors sensor,
                                           void *query)
{
        int r, size = sizeof(uint32_t);

        r = amdgpu_pm_get_access_if_active(adev);
        if (r)
                return r;

        /* get the sensor value */
        r = amdgpu_dpm_read_sensor(adev, sensor, query, &size);

        amdgpu_pm_put_access(adev);

        return r;
}

/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
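
/*
 * Example (editor's sketch; card0 is an assumption and the value shown is
 * made up). The same read-only pattern applies to mem_busy_percent and
 * vcn_busy_percent below.
 *
 *     $ cat /sys/class/drm/card0/device/gpu_busy_percent
 *     37
 */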
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        unsigned int value;
        int r;

        r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
        if (r)
                return r;

        return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        unsigned int value;
        int r;

        r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
        if (r)
                return r;

        return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: vcn_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VCN
 * is as a percentage. The file vcn_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        unsigned int value;
        int r;

        r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
        if (r)
                return r;

        return sysfs_emit(buf, "%d\n", value);
}

/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
 */
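
/*
 * Example (editor's sketch; the numbers are made up): the file emits the
 * received and sent message counts followed by the mps in bytes.
 *
 *     $ cat /sys/class/drm/card0/device/pcie_bw
 *     4096 2048 256
 *
 * With those made-up values, a rough upper bound on last-second traffic
 * is (4096 + 2048) * 256 = 1572864 bytes, since each counted message
 * carries at most mps bytes of payload.
 */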
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint64_t count0 = 0, count1 = 0;
        int ret;

        if (adev->flags & AMD_IS_APU)
                return -ENODATA;

        if (!adev->asic_funcs->get_pcie_usage)
                return -ENODATA;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

        amdgpu_pm_put_access(adev);

        return sysfs_emit(buf, "%llu %llu %i\n",
                          count0, count1, pcie_get_mps(adev->pdev));
}

/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the
 * GPU. The file unique_id is used for this. It provides a unique ID that
 * persists from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        if (adev->unique_id)
                return sysfs_emit(buf, "%016llx\n", adev->unique_id);

        return 0;
}

/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are rejected.
 */
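
/*
 * Example (editor's sketch; card0 is an assumption): log throttling events
 * at most once per minute, or disable logging entirely.
 *
 *     # echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *     # echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging
 */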
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
                                                     struct device_attribute *attr,
                                                     char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
                          adev_to_drm(adev)->unique,
                          atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
                          adev->throttling_logging_rs.interval / HZ + 1);
}

static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
                                                     struct device_attribute *attr,
                                                     const char *buf,
                                                     size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        long throttling_logging_interval;
        int ret = 0;

        ret = kstrtol(buf, 0, &throttling_logging_interval);
        if (ret)
                return ret;

        if (throttling_logging_interval > 3600)
                return -EINVAL;

        if (throttling_logging_interval > 0) {
                /*
                 * Reset the ratelimit timer internals.
                 * This can effectively restart the timer.
                 */
                ratelimit_state_reset_interval(&adev->throttling_logging_rs,
                                               (throttling_logging_interval - 1) * HZ);
                atomic_set(&adev->throttling_logging_enabled, 1);
        } else {
                atomic_set(&adev->throttling_logging_enabled, 0);
        }

        return count;
}

/**
 * DOC: apu_thermal_cap
 *
 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
 * limit temperature in millidegrees Celsius.
 *
 * Reading back the file shows you the core limit value.
 *
 * Writing an integer to the file sets a new thermal limit. The value
 * should be between 0 and 100. If the value is less than 0 or greater
 * than 100, the write request will be rejected.
 */
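/*
 * Example (editor's sketch; the limit value is arbitrary and card0 is an
 * assumption): read the current core limit and set a new one.
 *
 *     $ cat /sys/class/drm/card0/device/apu_thermal_cap
 *     # echo 95 > /sys/class/drm/card0/device/apu_thermal_cap
 */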
static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        int ret, size;
        u32 limit;
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
        if (!ret)
                size = sysfs_emit(buf, "%u\n", limit);
        else
                size = sysfs_emit(buf, "failed to get thermal limit\n");

        amdgpu_pm_put_access(adev);

        return size;
}

static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf,
                                          size_t count)
{
        int ret;
        u32 value;
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);

        ret = kstrtou32(buf, 10, &value);
        if (ret)
                return ret;

        if (value > 100) {
                dev_err(dev, "Invalid argument !\n");
                return -EINVAL;
        }

        ret = amdgpu_pm_get_access(adev);
        if (ret < 0)
                return ret;

        ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
        if (ret) {
                amdgpu_pm_put_access(adev);
                dev_err(dev, "failed to update thermal limit\n");
                return ret;
        }

        amdgpu_pm_put_access(adev);

        return count;
}

static int amdgpu_pm_metrics_attr_update(struct amdgpu_device *adev,
                                         struct amdgpu_device_attr *attr,
                                         uint32_t mask,
                                         enum amdgpu_device_attr_states *states)
{
        if (amdgpu_dpm_get_pm_metrics(adev, NULL, 0) == -EOPNOTSUPP)
                *states = ATTR_STATE_UNSUPPORTED;

        return 0;
}

static ssize_t amdgpu_get_pm_metrics(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        ssize_t size = 0;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        size = amdgpu_dpm_get_pm_metrics(adev, buf, PAGE_SIZE);

        amdgpu_pm_put_access(adev);

        return size;
}

/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * These data include temperature, frequency, engine utilization,
 * power consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it gives a snapshot of all sensors
 * at the same time.
 */
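/*
 * Example (editor's sketch): gpu_metrics is a binary, versioned blob, so
 * inspect it with a binary-aware tool rather than plain cat; decoding it
 * requires the matching gpu_metrics structure layout for the ASIC.
 *
 *     $ hexdump -C /sys/class/drm/card0/device/gpu_metrics | head
 */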
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        void *gpu_metrics;
        ssize_t size = 0;
        int ret;

        ret = amdgpu_pm_get_access_if_active(adev);
        if (ret)
                return ret;

        size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
        if (size <= 0)
                goto out;

        if (size >= PAGE_SIZE)
                size = PAGE_SIZE - 1;

        memcpy(buf, gpu_metrics, size);

out:
        amdgpu_pm_put_access(adev);

        return size;
}

static int amdgpu_show_powershift_percent(struct device *dev,
                                          char *buf, enum amd_pp_sensors sensor)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        uint32_t ss_power;
        int r = 0, i;

        r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
        if (r == -EOPNOTSUPP) {
                /* sensor not available on dGPU, try to read from APU */
                adev = NULL;
                mutex_lock(&mgpu_info.mutex);
                for (i = 0; i < mgpu_info.num_gpu; i++) {
                        if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
                                adev = mgpu_info.gpu_ins[i].adev;
                                break;
                        }
                }
                mutex_unlock(&mgpu_info.mutex);
                if (adev)
                        r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
        }

        if (r)
                return r;

        return sysfs_emit(buf, "%u%%\n", ss_power);
}

/**
 * DOC: smartshift_apu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the APU power
 * shift in percentage if the platform supports smartshift. A value of 0
 * means that there is no powershift, and values in [1-100] mean that power
 * is shifted to the APU; the percentage of boost is with respect to the
 * APU power limit on the platform.
 */

static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
                                               char *buf)
{
        return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_APU_SHARE);
}

/**
 * DOC: smartshift_dgpu_power
 *
 * The amdgpu driver provides a sysfs API for reporting the dGPU power
 * shift in percentage if the platform supports smartshift. A value of 0
 * means that there is no powershift, and values in [1-100] mean that power
 * is shifted to the dGPU; the percentage of boost is with respect to the
 * dGPU power limit on the platform.
 */

static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
                                                char *buf)
{
        return amdgpu_show_powershift_percent(dev, buf, AMDGPU_PP_SENSOR_SS_DGPU_SHARE);
}

/**
 * DOC: smartshift_bias
 *
 * The amdgpu driver provides a sysfs API for reporting the
 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
 * and the default is 0. -100 sets maximum preference to the APU
 * and 100 sets maximum preference to the dGPU.
 */
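
/*
 * Example (editor's sketch; card0 is an assumption): bias power toward
 * the APU and read the setting back.
 *
 *     # echo -50 > /sys/class/drm/card0/device/smartshift_bias
 *     $ cat /sys/class/drm/card0/device/smartshift_bias
 *     -50
 */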

static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)
{
        int r = 0;

        r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);

        return r;
}

static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
                                          struct device_attribute *attr,
                                          const char *buf, size_t count)
{
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
        int r = 0;
        int bias = 0;

        /*
         * Bail out before taking the runtime PM reference so the error
         * path never drops a reference it did not acquire.
         */
        r = kstrtoint(buf, 10, &bias);
        if (r)
                return r;

        r = amdgpu_pm_get_access(adev);
        if (r < 0)
                return r;

        if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
                bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
        else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
                bias = AMDGPU_SMARTSHIFT_MIN_BIAS;

        amdgpu_smartshift_bias = bias;
        r = count;

        /* TODO: update bias level with SMU message */

        amdgpu_pm_put_access(adev);

        return r;
}

static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				uint32_t mask, enum amdgpu_device_attr_states *states)
{
	if (!amdgpu_device_supports_smart_shift(adev))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t ss_power;

	if (!amdgpu_device_supports_smart_shift(adev))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
						 (void *)&ss_power))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
						 (void *)&ss_power))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static int pp_od_clk_voltage_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
					 uint32_t mask, enum amdgpu_device_attr_states *states)
{
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	*states = ATTR_STATE_SUPPORTED;

	if (!amdgpu_dpm_is_overdrive_supported(adev)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	/* Enable pp_od_clk_voltage node for gc 9.4.3, 9.4.4, 9.5.0 SRIOV/BM support */
	if (gc_ver == IP_VERSION(9, 4, 3) ||
	    gc_ver == IP_VERSION(9, 4, 4) ||
	    gc_ver == IP_VERSION(9, 5, 0)) {
		if (amdgpu_sriov_multi_vf_mode(adev))
			*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (!(attr->flags & mask))
		*states = ATTR_STATE_UNSUPPORTED;

	return 0;
}

static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
				      uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	uint32_t gc_ver;

	*states = ATTR_STATE_SUPPORTED;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	/* dcefclk node is not available on gfx 11.0.3 sriov */
	if ((gc_ver == IP_VERSION(11, 0, 3) && amdgpu_sriov_is_pp_one_vf(adev)) ||
	    gc_ver < IP_VERSION(9, 0, 0) ||
	    !amdgpu_device_has_display_hardware(adev))
		*states = ATTR_STATE_UNSUPPORTED;

	/* SMU MP1 does not support dcefclk level setting;
	 * setting should not be allowed from VF if not in one VF mode.
	 */
	if (gc_ver >= IP_VERSION(10, 0, 0) ||
	    (amdgpu_sriov_multi_vf_mode(adev))) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

	return 0;
}

static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
					  uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	enum amdgpu_device_attr_id attr_id = attr->attr_id;
	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	*states = ATTR_STATE_SUPPORTED;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (mp1_ver < IP_VERSION(10, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(9, 4, 3) ||
		      gc_ver == IP_VERSION(9, 4, 4) ||
		      gc_ver == IP_VERSION(9, 5, 0)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(9, 4, 3) ||
		      gc_ver == IP_VERSION(9, 4, 4) ||
		      gc_ver == IP_VERSION(9, 5, 0)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (gc_ver == IP_VERSION(9, 4, 2) ||
		    gc_ver == IP_VERSION(9, 4, 3) ||
		    gc_ver == IP_VERSION(9, 4, 4) ||
		    gc_ver == IP_VERSION(9, 5, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	}

	switch (gc_ver) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	/* setting should not be allowed from VF if not in one VF mode */
	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
	}

	return 0;
}

/* pm policy attributes */
struct amdgpu_pm_policy_attr {
	struct device_attribute dev_attr;
	enum pp_pm_policy id;
};

/**
 * DOC: pm_policy
 *
 * Certain SOCs can support different power policies to optimize application
 * performance. However, this policy is provided only at the SOC level and not
 * at a per-process level. This is useful especially when the entire SOC is
 * utilized for a dedicated workload.
 *
 * The amdgpu driver provides a sysfs API for selecting the policy. Presently,
 * only two types of policies are supported through this interface.
 *
 * Pstate Policy Selection - This is to select different Pstate profiles which
 * decide clock/throttling preferences.
 *
 * XGMI PLPD Policy Selection - When multiple devices are connected over XGMI,
 * this helps to select the policy to be applied for per-link power down.
 *
 * The available policies and policy levels vary between SOCs. They can be
 * viewed under the pm_policy node directory. If the SOC doesn't support any
 * policy, this node won't be available. The different policies supported will
 * be available as separate nodes under pm_policy.
 *
 * cat /sys/bus/pci/devices/.../pm_policy/<policy_type>
 *
 * Reading the policy file shows the different levels supported. The level which
 * is applied presently is denoted by * (asterisk). E.g.,
 *
 * .. code-block:: console
 *
 *	cat /sys/bus/pci/devices/.../pm_policy/soc_pstate
 *	0 : soc_pstate_default
 *	1 : soc_pstate_0
 *	2 : soc_pstate_1*
 *	3 : soc_pstate_2
 *
 *	cat /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
 *	0 : plpd_disallow
 *	1 : plpd_default
 *	2 : plpd_optimized*
 *
 * To apply a specific policy:
 *
 * "echo <level> > /sys/bus/pci/devices/.../pm_policy/<policy_type>"
 *
 * For the levels listed in the example above, to select "plpd_optimized" for
 * XGMI and "soc_pstate_2" for the soc pstate policy -
 *
 * .. code-block:: console
 *
 *	echo "2" > /sys/bus/pci/devices/.../pm_policy/xgmi_plpd
 *	echo "3" > /sys/bus/pci/devices/.../pm_policy/soc_pstate
 *
 */
static ssize_t amdgpu_get_pm_policy_attr(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *policy_attr;

	policy_attr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);

	return amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, buf);
}

static ssize_t amdgpu_set_pm_policy_attr(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *policy_attr;
	int ret, num_params = 0;
	char delimiter[] = " \n\t";
	char tmp_buf[128];
	char *tmp, *param;
	long val;

	count = min(count, sizeof(tmp_buf));
	memcpy(tmp_buf, buf, count);
	tmp_buf[count - 1] = '\0';
	tmp = tmp_buf;

	tmp = skip_spaces(tmp);
	while ((param = strsep(&tmp, delimiter))) {
		if (!strlen(param)) {
			tmp = skip_spaces(tmp);
			continue;
		}
		ret = kstrtol(param, 0, &val);
		if (ret)
			return -EINVAL;
		num_params++;
		if (num_params > 1)
			return -EINVAL;
	}

	if (num_params != 1)
		return -EINVAL;

	policy_attr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr);

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_pm_policy(adev, policy_attr->id, val);

	amdgpu_pm_put_access(adev);

	if (ret)
		return ret;

	return count;
}

#define AMDGPU_PM_POLICY_ATTR(_name, _id)				\
	static struct amdgpu_pm_policy_attr pm_policy_attr_##_name = {	\
		.dev_attr = __ATTR(_name, 0644, amdgpu_get_pm_policy_attr, \
				   amdgpu_set_pm_policy_attr),		\
		.id = PP_PM_POLICY_##_id,				\
	};

#define AMDGPU_PM_POLICY_ATTR_VAR(_name) pm_policy_attr_##_name.dev_attr.attr

AMDGPU_PM_POLICY_ATTR(soc_pstate, SOC_PSTATE)
AMDGPU_PM_POLICY_ATTR(xgmi_plpd, XGMI_PLPD)

static struct attribute *pm_policy_attrs[] = {
	&AMDGPU_PM_POLICY_ATTR_VAR(soc_pstate),
	&AMDGPU_PM_POLICY_ATTR_VAR(xgmi_plpd),
	NULL
};

static umode_t amdgpu_pm_policy_attr_visible(struct kobject *kobj,
					     struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct amdgpu_pm_policy_attr *policy_attr;

	policy_attr =
		container_of(attr, struct amdgpu_pm_policy_attr, dev_attr.attr);

	if (amdgpu_dpm_get_pm_policy_info(adev, policy_attr->id, NULL) ==
	    -ENOENT)
		return 0;

	return attr->mode;
}

const struct attribute_group amdgpu_pm_policy_attr_group = {
	.name = "pm_policy",
	.attrs = pm_policy_attrs,
	.is_visible = amdgpu_pm_policy_attr_visible,
};

static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_dcefclk_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
			      .attr_update = pp_dpm_clk_default_attr_update),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC,
			      .attr_update = pp_od_clk_voltage_attr_update),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
	AMDGPU_DEVICE_ATTR_RO(pm_metrics, ATTR_FLAG_BASIC,
			      .attr_update = amdgpu_pm_metrics_attr_update),
};

static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	enum amdgpu_device_attr_id attr_id = attr->attr_id;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

	if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver == IP_VERSION(9, 0, 1))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
		if (!(gc_ver == IP_VERSION(9, 3, 0) ||
		      gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 3) ||
		      gc_ver == IP_VERSION(10, 3, 6) ||
		      gc_ver == IP_VERSION(10, 3, 7) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 1) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3) ||
		      gc_ver == IP_VERSION(11, 0, 4) ||
		      gc_ver == IP_VERSION(11, 5, 0) ||
		      gc_ver == IP_VERSION(11, 5, 1) ||
		      gc_ver == IP_VERSION(11, 5, 2) ||
		      gc_ver == IP_VERSION(11, 5, 3) ||
		      gc_ver == IP_VERSION(12, 0, 0) ||
		      gc_ver == IP_VERSION(12, 0, 1)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU ||
		    !adev->asic_funcs->get_pcie_usage)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		switch (gc_ver) {
		case IP_VERSION(9, 0, 1):
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		case IP_VERSION(9, 4, 4):
		case IP_VERSION(9, 5, 0):
		case IP_VERSION(10, 3, 0):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 2):
		case IP_VERSION(11, 0, 3):
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
			*states = ATTR_STATE_SUPPORTED;
			break;
		default:
			*states = ATTR_STATE_UNSUPPORTED;
		}
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if ((adev->flags & AMD_IS_APU &&
		     gc_ver != IP_VERSION(9, 4, 3)) ||
		    gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (gc_ver < IP_VERSION(9, 1, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
		else if ((gc_ver == IP_VERSION(10, 3, 0) ||
			  gc_ver == IP_VERSION(11, 0, 3)) && amdgpu_sriov_vf(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_mclk_od)) {
		if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_sclk_od)) {
		if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
		u32 limit;

		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
		    -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	switch (gc_ver) {
	case IP_VERSION(10, 3, 0):
		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
		    amdgpu_sriov_vf(adev)) {
			dev_attr->attr.mode &= ~0222;
			dev_attr->store = NULL;
		}
		break;
	default:
		break;
	}

	return 0;
}


static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;
	struct device_attribute *dev_attr;
	const char *name;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	if (!attr)
		return -EINVAL;

	dev_attr = &attr->dev_attr;
	name = dev_attr->attr.name;

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}

static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
						    (void *)&temp);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
						    (void *)&temp);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
						    (void *)&temp);
		break;
	default:
		r = -EINVAL;
		break;
	}

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return sysfs_emit(buf, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return sysfs_emit(buf, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return sysfs_emit(buf, "%u\n", pwm_mode);
}

static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	u32 pwm_mode;
	int value;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_NONE;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else if (value == 2)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else
		return -EINVAL;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sysfs_emit(buf, "%i\n", 255);
}

static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	err = amdgpu_pm_get_access(adev);
	if (err < 0)
		return err;

	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
	if (err)
		goto out;

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		err = -EINVAL;
		goto out;
	}

	err = amdgpu_dpm_set_fan_speed_pwm(adev, value);

out:
	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = amdgpu_pm_get_access_if_active(adev);
	if (err)
		return err;

	err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);

	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return sysfs_emit(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = amdgpu_pm_get_access_if_active(adev);
	if (err)
		return err;

	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);

	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return sysfs_emit(buf, "%i\n", speed);
}

static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	int r;

	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
					    (void *)&min_rpm);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", min_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	int r;

	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
					    (void *)&max_rpm);

	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", max_rpm);
}

static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	err = amdgpu_pm_get_access_if_active(adev);
	if (err)
		return err;

	err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);

	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return sysfs_emit(buf, "%i\n", rpm);
}

static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	err = amdgpu_pm_get_access(adev);
	if (err < 0)
		return err;

	err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
	if (err)
		goto out;

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		err = -ENODATA;
		goto out;
	}

	err = amdgpu_dpm_set_fan_speed_rpm(adev, value);

out:
	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = amdgpu_pm_get_access_if_active(adev);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);

	amdgpu_pm_put_access(adev);

	if (ret)
		return -EINVAL;

	return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}

static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;
	u32 pwm_mode;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else
		return -EINVAL;

	err = amdgpu_pm_get_access(adev);
	if (err < 0)
		return err;

	err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);

	amdgpu_pm_put_access(adev);

	if (err)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddgfx;
	int r;

	/* get the voltage */
	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
					    (void *)&vddgfx);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddboard;
	int r;

	/* get the voltage */
	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
					    (void *)&vddboard);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", vddboard);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return sysfs_emit(buf, "vddgfx\n");
}

static ssize_t amdgpu_hwmon_show_vddboard_label(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	return sysfs_emit(buf, "vddboard\n");
}

static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddnb;
	int r;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	/* get the voltage */
	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
					    (void *)&vddnb);
	if (r)
		return r;

	return sysfs_emit(buf, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return sysfs_emit(buf, "vddnb\n");
}

static int amdgpu_hwmon_get_power(struct device *dev,
				  enum amd_pp_sensors sensor)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	unsigned int uw;
	u32 query = 0;
	int r;

	r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
	if (r)
		return r;

	/* convert to microwatts */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return uw;
}

static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	ssize_t val;

	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_AVG_POWER);
	if (val < 0)
		return val;

	return sysfs_emit(buf, "%zd\n", val);
}

static ssize_t amdgpu_hwmon_show_power_input(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	ssize_t val;

	val = amdgpu_hwmon_get_power(dev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER);
	if (val < 0)
		return val;

	return sysfs_emit(buf, "%zd\n", val);
}

static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
						   struct device_attribute *attr,
						   char *buf,
						   enum pp_power_limit_level pp_limit_level)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
	uint32_t limit;
	ssize_t size;
	int r;

	r = amdgpu_pm_get_access_if_active(adev);
	if (r)
		return r;

	r = amdgpu_dpm_get_power_limit(adev, &limit,
				       pp_limit_level, power_type);

	if (!r)
		size = sysfs_emit(buf, "%u\n", limit * 1000000);
	else
		size = sysfs_emit(buf, "\n");

	amdgpu_pm_put_access(adev);

	return size;
}

static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MIN);
}

static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
}

static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
}

static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
						   struct device_attribute *attr,
						   char *buf)
{
	return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
}

static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);

	if (gc_ver == IP_VERSION(10, 3, 1))
		return sysfs_emit(buf, "%s\n",
				  to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
				  "fastPPT" : "slowPPT");
	else
		return sysfs_emit(buf, "PPT\n");
}

static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int limit_type = to_sensor_dev_attr(attr)->index;
	int err;
	u32 value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert to Watt */
	value |= limit_type << 24;

	err = amdgpu_pm_get_access(adev);
	if (err < 0)
		return err;

	err = amdgpu_dpm_set_power_limit(adev, value);

	amdgpu_pm_put_access(adev);

	if (err)
		return err;

	return count;
}

static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t sclk;
	int r;

	/* get the sclk */
	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
					    (void *)&sclk);
	if (r)
		return r;

	return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "sclk\n");
}

static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t mclk;
	int r;

	/* get the mclk */
	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
					    (void *)&mclk);
	if (r)
		return r;

	return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return sysfs_emit(buf, "mclk\n");
}

/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
 *   - these are supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
 *
 * - power1_input: instantaneous power used by the SoC in microWatts. On APUs this includes the CPU.
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum fan speed. Unit: revolutions/min (RPM)
 *
 * - fan1_max: maximum fan speed. Unit: revolutions/min (RPM)
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed. Unit: revolutions/min (RPM)
 *
 * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable 0: Disable
 *
 * NOTE: DO NOT set the fan speed via "pwm1" and "fan[1-\*]_target" interfaces at the same time.
 * Doing so will cause the former to be overridden.
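 *
 * For example, to take manual control of the fan and run it at roughly 50%
 * duty cycle (the hwmon directory index is illustrative and varies per
 * system):
 *
 * .. code-block:: console
 *
 *	echo 1 > /sys/class/hwmon/hwmon0/pwm1_enable
 *	echo 128 > /sys/class/hwmon/hwmon0/pwm1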
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
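 *
 * For example, reading the edge temperature and the current power cap
 * directly (the hwmon index and the values shown are illustrative):
 *
 * .. code-block:: console
 *
 *	cat /sys/class/hwmon/hwmon0/temp1_input
 *	38000
 *	cat /sys/class/hwmon/hwmon0/power1_cap
 *	212000000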
 *
 */

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, amdgpu_hwmon_show_vddboard, NULL, 0);
static SENSOR_DEVICE_ATTR(in2_label, S_IRUGO, amdgpu_hwmon_show_vddboard_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, amdgpu_hwmon_show_power_input, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);

static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_in2_input.dev_attr.attr,
	&sensor_dev_attr_in2_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_input.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_power1_cap_default.dev_attr.attr,
	&sensor_dev_attr_power1_label.dev_attr.attr,
	&sensor_dev_attr_power2_average.dev_attr.attr,
	&sensor_dev_attr_power2_cap_max.dev_attr.attr,
	&sensor_dev_attr_power2_cap_min.dev_attr.attr,
	&sensor_dev_attr_power2_cap.dev_attr.attr,
	&sensor_dev_attr_power2_cap_default.dev_attr.attr,
	&sensor_dev_attr_power2_label.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};

static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	uint32_t tmp;

	/* under pp one vf mode, management of hwmon attributes is not supported */
	if (amdgpu_sriov_is_pp_one_vf(adev))
		effective_mode &= ~S_IWUSR;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip crit temp on APU */
	if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
	     (gc_ver == IP_VERSION(9, 4, 3) || gc_ver == IP_VERSION(9, 4, 4) ||
	      gc_ver == IP_VERSION(9, 5, 0))) &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* mask fan attributes if we have no bindings for this asic to expose */
	if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
	    ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
		effective_mode &= ~S_IRUGO;

	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
	    ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
		effective_mode &= ~S_IWUSR;

	/* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
	if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
	    attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) {
		if (adev->family == AMDGPU_FAMILY_SI ||
		    ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) &&
		     (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) ||
		    (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3)))
			return 0;
	}

	/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
	if (((adev->family == AMDGPU_FAMILY_SI) ||
	     ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
		return 0;

	/* not all products support both average and instantaneous */
	if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
		return 0;
	if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
		return 0;

	/* hide max/min values if we can't both query and manage the fan */
	if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	     (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
		return 0;

	if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
	    (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
		return 0;

	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV ||	/* not implemented yet */
	     (gc_ver == IP_VERSION(9, 4, 3) ||
	      gc_ver == IP_VERSION(9, 4, 4) ||
	      gc_ver == IP_VERSION(9, 5, 0))) &&
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs other than gc 9.4.3 have vddnb */
	if ((!(adev->flags & AMD_IS_APU) ||
	     (gc_ver == IP_VERSION(9, 4, 3) ||
	      gc_ver == IP_VERSION(9, 4, 4) ||
	      gc_ver == IP_VERSION(9, 5, 0))) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* only a few boards support vddboard */
	if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
	    amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
					    (void *)&tmp) == -EOPNOTSUPP)
		return 0;

	/* no mclk on APUs other than gc 9.4.3 */
	if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)) &&
	    (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr))
		return 0;

	/* hotspot temperature for gc 9.4.3 */
	if (gc_ver == IP_VERSION(9, 4, 3) ||
	    gc_ver == IP_VERSION(9, 4, 4) ||
	    gc_ver == IP_VERSION(9, 5, 0)) {
		if (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp1_label.dev_attr.attr)
			return 0;

		if (attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
		    attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr)
			return attr->mode;
	}

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
	    (attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
		return 0;

	/* only Vangogh has fast PPT limit and power labels */
	if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
	    (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
	     attr == &sensor_dev_attr_power2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}

static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
3587
3588
static int amdgpu_retrieve_od_settings(struct amdgpu_device *adev,
3589
enum pp_clock_type od_type,
3590
char *buf)
3591
{
3592
int size = 0;
3593
int ret;
3594
3595
ret = amdgpu_pm_get_access_if_active(adev);
3596
if (ret)
3597
return ret;
3598
3599
size = amdgpu_dpm_print_clock_levels(adev, od_type, buf);
3600
if (size == 0)
3601
size = sysfs_emit(buf, "\n");
3602
3603
amdgpu_pm_put_access(adev);
3604
3605
return size;
3606
}
3607
3608
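/*
 * Parse a command written to one of the OD sysfs interfaces. As implemented
 * below, the input takes one of three forms:
 *   "c"         - commit the pending settings (*type becomes
 *                 PP_OD_COMMIT_DPM_TABLE)
 *   "r"         - restore the defaults; the original command type is kept
 *                 as the single parameter and *type becomes
 *                 PP_OD_RESTORE_DEFAULT_TABLE
 *   "N1 N2 ..." - whitespace-separated integer parameters for the command
 *                 type passed in via @type
 */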
static int parse_input_od_command_lines(const char *buf,
					size_t count,
					u32 *type,
					long *params,
					uint32_t *num_of_params)
{
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t parameter_size = 0;
	char buf_cpy[128] = {0};
	char *tmp_str, *sub_str;
	int ret;

	if (count > sizeof(buf_cpy) - 1)
		return -EINVAL;

	memcpy(buf_cpy, buf, count);
	tmp_str = buf_cpy;

	/* skip leading spaces */
	while (isspace(*tmp_str))
		tmp_str++;

	switch (*tmp_str) {
	case 'c':
		*type = PP_OD_COMMIT_DPM_TABLE;
		return 0;
	case 'r':
		params[parameter_size] = *type;
		*num_of_params = 1;
		*type = PP_OD_RESTORE_DEFAULT_TABLE;
		return 0;
	default:
		break;
	}

	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
			continue;

		ret = kstrtol(sub_str, 0, &params[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		if (!tmp_str)
			break;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	*num_of_params = parameter_size;

	return 0;
}

static int
amdgpu_distribute_custom_od_settings(struct amdgpu_device *adev,
				     enum PP_OD_DPM_TABLE_COMMAND cmd_type,
				     const char *in_buf,
				     size_t count)
{
	uint32_t parameter_size = 0;
	long parameter[64];
	int ret;

	ret = parse_input_od_command_lines(in_buf,
					   count,
					   &cmd_type,
					   parameter,
					   &parameter_size);
	if (ret)
		return ret;

	ret = amdgpu_pm_get_access(adev);
	if (ret < 0)
		return ret;

	ret = amdgpu_dpm_odn_edit_dpm_table(adev,
					    cmd_type,
					    parameter,
					    parameter_size);
	if (ret)
		goto err_out;

	if (cmd_type == PP_OD_COMMIT_DPM_TABLE) {
		ret = amdgpu_dpm_dispatch_task(adev,
					       AMD_PP_TASK_READJUST_POWER_STATE,
					       NULL);
		if (ret)
			goto err_out;
	}

	amdgpu_pm_put_access(adev);

	return count;

err_out:
	amdgpu_pm_put_access(adev);

	return ret;
}

/**
 * DOC: fan_curve
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the fan
 * control curve line.
 *
 * Reading back the file shows you the current settings (temperature in
 * degrees Celsius and fan speed in PWM) applied to every anchor point of the
 * curve line, and their permitted ranges if changeable.
 *
 * Writing a string in the format "anchor_point_index temperature
 * fan_speed_in_pwm" to the file changes the settings for the specified
 * anchor point accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default values, write "r" (reset) to the file
 * to reset them.
 *
 * There are two fan control modes supported: auto and manual. In auto mode,
 * the PMFW handles fan speed control (how the fan speed reacts to the ASIC
 * temperature), while in manual mode users can set their own fan curve line
 * as described here. Normally the ASIC boots up in auto mode. Any setting
 * made via this interface will switch the fan control to manual mode
 * implicitly.
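 *
 * A hypothetical editing session (paths and values are illustrative only)
 * might look like:
 *
 *   $ cd /sys/class/drm/card0/device/gpu_od/fan_ctrl
 *   $ cat fan_curve                  # inspect anchor points and ranges
 *   $ echo "2 75 200" > fan_curve    # anchor point 2: 75 C -> PWM 200
 *   $ echo "c" > fan_curve           # commit the change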
 */
static ssize_t fan_curve_show(struct kobject *kobj,
			      struct kobj_attribute *attr,
			      char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_CURVE, buf);
}

static ssize_t fan_curve_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf,
			       size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_FAN_CURVE,
							     buf,
							     count);
}

static umode_t fan_curve_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_CURVE_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: acoustic_limit_rpm_threshold
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * acoustic limit in RPM for fan control.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
 *
 * This setting works under auto fan control mode only. It adjusts the PMFW's
 * behavior about the maximum speed in RPM the fan can spin. Setting via this
 * interface will switch the fan control to auto mode implicitly.
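 *
 * For example (values illustrative only):
 *
 *   $ echo "3000" > acoustic_limit_rpm_threshold   # cap the fan at 3000 RPM
 *   $ echo "c" > acoustic_limit_rpm_threshold      # commit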
 */
static ssize_t acoustic_limit_threshold_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_LIMIT, buf);
}

static ssize_t acoustic_limit_threshold_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf,
					      size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_ACOUSTIC_LIMIT,
							     buf,
							     count);
}

static umode_t acoustic_limit_threshold_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_LIMIT_THRESHOLD_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: acoustic_target_rpm_threshold
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * acoustic target in RPM for fan control.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
 *
 * This setting works under auto fan control mode only. It can co-exist with
 * other settings that also work under auto mode. It adjusts the PMFW's
 * behavior about the maximum speed in RPM the fan can spin when the ASIC
 * temperature is not greater than the target temperature. Setting via this
 * interface will switch the fan control to auto mode implicitly.
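 *
 * For example (values illustrative only):
 *
 *   $ echo "1500" > acoustic_target_rpm_threshold  # target 1500 RPM
 *   $ echo "c" > acoustic_target_rpm_threshold     # commit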
 */
static ssize_t acoustic_target_threshold_show(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_ACOUSTIC_TARGET, buf);
}

static ssize_t acoustic_target_threshold_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf,
					       size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_ACOUSTIC_TARGET,
							     buf,
							     count);
}

static umode_t acoustic_target_threshold_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_ACOUSTIC_TARGET_THRESHOLD_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: fan_target_temperature
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * target temperature in degrees Celsius for fan control.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
 *
 * This setting works under auto fan control mode only. It can co-exist with
 * other settings that also work under auto mode. Pairing with the
 * acoustic_target_rpm_threshold setting, they define the maximum speed in
 * RPM the fan can spin when the ASIC temperature is not greater than the
 * target temperature. Setting via this interface will switch the fan
 * control to auto mode implicitly.
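 *
 * For example (values illustrative only):
 *
 *   $ echo "80" > fan_target_temperature   # target 80 degrees Celsius
 *   $ echo "c" > fan_target_temperature    # commit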
 */
static ssize_t fan_target_temperature_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_TARGET_TEMPERATURE, buf);
}

static ssize_t fan_target_temperature_store(struct kobject *kobj,
					    struct kobj_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_FAN_TARGET_TEMPERATURE,
							     buf,
							     count);
}

static umode_t fan_target_temperature_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_TARGET_TEMPERATURE_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: fan_minimum_pwm
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * minimum fan speed in PWM.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
 *
 * This setting works under auto fan control mode only. It can co-exist with
 * other settings that also work under auto mode. It adjusts the PMFW's
 * behavior about the minimum fan speed in PWM the fan should spin. Setting
 * via this interface will switch the fan control to auto mode implicitly.
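 *
 * For example (values illustrative only):
 *
 *   $ echo "35" > fan_minimum_pwm   # never drop below PWM 35
 *   $ echo "c" > fan_minimum_pwm    # commit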
 */
static ssize_t fan_minimum_pwm_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_MINIMUM_PWM, buf);
}

static ssize_t fan_minimum_pwm_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_FAN_MINIMUM_PWM,
							     buf,
							     count);
}

static umode_t fan_minimum_pwm_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_MINIMUM_PWM_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: fan_zero_rpm_enable
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * zero RPM feature.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
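 *
 * For example (values illustrative only):
 *
 *   $ echo "0" > fan_zero_rpm_enable   # disable the zero RPM feature
 *   $ echo "c" > fan_zero_rpm_enable   # commit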
 */
static ssize_t fan_zero_rpm_enable_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_ENABLE, buf);
}

static ssize_t fan_zero_rpm_enable_store(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_FAN_ZERO_RPM_ENABLE,
							     buf,
							     count);
}

static umode_t fan_zero_rpm_enable_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_ENABLE_SET)
		umode |= S_IWUSR;

	return umode;
}

/**
 * DOC: fan_zero_rpm_stop_temperature
 *
 * The amdgpu driver provides a sysfs API for checking and adjusting the
 * zero RPM stop temperature feature.
 *
 * Reading back the file shows you the current setting and the permitted
 * ranges if changeable.
 *
 * Writing an integer to the file changes the setting accordingly.
 *
 * When you have finished the editing, write "c" (commit) to the file to
 * commit your changes.
 *
 * If you want to reset to the default value, write "r" (reset) to the file
 * to reset it.
 *
 * This setting works only if the zero RPM setting is enabled. It adjusts
 * the temperature below which the fan can stop.
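 *
 * For example (values illustrative only):
 *
 *   $ echo "50" > fan_zero_rpm_stop_temperature   # fan may stop below 50 C
 *   $ echo "c" > fan_zero_rpm_stop_temperature    # commit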
 */
static ssize_t fan_zero_rpm_stop_temp_show(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   char *buf)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_retrieve_od_settings(adev, OD_FAN_ZERO_RPM_STOP_TEMP, buf);
}

static ssize_t fan_zero_rpm_stop_temp_store(struct kobject *kobj,
					    struct kobj_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct od_kobj *container = container_of(kobj, struct od_kobj, kobj);
	struct amdgpu_device *adev = (struct amdgpu_device *)container->priv;

	return (ssize_t)amdgpu_distribute_custom_od_settings(adev,
							     PP_OD_EDIT_FAN_ZERO_RPM_STOP_TEMP,
							     buf,
							     count);
}

static umode_t fan_zero_rpm_stop_temp_visible(struct amdgpu_device *adev)
{
	umode_t umode = 0000;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_RETRIEVE)
		umode |= S_IRUSR | S_IRGRP | S_IROTH;

	if (adev->pm.od_feature_mask & OD_OPS_SUPPORT_FAN_ZERO_RPM_STOP_TEMP_SET)
		umode |= S_IWUSR;

	return umode;
}

static struct od_feature_set amdgpu_od_set = {
	.containers = {
		[0] = {
			.name = "fan_ctrl",
			.sub_feature = {
				[0] = {
					.name = "fan_curve",
					.ops = {
						.is_visible = fan_curve_visible,
						.show = fan_curve_show,
						.store = fan_curve_store,
					},
				},
				[1] = {
					.name = "acoustic_limit_rpm_threshold",
					.ops = {
						.is_visible = acoustic_limit_threshold_visible,
						.show = acoustic_limit_threshold_show,
						.store = acoustic_limit_threshold_store,
					},
				},
				[2] = {
					.name = "acoustic_target_rpm_threshold",
					.ops = {
						.is_visible = acoustic_target_threshold_visible,
						.show = acoustic_target_threshold_show,
						.store = acoustic_target_threshold_store,
					},
				},
				[3] = {
					.name = "fan_target_temperature",
					.ops = {
						.is_visible = fan_target_temperature_visible,
						.show = fan_target_temperature_show,
						.store = fan_target_temperature_store,
					},
				},
				[4] = {
					.name = "fan_minimum_pwm",
					.ops = {
						.is_visible = fan_minimum_pwm_visible,
						.show = fan_minimum_pwm_show,
						.store = fan_minimum_pwm_store,
					},
				},
				[5] = {
					.name = "fan_zero_rpm_enable",
					.ops = {
						.is_visible = fan_zero_rpm_enable_visible,
						.show = fan_zero_rpm_enable_show,
						.store = fan_zero_rpm_enable_store,
					},
				},
				[6] = {
					.name = "fan_zero_rpm_stop_temperature",
					.ops = {
						.is_visible = fan_zero_rpm_stop_temp_visible,
						.show = fan_zero_rpm_stop_temp_show,
						.store = fan_zero_rpm_stop_temp_store,
					},
				},
			},
		},
	},
};

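/*
 * With every fan feature supported by the ASIC, the sysfs layout built from
 * the table above would look like this (a sketch; entries not visible on a
 * given ASIC are omitted at runtime):
 *
 *   gpu_od/
 *   `-- fan_ctrl/
 *       |-- fan_curve
 *       |-- acoustic_limit_rpm_threshold
 *       |-- acoustic_target_rpm_threshold
 *       |-- fan_target_temperature
 *       |-- fan_minimum_pwm
 *       |-- fan_zero_rpm_enable
 *       `-- fan_zero_rpm_stop_temperature
 */
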
static void od_kobj_release(struct kobject *kobj)
{
	struct od_kobj *od_kobj = container_of(kobj, struct od_kobj, kobj);

	kfree(od_kobj);
}

static const struct kobj_type od_ktype = {
	.release = od_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void amdgpu_od_set_fini(struct amdgpu_device *adev)
{
	struct od_kobj *container, *container_next;
	struct od_attribute *attribute, *attribute_next;

	if (list_empty(&adev->pm.od_kobj_list))
		return;

	list_for_each_entry_safe(container, container_next,
				 &adev->pm.od_kobj_list, entry) {
		list_del(&container->entry);

		list_for_each_entry_safe(attribute, attribute_next,
					 &container->attribute, entry) {
			list_del(&attribute->entry);
			sysfs_remove_file(&container->kobj,
					  &attribute->attribute.attr);
			kfree(attribute);
		}

		kobject_put(&container->kobj);
	}
}

static bool amdgpu_is_od_feature_supported(struct amdgpu_device *adev,
					   struct od_feature_ops *feature_ops)
{
	umode_t mode;

	if (!feature_ops->is_visible)
		return false;

	/*
	 * If the feature has neither the user read nor the user write mode
	 * set, we can assume the feature is not actually supported, and the
	 * relevant sysfs interface should not be exposed.
	 */
	mode = feature_ops->is_visible(adev);
	if (mode & (S_IRUSR | S_IWUSR))
		return true;

	return false;
}

static bool amdgpu_od_is_self_contained(struct amdgpu_device *adev,
					struct od_feature_container *container)
{
	int i;

	/*
	 * If there is no valid entry within the container, the container
	 * is recognized as self-contained. A valid entry here means one
	 * which has a valid name and is visible/supported on the ASIC.
	 */
	for (i = 0; i < ARRAY_SIZE(container->sub_feature); i++) {
		if (container->sub_feature[i].name &&
		    amdgpu_is_od_feature_supported(adev,
						   &container->sub_feature[i].ops))
			return false;
	}

	return true;
}

static int amdgpu_od_set_init(struct amdgpu_device *adev)
{
	struct od_kobj *top_set, *sub_set;
	struct od_attribute *attribute;
	struct od_feature_container *container;
	struct od_feature_item *feature;
	int i, j;
	int ret;

	/* Setup the top `gpu_od` directory which holds all other OD interfaces */
	top_set = kzalloc(sizeof(*top_set), GFP_KERNEL);
	if (!top_set)
		return -ENOMEM;
	list_add(&top_set->entry, &adev->pm.od_kobj_list);

	ret = kobject_init_and_add(&top_set->kobj,
				   &od_ktype,
				   &adev->dev->kobj,
				   "%s",
				   "gpu_od");
	if (ret)
		goto err_out;
	INIT_LIST_HEAD(&top_set->attribute);
	top_set->priv = adev;

	for (i = 0; i < ARRAY_SIZE(amdgpu_od_set.containers); i++) {
		container = &amdgpu_od_set.containers[i];

		if (!container->name)
			continue;

		/*
		 * If there are valid entries within the container, the container
		 * will be presented as a sub directory and all its holding entries
		 * will be presented as plain files under it.
		 * If there is no valid entry within the container, the container
		 * itself will be presented as a plain file under the top `gpu_od`
		 * directory.
		 */
		if (amdgpu_od_is_self_contained(adev, container)) {
			if (!amdgpu_is_od_feature_supported(adev,
							    &container->ops))
				continue;

			/*
			 * The container is presented as a plain file under the
			 * top `gpu_od` directory.
			 */
			attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
			if (!attribute) {
				ret = -ENOMEM;
				goto err_out;
			}
			list_add(&attribute->entry, &top_set->attribute);

			attribute->attribute.attr.mode =
				container->ops.is_visible(adev);
			attribute->attribute.attr.name = container->name;
			attribute->attribute.show =
				container->ops.show;
			attribute->attribute.store =
				container->ops.store;
			ret = sysfs_create_file(&top_set->kobj,
						&attribute->attribute.attr);
			if (ret)
				goto err_out;
		} else {
			/* The container is presented as a sub directory. */
			sub_set = kzalloc(sizeof(*sub_set), GFP_KERNEL);
			if (!sub_set) {
				ret = -ENOMEM;
				goto err_out;
			}
			list_add(&sub_set->entry, &adev->pm.od_kobj_list);

			ret = kobject_init_and_add(&sub_set->kobj,
						   &od_ktype,
						   &top_set->kobj,
						   "%s",
						   container->name);
			if (ret)
				goto err_out;
			INIT_LIST_HEAD(&sub_set->attribute);
			sub_set->priv = adev;

			for (j = 0; j < ARRAY_SIZE(container->sub_feature); j++) {
				feature = &container->sub_feature[j];
				if (!feature->name)
					continue;

				if (!amdgpu_is_od_feature_supported(adev,
								    &feature->ops))
					continue;

				/*
				 * With the container presented as a sub directory,
				 * the entry within it is presented as a plain file
				 * under the sub directory.
				 */
				attribute = kzalloc(sizeof(*attribute), GFP_KERNEL);
				if (!attribute) {
					ret = -ENOMEM;
					goto err_out;
				}
				list_add(&attribute->entry, &sub_set->attribute);

				attribute->attribute.attr.mode =
					feature->ops.is_visible(adev);
				attribute->attribute.attr.name = feature->name;
				attribute->attribute.show =
					feature->ops.show;
				attribute->attribute.store =
					feature->ops.store;
				ret = sysfs_create_file(&sub_set->kobj,
							&attribute->attribute.attr);
				if (ret)
					goto err_out;
			}
		}
	}

	/*
	 * If gpu_od is the only member in the list, that means gpu_od is an
	 * empty directory, so remove it.
	 */
	if (list_is_singular(&adev->pm.od_kobj_list))
		goto err_out;

	return 0;

err_out:
	amdgpu_od_set_fini(adev);

	return ret;
}

int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;
	uint32_t mask = 0;
	int ret;

	if (adev->pm.sysfs_initialized)
		return 0;

	INIT_LIST_HEAD(&adev->pm.pm_attr_list);

	if (adev->pm.dpm_enabled == 0)
		return 0;

	mode = amdgpu_virt_get_sriov_vf_mode(adev);

	/* under multi-VF mode, none of the hwmon attributes are supported */
	if (mode != SRIOV_VF_MODE_MULTI_VF) {
		adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
									   DRIVER_NAME, adev,
									   hwmon_groups);
		if (IS_ERR(adev->pm.int_hwmon_dev)) {
			ret = PTR_ERR(adev->pm.int_hwmon_dev);
			dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
			return ret;
		}
	}

	switch (mode) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask,
					       &adev->pm.pm_attr_list);
	if (ret)
		goto err_out0;

	if (amdgpu_dpm_is_overdrive_supported(adev)) {
		ret = amdgpu_od_set_init(adev);
		if (ret)
			goto err_out1;
	} else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
		dev_info(adev->dev, "overdrive feature is not supported\n");
	}

	if (amdgpu_dpm_get_pm_policy_info(adev, PP_PM_POLICY_NONE, NULL) !=
	    -EOPNOTSUPP) {
		ret = devm_device_add_group(adev->dev,
					    &amdgpu_pm_policy_attr_group);
		if (ret)
			goto err_out0;
	}

	adev->pm.sysfs_initialized = true;

	return 0;

err_out1:
	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
err_out0:
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	return ret;
}

void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	amdgpu_od_set_fini(adev);

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
					   struct amdgpu_device *adev)
{
	uint16_t *p_val;
	uint32_t size;
	int i;
	uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);

	if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
		p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
				GFP_KERNEL);
		if (!p_val)
			return;

		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
					    (void *)p_val, &size)) {
			for (i = 0; i < num_cpu_cores; i++)
				seq_printf(m, "\t%u MHz (CPU%d)\n",
					   *(p_val + i), i);
		}

		kfree(p_val);
	}
}

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
	uint32_t value;
	uint64_t value64 = 0;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");

	amdgpu_debugfs_prints_cpu_info(m, adev);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&query, &size)) {
		if (adev->flags & AMD_IS_APU)
			seq_printf(m, "\t%u.%02u W (average SoC including CPU)\n", query >> 8, query & 0xff);
		else
			seq_printf(m, "\t%u.%02u W (average SoC)\n", query >> 8, query & 0xff);
	}
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&query, &size)) {
		if (adev->flags & AMD_IS_APU)
			seq_printf(m, "\t%u.%02u W (current SoC including CPU)\n", query >> 8, query & 0xff);
		else
			seq_printf(m, "\t%u.%02u W (current SoC)\n", query >> 8, query & 0xff);
	}
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);
	/* VCN Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
		seq_printf(m, "VCN Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	/* ASICs newer than CHIP_VEGA20 support these sensors */
	if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Powered down\n");
			} else {
				seq_printf(m, "VCN: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Powered down\n");
			} else {
				seq_printf(m, "UVD: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Powered down\n");
			} else {
				seq_printf(m, "VCE: Powered up\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}

static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};

static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}

static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	u64 flags = 0;
	int r;

	r = amdgpu_pm_get_access(adev);
	if (r < 0)
		return r;

	if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
		if (r)
			goto out;
	}

	amdgpu_device_ip_get_clockgating_state(adev, &flags);

	seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

out:
	amdgpu_pm_put_access(adev);

	return r;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);

/*
 * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
 *
 * Reads the debug memory region allocated to PMFW
 */
static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = file_inode(f)->i_private;
	size_t smu_prv_buf_size;
	void *smu_prv_buf;
	int ret = 0;

	ret = amdgpu_pm_dev_state_check(adev, true);
	if (ret)
		return ret;

	ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
	if (ret)
		return ret;

	if (!smu_prv_buf || !smu_prv_buf_size)
		return -EINVAL;

	return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
				       smu_prv_buf_size);
}

static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = amdgpu_pm_prv_buffer_read,
	.llseek = default_llseek,
};

#endif

void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev_to_drm(adev)->primary;
	struct dentry *root = minor->debugfs_root;

	if (!adev->pm.dpm_enabled)
		return;

	debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
			    &amdgpu_debugfs_pm_info_fops);

	if (adev->pm.smu_prv_buffer_size > 0)
		debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
					 adev,
					 &amdgpu_debugfs_pm_prv_buffer_fops,
					 adev->pm.smu_prv_buffer_size);

	amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}