GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef __SMU_CMN_H__
#define __SMU_CMN_H__

#include "amdgpu_smu.h"

extern const struct smu_msg_ops smu_msg_v1_ops;

int smu_msg_wait_response(struct smu_msg_ctl *ctl, u32 timeout_us);
int smu_msg_send_async_locked(struct smu_msg_ctl *ctl,
                              enum smu_message_type msg, u32 param);

#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)

#define FDO_PWM_MODE_STATIC     1
#define FDO_PWM_MODE_STATIC_RPM 5

#define SMU_IH_INTERRUPT_ID_TO_DRIVER                   0xFE
#define SMU_IH_INTERRUPT_CONTEXT_ID_BACO                0x2
#define SMU_IH_INTERRUPT_CONTEXT_ID_AC                  0x3
#define SMU_IH_INTERRUPT_CONTEXT_ID_DC                  0x4
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D0            0x5
#define SMU_IH_INTERRUPT_CONTEXT_ID_AUDIO_D3            0x6
#define SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING  0x7
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL        0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY        0x9

#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF

#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev)                    \
        do {                                                              \
                typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr));  \
                struct gpu_metrics_v##frev##_##crev *tmp = (ptr);         \
                struct metrics_table_header *header =                     \
                        (struct metrics_table_header *)tmp;               \
                memset(header, 0xFF, sizeof(*tmp));                       \
                header->format_revision = frev;                           \
                header->content_revision = crev;                          \
                header->structure_size = sizeof(*tmp);                    \
        } while (0)
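
/*
 * Usage sketch (illustrative, not part of this header): a backend casts its
 * table buffer to the versioned struct and passes the matching revisions,
 * e.g. for a v1_3 metrics table:
 *
 *      struct gpu_metrics_v1_3 *gpu_metrics = smu_table->gpu_metrics_table;
 *      smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 *
 * The macro poisons the whole struct with 0xFF ("value not available") and
 * then fills in the header so consumers can identify revision and size.
 */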

#define smu_cmn_init_partition_metrics(ptr, fr, cr)                         \
        do {                                                                \
                typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *,   \
                          (ptr));                                           \
                struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr);  \
                struct metrics_table_header *header =                       \
                        (struct metrics_table_header *)tmp;                 \
                memset(header, 0xFF, sizeof(*tmp));                         \
                header->format_revision = fr;                               \
                header->content_revision = cr;                              \
                header->structure_size = sizeof(*tmp);                      \
        } while (0)

#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr)                        \
        do {                                                                    \
                typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *,  \
                          (ptr));                                               \
                struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
                struct metrics_table_header *header =                           \
                        (struct metrics_table_header *)tmp;                     \
                memset(header, 0xFF, sizeof(*tmp));                             \
                header->format_revision = fr;                                   \
                header->content_revision = cr;                                  \
                header->structure_size = sizeof(*tmp);                          \
        } while (0)

#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr)                        \
        do {                                                                   \
                typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *,  \
                          (ptr));                                              \
                struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
                struct metrics_table_header *header =                          \
                        (struct metrics_table_header *)tmp;                    \
                memset(header, 0xFF, sizeof(*tmp));                            \
                header->format_revision = fr;                                  \
                header->content_revision = cr;                                 \
                header->structure_size = sizeof(*tmp);                         \
        } while (0)

#define SMU_DPM_PCIE_GEN_IDX(gen)      smu_cmn_dpm_pcie_gen_idx((gen))
#define SMU_DPM_PCIE_WIDTH_IDX(width)  smu_cmn_dpm_pcie_width_idx((width))

extern const int link_speed[];

/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
static inline int pcie_gen_to_speed(uint32_t gen)
{
        return ((gen == 0) ? link_speed[0] : link_speed[gen - 1]);
}
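
/*
 * Example (assuming the link_speed[] table defined in smu_cmn.c is
 * {25, 50, 80, 160, 320, 640}, in 0.1 GT/s units): pcie_gen_to_speed(3)
 * returns 80, i.e. 8.0 GT/s for PCIe Gen 3. A gen value of 0 is clamped
 * to the Gen 1 entry.
 */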

int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
                                    enum smu_message_type msg,
                                    uint32_t param,
                                    uint32_t *read_arg);
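
/*
 * Usage sketch (illustrative): send a message with an argument and read the
 * response register back, e.g. querying a DPM frequency by index:
 *
 *      uint32_t freq;
 *      int ret;
 *
 *      ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
 *                                            param, &freq);
 *      if (ret)
 *              return ret;
 *
 * read_arg may be NULL when the response value is not needed.
 */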

int smu_cmn_send_smc_msg(struct smu_context *smu,
                         enum smu_message_type msg,
                         uint32_t *read_arg);

int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
                               uint32_t msg);

int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
                                          uint32_t msg, uint32_t param);

int smu_cmn_wait_for_response(struct smu_context *smu);

int smu_cmn_to_asic_specific_index(struct smu_context *smu,
                                   enum smu_cmn2asic_mapping_type type,
                                   uint32_t index);

int smu_cmn_feature_is_supported(struct smu_context *smu,
                                 enum smu_feature_mask mask);

int smu_cmn_feature_is_enabled(struct smu_context *smu,
                               enum smu_feature_mask mask);

bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
                                enum smu_clk_type clk_type);

int smu_cmn_get_enabled_mask(struct smu_context *smu,
                             uint64_t *feature_mask);

uint64_t smu_cmn_get_indep_throttler_status(
                                const unsigned long dep_status,
                                const uint8_t *throttler_map);

int smu_cmn_feature_update_enable_state(struct smu_context *smu,
                                        uint64_t feature_mask,
                                        bool enabled);

int smu_cmn_feature_set_enabled(struct smu_context *smu,
                                enum smu_feature_mask mask,
                                bool enable);

size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
                                   char *buf);

int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
                                uint64_t new_mask);

int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
                                                enum smu_feature_mask mask);

int smu_cmn_get_smc_version(struct smu_context *smu,
                            uint32_t *if_version,
                            uint32_t *smu_version);

int smu_cmn_update_table(struct smu_context *smu,
                         enum smu_table_id table_index,
                         int argument,
                         void *table_data,
                         bool drv2smu);

int smu_cmn_write_watermarks_table(struct smu_context *smu);

int smu_cmn_write_pptable(struct smu_context *smu);

int smu_cmn_get_metrics_table(struct smu_context *smu,
                              void *metrics_table,
                              bool bypass_cache);
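
/*
 * Note (based on the common implementation in smu_cmn.c, an assumption of
 * this comment rather than something stated in this header): when
 * bypass_cache is false, calls arriving within the short cache window are
 * served from the copy cached in the SMU table context; passing true forces
 * a fresh fetch from the firmware.
 */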

int smu_cmn_get_combo_pptable(struct smu_context *smu);

int smu_cmn_set_mp1_state(struct smu_context *smu,
                          enum pp_mp1_state mp1_state);

bool smu_cmn_is_audio_func_enabled(struct amdgpu_device *adev);
void smu_cmn_generic_soc_policy_desc(struct smu_dpm_policy *policy);
void smu_cmn_generic_plpd_policy_desc(struct smu_dpm_policy *policy);

void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
                                       u32 workload_mask,
                                       u32 *backend_workload_mask);

int smu_cmn_print_dpm_clk_levels(struct smu_context *smu,
                                 struct smu_dpm_table *dpm_table,
                                 uint32_t cur_clk,
                                 char *buf, int *offset);

int smu_cmn_print_pcie_levels(struct smu_context *smu,
                              struct smu_pcie_table *pcie_table,
                              uint32_t cur_gen, uint32_t cur_lane,
                              char *buf, int *offset);

int smu_cmn_dpm_pcie_gen_idx(int gen);
int smu_cmn_dpm_pcie_width_idx(int width);

/* SMU gpu metrics */

/* Attribute ID mapping */
#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X
/* Type ID mapping */
#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X
/* Unit ID mapping */
#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X

/* Map TYPEID to C type */
#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID

#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8   u8
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8   s8
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16  u16
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16  s16
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32  u32
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32  s32
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64  u64
#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64  s64

/* struct members */
#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME)  \
        u64 NAME##_ftype;                           \
        SMU_CTYPE(TYPEID) NAME

#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE)  \
        u64 NAME##_ftype;                                \
        SMU_CTYPE(TYPEID) NAME[SIZE]
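
/*
 * Each declared metric is thus a pair of members: a u64 NAME##_ftype
 * descriptor word followed by the value itself. For example,
 * SMU_METRICS_SCALAR(id, unit, AMDGPU_METRICS_TYPE_U32, temp) expands to
 * "u64 temp_ftype; u32 temp".
 */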

/* Init functions for scalar/array fields - init to 0xFFs */
#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME)                 \
        do {                                                            \
                obj->NAME##_ftype =                                     \
                        AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1);   \
                obj->NAME = (SMU_CTYPE(TYPEID)) ~0;                     \
                count++;                                                \
        } while (0)

#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE)             \
        do {                                                             \
                obj->NAME##_ftype =                                      \
                        AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \
                memset(obj->NAME, 0xFF, sizeof(obj->NAME));              \
                count++;                                                 \
        } while (0)

/* Declare Metrics Class and Template object */
#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST)           \
        struct __packed CLASSNAME {                                            \
                struct metrics_table_header header;                            \
                int attr_count;                                                \
                SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \
        };                                                                     \
        static inline void CLASSNAME##_init(struct CLASSNAME *obj,             \
                                            uint8_t frev, uint8_t crev)        \
        {                                                                      \
                int count = 0;                                                 \
                memset(obj, 0xFF, sizeof(*obj));                               \
                obj->header.format_revision = frev;                            \
                obj->header.content_revision = crev;                           \
                obj->header.structure_size = sizeof(*obj);                     \
                SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR,                \
                                       SMU_METRICS_INIT_ARRAY)                 \
                obj->attr_count = count;                                       \
        }
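
/*
 * Usage sketch (illustrative only; the field list, attribute IDs and units
 * below are hypothetical, not defined in this file). A caller supplies an
 * X-macro field list, and DECLARE_SMU_METRICS_CLASS() expands it twice:
 * once into the packed struct layout and once into its _init() helper:
 *
 *      #define MY_METRICS_FIELDS(SCALAR, ARRAY)                \
 *              SCALAR(SMU_MATTR(GFX_TEMP), SMU_MUNIT(CELSIUS), \
 *                     SMU_MTYPE(U32), gfx_temp);               \
 *              ARRAY(SMU_MATTR(CORE_FREQ), SMU_MUNIT(MHZ),     \
 *                    SMU_MTYPE(U16), core_freq, 8);
 *
 *      DECLARE_SMU_METRICS_CLASS(my_metrics, MY_METRICS_FIELDS);
 *
 * my_metrics_init(&obj, 1, 0) then poisons all fields to 0xFF ("not
 * available"), encodes each NAME##_ftype descriptor word, fills in the
 * table header, and records the field total in attr_count.
 */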

#endif /* SWSMU_CODE_LAYER_L2 || SWSMU_CODE_LAYER_L3 || SWSMU_CODE_LAYER_L4 */
#endif /* __SMU_CMN_H__ */