Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
51589 views
1
/*
2
* Copyright 2013 Advanced Micro Devices, Inc.
3
*
4
* Permission is hereby granted, free of charge, to any person obtaining a
5
* copy of this software and associated documentation files (the "Software"),
6
* to deal in the Software without restriction, including without limitation
7
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
* and/or sell copies of the Software, and to permit persons to whom the
9
* Software is furnished to do so, subject to the following conditions:
10
*
11
* The above copyright notice and this permission notice shall be included in
12
* all copies or substantial portions of the Software.
13
*
14
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20
* OTHER DEALINGS IN THE SOFTWARE.
21
*
22
*/
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
#include "atom.h"
#include "gfx_v6_0.h"
#include "r600_dpm.h"
#include "sid.h"
#include "si_dpm.h"
#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <legacy_dpm.h>

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"

#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"

#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"

/* Fixed: missing space after #include, per kernel coding style */
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"

#include "smu/smu_6_0_d.h"
#include "smu/smu_6_0_sh_mask.h"
/* Memory-controller clock-gating arbiter frequency-table slots (F0..F3). */
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

/* End of the SMC's addressable RAM window; used to bound SMC writes. */
#define SMC_RAM_END 0x20000

/* Minimum engine clock (in 10 kHz units presumably — TODO confirm) at which
 * sclk deep sleep is allowed.
 */
#define SCLK_MIN_DEEPSLEEP_FREQ     1350


/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each table revision; the struct
 * grows by one 16-bit offset per version.
 */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

/* VBIOS scratch register index used by this driver. */
#define BIOS_SCRATCH_4                                    0x5cd
/*
 * SMC (power-management microcontroller) firmware images for the supported
 * Southern Islands ASIC variants.  MODULE_FIRMWARE() records the names in
 * the module info so userspace (e.g. initramfs tooling) can pre-stage them.
 * The "_k_" variants are alternate images for specific board revisions.
 */
MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");

/* Defined at the bottom of this file; forward-declared for early users. */
static const struct amd_pm_funcs si_dpm_funcs;
/*
 * Overlay of every ATOM PowerPlay table revision the driver may find in
 * the VBIOS; the actual layout is selected at runtime from the table's
 * format revision field.
 */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
/* Overlay of the ATOM PPLIB fan table revisions (v1..v3). */
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
/*
 * Overlay of the per-ASIC-family PPLIB clock-info entry layouts; only the
 * SI variant is used by this driver, but the union mirrors the VBIOS format.
 */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
/* Sources that may trigger automatic DPM throttling. */
enum si_dpm_auto_throttle_src {
	SI_DPM_AUTO_THROTTLE_SRC_THERMAL,	/* on-die thermal event */
	SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL	/* external (board-level) event */
};
/*
 * Thermal protection event source selection.
 * NOTE(review): "DIGIAL" is a long-standing typo for "DIGITAL"; the
 * identifier is kept as-is for source compatibility.
 */
enum si_dpm_event_src {
	SI_DPM_EVENT_SRC_ANALOG = 0,
	SI_DPM_EVENT_SRC_EXTERNAL = 1,
	SI_DPM_EVENT_SRC_DIGITAL = 2,
	SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};
/*
 * Default "up" thermal-controller coefficients, one per TC index
 * (R600_UTC_DFLT_* are defined in r600_dpm.h).
 */
static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00,
	R600_UTC_DFLT_01,
	R600_UTC_DFLT_02,
	R600_UTC_DFLT_03,
	R600_UTC_DFLT_04,
	R600_UTC_DFLT_05,
	R600_UTC_DFLT_06,
	R600_UTC_DFLT_07,
	R600_UTC_DFLT_08,
	R600_UTC_DFLT_09,
	R600_UTC_DFLT_10,
	R600_UTC_DFLT_11,
	R600_UTC_DFLT_12,
	R600_UTC_DFLT_13,
	R600_UTC_DFLT_14,
};
/*
 * Default "down" thermal-controller coefficients, one per TC index
 * (R600_DTC_DFLT_* are defined in r600_dpm.h); parallels r600_utc above.
 */
static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00,
	R600_DTC_DFLT_01,
	R600_DTC_DFLT_02,
	R600_DTC_DFLT_03,
	R600_DTC_DFLT_04,
	R600_DTC_DFLT_05,
	R600_DTC_DFLT_06,
	R600_DTC_DFLT_07,
	R600_DTC_DFLT_08,
	R600_DTC_DFLT_09,
	R600_DTC_DFLT_10,
	R600_DTC_DFLT_11,
	R600_DTC_DFLT_12,
	R600_DTC_DFLT_13,
	R600_DTC_DFLT_14,
};
/*
 * CAC (capacitance * activity) weight table for Tahiti.
 * Entry layout: { offset, mask, shift, value, register type }.
 * An offset of 0xFFFFFFFF terminates the list.  Values are
 * AMD-provided calibration data — do not hand-edit.
 */
static const struct si_cac_config_reg cac_weights_tahiti[] =
{
	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
/*
 * Local CAC configuration for Tahiti.  Entries come in pairs per block:
 * the 0x0001fffe/shift-1 entry programs the weight, the 0x00000001 entry
 * sets the enable bit.  Terminated by offset 0xFFFFFFFF.  Calibration
 * data — do not hand-edit.
 */
static const struct si_cac_config_reg lcac_tahiti[] =
{
	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
/* No CAC overrides for Tahiti — list is just the 0xFFFFFFFF terminator. */
static const struct si_cac_config_reg cac_override_tahiti[] =
{
	{ 0xFFFFFFFF }
};
/*
 * PowerTune calibration for Tahiti.  Positional initializer — field order
 * follows struct si_powertune_data (presumably: cac_window, l2 LTA window
 * size, lts truncate, shift_n, operating temp, leakage coefficients,
 * fixed_kt, leakage-LUT v0 percent, dc_cac levels, enable-by-default —
 * TODO confirm against si_dpm.h).
 */
static const struct si_powertune_data powertune_data_tahiti =
{
	((1 << 16) | 27027),
	6,
	0,
	4,
	95,
	{
		0UL,
		0UL,
		4521550UL,
		309631529UL,
		-1270850L,
		4513710L,
		40
	},
	595000000UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};
/*
 * Digital Temperature Estimation (DTE) parameters for Tahiti.  Positional
 * initializer over struct si_dte_data (presumably: tau[], r[], k, t0,
 * max_t, window_size, temp_select, dte_mode, tdep_count, t_limits[],
 * tdep_tau[], tdep_r[], t_threshold, enable-by-default — TODO confirm
 * against si_dpm.h).  Note DTE is disabled by default here (final false).
 */
static const struct si_dte_data dte_data_tahiti =
{
	{ 1159409, 0, 0, 0, 0 },
	{ 777, 0, 0, 0, 0 },
	2,
	54000,
	127000,
	25,
	2,
	10,
	13,
	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
	85,
	false
};
/* DTE parameters for Tahiti Pro; enabled by default (final true). */
static const struct si_dte_data dte_data_tahiti_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/* DTE parameters for New Zealand; enabled by default (final true). */
static const struct si_dte_data dte_data_new_zealand =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
	0x5,
	0xAFC8,
	0x69,
	0x32,
	1,
	0,
	0x10,
	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
	85,
	true
};
/* DTE parameters for Aruba Pro; enabled by default (final true). */
static const struct si_dte_data dte_data_aruba_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/* DTE parameters for Malta; enabled by default (final true). */
static const struct si_dte_data dte_data_malta =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/*
 * CAC weight table for Pitcairn.
 * Entry layout: { offset, mask, shift, value, register type };
 * 0xFFFFFFFF terminates.  Calibration data — do not hand-edit.
 */
static const struct si_cac_config_reg cac_weights_pitcairn[] =
{
	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
/*
 * Local CAC configuration for Pitcairn.  Same weight/enable pairing and
 * terminator convention as lcac_tahiti.  Calibration data — do not
 * hand-edit.
 */
static const struct si_cac_config_reg lcac_pitcairn[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
/* No CAC overrides for Pitcairn — list is just the 0xFFFFFFFF terminator. */
static const struct si_cac_config_reg cac_override_pitcairn[] =
{
	{ 0xFFFFFFFF }
};
/*
 * PowerTune calibration for Pitcairn.  Positional initializer over
 * struct si_powertune_data — same field order as powertune_data_tahiti
 * above (TODO confirm against si_dpm.h).
 */
static const struct si_powertune_data powertune_data_pitcairn =
{
	((1 << 16) | 27027),
	5,
	0,
	6,
	100,
	{
		51600000UL,
		1800000UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};
/* DTE parameters for Pitcairn: all-zero, DTE disabled (final false). */
static const struct si_dte_data dte_data_pitcairn =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false
};
/* DTE parameters for Curacao XT; enabled by default (final true). */
static const struct si_dte_data dte_data_curacao_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/*
 * DTE parameters for Curacao Pro; currently identical to the XT data.
 * Kept as a separate table so the variants can diverge independently.
 */
static const struct si_dte_data dte_data_curacao_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/* DTE parameters for Neptune XT; enabled by default (final true). */
static const struct si_dte_data dte_data_neptune_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	45000,
	100,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
/*
 * CAC weight table for Chelsea Pro.
 * Entry layout: { offset, mask, shift, value, register type };
 * 0xFFFFFFFF terminates.  Calibration data — do not hand-edit.
 */
static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
781
{
782
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
783
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
784
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
785
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
786
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
787
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
788
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
789
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
790
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
791
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
792
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
793
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
794
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
795
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
796
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
797
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
798
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
799
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
800
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
801
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
802
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
803
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
804
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
805
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
806
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
807
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
808
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
809
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
810
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
811
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
812
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
813
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
814
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
815
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
816
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
817
{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
818
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
819
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
820
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
821
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
822
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
823
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
824
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
825
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
826
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
827
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
828
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
829
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
830
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
831
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
832
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
833
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
834
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
835
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
836
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
837
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
838
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
839
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
840
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
841
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
842
{ 0xFFFFFFFF }
843
};
844
845
static const struct si_cac_config_reg cac_weights_heathrow[] =
846
{
847
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
848
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
849
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
850
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
851
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
852
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
853
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
854
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
855
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
856
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
857
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
858
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
859
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
860
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
861
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
862
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
863
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
864
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
865
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
866
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
867
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
868
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
869
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
870
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
871
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
872
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
873
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
874
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
875
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
876
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
877
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
878
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
879
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
880
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
881
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
882
{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
883
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
884
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
885
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
886
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
887
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
888
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
889
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
890
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
891
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
892
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
893
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
894
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
895
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
896
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
897
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
898
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
899
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
900
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
901
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
902
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
903
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
904
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
905
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
906
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
907
{ 0xFFFFFFFF }
908
};
909
910
static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
911
{
912
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
913
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
914
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
915
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
916
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
917
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
918
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
919
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
920
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
921
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
922
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
923
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
924
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
925
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
926
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
927
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
928
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
929
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
930
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
931
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
932
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
933
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
934
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
935
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
936
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
937
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
938
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
939
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
940
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
941
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
942
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
943
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
944
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
945
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
946
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
947
{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
948
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
949
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
950
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
951
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
952
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
953
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
954
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
955
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
956
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
957
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
958
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
959
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
960
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
961
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
962
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
963
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
964
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
965
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
966
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
967
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
968
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
969
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
970
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
971
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
972
{ 0xFFFFFFFF }
973
};
974
975
static const struct si_cac_config_reg cac_weights_cape_verde[] =
976
{
977
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
978
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
979
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
980
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
981
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
982
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
983
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
984
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
985
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
986
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
987
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
988
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
989
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
990
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
991
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
992
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
993
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
994
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
995
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
996
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
997
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
998
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
999
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1000
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1001
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1002
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1003
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1004
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1005
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1006
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1007
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1008
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1009
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1010
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1011
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1012
{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1013
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1014
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1015
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1016
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1017
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1018
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1019
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1020
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1021
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1022
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1023
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1024
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1025
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1026
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1027
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1028
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1029
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1030
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1031
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1032
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1033
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1034
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1035
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1036
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1037
{ 0xFFFFFFFF }
1038
};
1039
1040
static const struct si_cac_config_reg lcac_cape_verde[] =
1041
{
1042
{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1043
{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1044
{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1045
{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1046
{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1047
{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1048
{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1049
{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1050
{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1051
{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1052
{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1053
{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1054
{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1055
{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1056
{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1057
{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1058
{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1059
{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1060
{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1061
{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1062
{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1063
{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1064
{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1065
{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1066
{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1067
{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1068
{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1069
{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1070
{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1071
{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1072
{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1073
{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1074
{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1075
{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1076
{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1077
{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1078
{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1079
{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1080
{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1081
{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1082
{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1083
{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1084
{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1085
{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1086
{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1087
{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1088
{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1089
{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1090
{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1091
{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1092
{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1093
{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1094
{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1095
{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1096
{ 0xFFFFFFFF }
1097
};
1098
1099
static const struct si_cac_config_reg cac_override_cape_verde[] =
1100
{
1101
{ 0xFFFFFFFF }
1102
};
1103
1104
static const struct si_powertune_data powertune_data_cape_verde =
1105
{
1106
((1 << 16) | 0x6993),
1107
5,
1108
0,
1109
7,
1110
105,
1111
{
1112
0UL,
1113
0UL,
1114
7194395UL,
1115
309631529UL,
1116
-1270850L,
1117
4513710L,
1118
100
1119
},
1120
117830498UL,
1121
12,
1122
{
1123
0,
1124
0,
1125
0,
1126
0,
1127
0,
1128
0,
1129
0,
1130
0
1131
},
1132
true
1133
};
1134
1135
static const struct si_dte_data dte_data_cape_verde =
1136
{
1137
{ 0, 0, 0, 0, 0 },
1138
{ 0, 0, 0, 0, 0 },
1139
0,
1140
0,
1141
0,
1142
0,
1143
0,
1144
0,
1145
0,
1146
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1147
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1148
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1149
0,
1150
false
1151
};
1152
1153
static const struct si_dte_data dte_data_venus_xtx =
1154
{
1155
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1156
{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1157
5,
1158
55000,
1159
0x69,
1160
0xA,
1161
1,
1162
0,
1163
0x3,
1164
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1165
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1166
{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1167
90,
1168
true
1169
};
1170
1171
static const struct si_dte_data dte_data_venus_xt =
1172
{
1173
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1174
{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1175
5,
1176
55000,
1177
0x69,
1178
0xA,
1179
1,
1180
0,
1181
0x3,
1182
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1183
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1184
{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1185
90,
1186
true
1187
};
1188
1189
static const struct si_dte_data dte_data_venus_pro =
1190
{
1191
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1192
{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1193
5,
1194
55000,
1195
0x69,
1196
0xA,
1197
1,
1198
0,
1199
0x3,
1200
{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1201
{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1202
{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1203
90,
1204
true
1205
};
1206
1207
static const struct si_cac_config_reg cac_weights_oland[] =
1208
{
1209
{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
1210
{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1211
{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
1212
{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
1213
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1214
{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1215
{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1216
{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1217
{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
1218
{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
1219
{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
1220
{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
1221
{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
1222
{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1223
{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
1224
{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
1225
{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
1226
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
1227
{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
1228
{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
1229
{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
1230
{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
1231
{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1232
{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1233
{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1234
{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1235
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1236
{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1237
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1238
{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1239
{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1240
{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1241
{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1242
{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1243
{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1244
{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1245
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1246
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1247
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1248
{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1249
{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1250
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1251
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1252
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1253
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1254
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1255
{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1256
{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1257
{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1258
{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1259
{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1260
{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1261
{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1262
{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1263
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1264
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1265
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1266
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1267
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1268
{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1269
{ 0xFFFFFFFF }
1270
};
1271
1272
static const struct si_cac_config_reg cac_weights_mars_pro[] =
1273
{
1274
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1275
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1276
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1277
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1278
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1279
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1280
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1281
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1282
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1283
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1284
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1285
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1286
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1287
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1288
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1289
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1290
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1291
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1292
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1293
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1294
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1295
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1296
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1297
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1298
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1299
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1300
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1301
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1302
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1303
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1304
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1305
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1306
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1307
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1308
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1309
{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
1310
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1311
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1312
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1313
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1314
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1315
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1316
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1317
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1318
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1319
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1320
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1321
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1322
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1323
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1324
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1325
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1326
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1327
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1328
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1329
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1330
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1331
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1332
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1333
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1334
{ 0xFFFFFFFF }
1335
};
1336
1337
static const struct si_cac_config_reg cac_weights_mars_xt[] =
1338
{
1339
{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1340
{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1341
{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1342
{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1343
{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1344
{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1345
{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1346
{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1347
{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1348
{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1349
{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1350
{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1351
{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1352
{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1353
{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1354
{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1355
{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1356
{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1357
{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1358
{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1359
{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1360
{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1361
{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1362
{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1363
{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1364
{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1365
{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1366
{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1367
{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1368
{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1369
{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1370
{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1371
{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1372
{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1373
{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1374
{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
1375
{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1376
{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1377
{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1378
{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1379
{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1380
{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1381
{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1382
{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1383
{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1384
{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1385
{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1386
{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1387
{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1388
{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1389
{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1390
{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1391
{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1392
{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1393
{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1394
{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1395
{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1396
{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1397
{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1398
{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1399
{ 0xFFFFFFFF }
1400
};
1401
1402
/*
 * CAC (capacitance * activity) weight table for Oland Pro boards.
 * Entry fields follow struct si_cac_config_reg: indirect register offset,
 * bit mask, shift, value, and access type (SISLANDS_CACCONFIG_CGIND =
 * CG indirect space — assumed from the macro name, confirm in sid.h).
 * The list is terminated by the { 0xFFFFFFFF } sentinel.
 */
static const struct si_cac_config_reg cac_weights_oland_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
1466
1467
/*
 * CAC weight table for Oland XT boards. Identical to cac_weights_oland_pro
 * except the entry for register 0x14 (0x120 here vs 0x90 on Pro).
 * Sentinel-terminated with { 0xFFFFFFFF }.
 */
static const struct si_cac_config_reg cac_weights_oland_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
1531
1532
/*
 * Local CAC (per-block leakage) configuration for Oland. Each register gets
 * two entries: a value field (mask 0x0001fffe, shift 1) and an enable bit
 * (mask 0x00000001, shift 0, always 0x1). Sentinel-terminated.
 */
static const struct si_cac_config_reg lcac_oland[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
1578
1579
/*
 * Local CAC configuration for Mars Pro. Identical to lcac_oland except the
 * value for register 0x143 (0x2 here vs 0x4 on Oland). Sentinel-terminated.
 */
static const struct si_cac_config_reg lcac_mars_pro[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
1625
1626
/* No CAC register overrides on Oland — sentinel only. */
static const struct si_cac_config_reg cac_override_oland[] =
{
	{ 0xFFFFFFFF }
};
1630
1631
/*
 * PowerTune defaults for Oland. Positional initializer for
 * struct si_powertune_data; the brace-enclosed 7-value group is the
 * leakage-coefficient block consumed via coeff->t_slope/t_intercept/av/
 * bv/t_ref in si_calculate_leakage_for_v_and_t_formula() (field order per
 * the struct definition — confirm against si_dpm.h). The trailing `true`
 * is enable_powertune_by_default, tested in
 * si_initialize_powertune_defaults().
 */
static const struct si_powertune_data powertune_data_oland =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,	/* l2_lta_window_size_default — presumed position, confirm */
	12,		/* lts_truncate_default — presumed position, confirm */
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};
1661
1662
/*
 * PowerTune defaults for Mars Pro — currently identical to
 * powertune_data_oland (kept separate so the boards can diverge).
 * See powertune_data_oland for field notes.
 */
static const struct si_powertune_data powertune_data_mars_pro =
{
	((1 << 16) | 0x6993),
	5,
	0,
	7,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};
1692
1693
/*
 * DTE (digital temperature estimation) parameters for Oland: all-zero
 * placeholder with the trailing enable flag false, i.e. DTE stays off
 * on the default Oland path in si_initialize_powertune_defaults().
 */
static const struct si_dte_data dte_data_oland =
{
	{ 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0 },
	0,
	0,
	0,
	0,
	0,
	0,
	0,
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	0,
	false
};
1710
1711
/*
 * DTE parameters for Mars Pro. Positional initializer for struct
 * si_dte_data; fields used elsewhere in this file include k, t0, max_t,
 * r[], tdep_r[] and the trailing enable_dte_by_default (true here) —
 * exact positions per the struct definition, confirm against si_dpm.h.
 * si_update_dte_from_pl2() may rewrite r[]/tdep_r[] from the PL2 limit.
 */
static const struct si_dte_data dte_data_mars_pro =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
1728
1729
/*
 * DTE parameters for Sun XT (also reused for Hainan in
 * si_initialize_powertune_defaults()). Identical to dte_data_mars_pro
 * except the first entry of the third table (0xD555 vs 0xF627).
 */
static const struct si_dte_data dte_data_sun_xt =
{
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	5,
	55000,
	105,
	0xA,
	1,
	0,
	0x10,
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	90,
	true
};
1746
1747
1748
/*
 * CAC weight table for Hainan. Same layout as the other cac_weights_*
 * tables (struct si_cac_config_reg, sentinel-terminated).
 */
static const struct si_cac_config_reg cac_weights_hainan[] =
{
	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
1812
1813
/*
 * PowerTune defaults for Hainan — same values as powertune_data_oland
 * except the fourth scalar (9 here vs 7). See powertune_data_oland for
 * field notes.
 */
static const struct si_powertune_data powertune_data_hainan =
{
	((1 << 16) | 0x6993),
	5,
	0,
	9,
	105,
	{
		0UL,
		0UL,
		7194395UL,
		309631529UL,
		-1270850L,
		4513710L,
		100
	},
	117830498UL,
	12,
	{
		0,
		0,
		0,
		0,
		0,
		0,
		0,
		0
	},
	true
};
1843
1844
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
1845
static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
1846
static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
1847
static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
1848
1849
static int si_populate_voltage_value(struct amdgpu_device *adev,
1850
const struct atom_voltage_table *table,
1851
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
1852
static int si_get_std_voltage_value(struct amdgpu_device *adev,
1853
SISLANDS_SMC_VOLTAGE_VALUE *voltage,
1854
u16 *std_voltage);
1855
static int si_write_smc_soft_register(struct amdgpu_device *adev,
1856
u16 reg_offset, u32 value);
1857
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
1858
struct rv7xx_pl *pl,
1859
SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
1860
static int si_calculate_sclk_params(struct amdgpu_device *adev,
1861
u32 engine_clock,
1862
SISLANDS_SMC_SCLK_VALUE *sclk);
1863
1864
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
1865
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
1866
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
1867
1868
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
1869
{
1870
struct si_power_info *pi = adev->pm.dpm.priv;
1871
return pi;
1872
}
1873
1874
/*
 * Compute leakage power as a function of voltage and temperature using the
 * DRM 32.32 fixed-point helpers:
 *
 *   tmp = t_slope * vddc + t_intercept
 *   kt  = exp(tmp * T) / exp(tmp * t_ref)
 *   kv  = av * exp(bv * vddc)
 *   leakage = i_leakage * kt * kv * vddc
 *
 * @coeff:    leakage coefficients; t_slope/t_intercept/av/bv are stored
 *            scaled by 1e8, t_ref is used unscaled
 * @v:        voltage in mV (divided by 1000 to get volts)
 * @t:        temperature in milli-units (divided by 1000)
 * @ileakage: leakage figure scaled by 100
 * @leakage:  out — truncated fixed-point result * 1000
 *            (presumably mW given the *1000 — confirm against callers)
 */
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						     u16 v, s32 t, u32 ileakage, u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc;
	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
	s64 tmp;

	/* Convert inputs to fixed point, undoing the integer scaling. */
	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
	t_ref = drm_int2fixp(coeff->t_ref);

	/* kt: temperature factor, normalized to the reference temperature. */
	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
	/* kv: voltage factor. */
	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}
1900
1901
/*
 * Thin wrapper over si_calculate_leakage_for_v_and_t_formula() keeping the
 * adev-taking callback shape used by the rest of this file; adev itself is
 * unused.
 */
static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v,
					     s32 t,
					     u32 i_leakage,
					     u32 *leakage)
{
	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
1910
1911
/*
 * Voltage-only leakage variant: like the v-and-t formula but with the
 * temperature factor kt supplied directly as a fixed constant instead of
 * being derived from a temperature reading.
 *
 * @coeff:    leakage coefficients (av/bv scaled by 1e8)
 * @fixed_kt: precomputed temperature factor, scaled by 1e8
 * @v:        voltage in mV
 * @ileakage: leakage figure scaled by 100
 * @leakage:  out — truncated fixed-point result * 1000
 */
static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
					       const u32 fixed_kt, u16 v,
					       u32 ileakage, u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc;

	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
	vddc = div64_s64(drm_int2fixp(v), 1000);

	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
	/* kv = av * exp(bv * vddc), all in 32.32 fixed point. */
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}
1928
1929
/*
 * Thin wrapper over si_calculate_leakage_for_v_formula() keeping the
 * adev-taking callback shape; adev itself is unused.
 */
static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
				       const struct ni_leakage_coeffients *coeff,
				       const u32 fixed_kt,
				       u16 v,
				       u32 i_leakage,
				       u32 *leakage)
{
	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
}
1938
1939
1940
/*
 * Rescale the DTE R and temperature-dependent tables from the board's
 * power limits: requires a valid PL2 (near-TDP) limit that does not
 * exceed PL1, otherwise the DTE data is left untouched and an error is
 * logged.
 */
static void si_update_dte_from_pl2(struct amdgpu_device *adev,
				   struct si_dte_data *dte_data)
{
	u32 p_limit1 = adev->pm.dpm.tdp_limit;
	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
	u32 k = dte_data->k;
	u32 t_max = dte_data->max_t;
	u32 t_split[5] = { 10, 15, 20, 25, 30 };
	u32 t_0 = dte_data->t0;
	u32 i;

	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
		dte_data->tdep_count = 3;

		/*
		 * NOTE(review): the loop bound is the data-driven k, but
		 * t_split[] has 5 entries (and dte_data tables populated in
		 * this file use k == 5 or 0) — a table with k > 5 would read
		 * and write out of bounds. Confirm r[] is at least k entries.
		 * Also note precedence: t_0/(u32)1000 is divided first, then
		 * subtracted from t_max (t0 is stored in milli-units).
		 */
		for (i = 0; i < k; i++) {
			dte_data->r[i] =
				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
				(p_limit2 * (u32)100);
		}

		/* tdep_r[1] gets double the last R stage; the rest get it as-is. */
		dte_data->tdep_r[1] = dte_data->r[4] * 2;

		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
			dte_data->tdep_r[i] = dte_data->r[4];
		}
	} else {
		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
	}
}
1969
1970
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
1971
{
1972
struct rv7xx_power_info *pi = adev->pm.dpm.priv;
1973
1974
return pi;
1975
}
1976
1977
static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
1978
{
1979
struct ni_power_info *pi = adev->pm.dpm.priv;
1980
1981
return pi;
1982
}
1983
1984
static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
1985
{
1986
struct si_ps *ps = aps->ps_priv;
1987
1988
return ps;
1989
}
1990
1991
/*
 * Select the PowerTune/CAC/DTE parameter tables for the detected SI asic,
 * refining the per-family defaults by PCI device ID, then derive the
 * enable flags and windowing defaults from the chosen tables. Boards
 * whose DTE tables are PL2-derived get them rescaled via
 * si_update_dte_from_pl2().
 */
static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	bool update_dte_from_pl2 = false;

	if (adev->asic_type == CHIP_TAHITI) {
		si_pi->cac_weights = cac_weights_tahiti;
		si_pi->lcac_config = lcac_tahiti;
		si_pi->cac_override = cac_override_tahiti;
		si_pi->powertune_data = &powertune_data_tahiti;
		si_pi->dte_data = dte_data_tahiti;

		/* Per-device-ID DTE refinements within the Tahiti family. */
		switch (adev->pdev->device) {
		case 0x6798:
			si_pi->dte_data.enable_dte_by_default = true;
			break;
		case 0x6799:
			si_pi->dte_data = dte_data_new_zealand;
			break;
		case 0x6790:
		case 0x6791:
		case 0x6792:
		case 0x679E:
			si_pi->dte_data = dte_data_aruba_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x679B:
			si_pi->dte_data = dte_data_malta;
			update_dte_from_pl2 = true;
			break;
		case 0x679A:
			si_pi->dte_data = dte_data_tahiti_pro;
			update_dte_from_pl2 = true;
			break;
		default:
			if (si_pi->dte_data.enable_dte_by_default == true)
				DRM_ERROR("DTE is not enabled!\n");
			break;
		}
	} else if (adev->asic_type == CHIP_PITCAIRN) {
		si_pi->cac_weights = cac_weights_pitcairn;
		si_pi->lcac_config = lcac_pitcairn;
		si_pi->cac_override = cac_override_pitcairn;
		si_pi->powertune_data = &powertune_data_pitcairn;

		switch (adev->pdev->device) {
		case 0x6810:
		case 0x6818:
			si_pi->dte_data = dte_data_curacao_xt;
			update_dte_from_pl2 = true;
			break;
		case 0x6819:
		case 0x6811:
			si_pi->dte_data = dte_data_curacao_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6800:
		case 0x6806:
			si_pi->dte_data = dte_data_neptune_xt;
			update_dte_from_pl2 = true;
			break;
		default:
			si_pi->dte_data = dte_data_pitcairn;
			break;
		}
	} else if (adev->asic_type == CHIP_VERDE) {
		si_pi->lcac_config = lcac_cape_verde;
		si_pi->cac_override = cac_override_cape_verde;
		si_pi->powertune_data = &powertune_data_cape_verde;

		/* Verde boards also pick their CAC weights by device ID. */
		switch (adev->pdev->device) {
		case 0x683B:
		case 0x683F:
		case 0x6829:
		case 0x6835:
			si_pi->cac_weights = cac_weights_cape_verde_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x682C:
			si_pi->cac_weights = cac_weights_cape_verde_pro;
			si_pi->dte_data = dte_data_sun_xt;
			update_dte_from_pl2 = true;
			break;
		case 0x6825:
		case 0x6827:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6824:
		case 0x682D:
			si_pi->cac_weights = cac_weights_chelsea_xt;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x682F:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		case 0x6820:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xtx;
			break;
		case 0x6821:
			si_pi->cac_weights = cac_weights_heathrow;
			si_pi->dte_data = dte_data_venus_xt;
			break;
		case 0x6823:
		case 0x682B:
		case 0x6822:
		case 0x682A:
			si_pi->cac_weights = cac_weights_chelsea_pro;
			si_pi->dte_data = dte_data_venus_pro;
			break;
		default:
			si_pi->cac_weights = cac_weights_cape_verde;
			si_pi->dte_data = dte_data_cape_verde;
			break;
		}
	} else if (adev->asic_type == CHIP_OLAND) {
		si_pi->lcac_config = lcac_mars_pro;
		si_pi->cac_override = cac_override_oland;
		si_pi->powertune_data = &powertune_data_mars_pro;
		si_pi->dte_data = dte_data_mars_pro;

		switch (adev->pdev->device) {
		case 0x6601:
		case 0x6621:
		case 0x6603:
		case 0x6605:
			si_pi->cac_weights = cac_weights_mars_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6600:
		case 0x6606:
		case 0x6620:
		case 0x6604:
			si_pi->cac_weights = cac_weights_mars_xt;
			update_dte_from_pl2 = true;
			break;
		case 0x6611:
		case 0x6613:
		case 0x6608:
			si_pi->cac_weights = cac_weights_oland_pro;
			update_dte_from_pl2 = true;
			break;
		case 0x6610:
			si_pi->cac_weights = cac_weights_oland_xt;
			update_dte_from_pl2 = true;
			break;
		default:
			/* Unknown Oland: fall back to the full Oland table set. */
			si_pi->cac_weights = cac_weights_oland;
			si_pi->lcac_config = lcac_oland;
			si_pi->cac_override = cac_override_oland;
			si_pi->powertune_data = &powertune_data_oland;
			si_pi->dte_data = dte_data_oland;
			break;
		}
	} else if (adev->asic_type == CHIP_HAINAN) {
		si_pi->cac_weights = cac_weights_hainan;
		si_pi->lcac_config = lcac_oland;
		si_pi->cac_override = cac_override_oland;
		si_pi->powertune_data = &powertune_data_hainan;
		si_pi->dte_data = dte_data_sun_xt;
		update_dte_from_pl2 = true;
	} else {
		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
		return;
	}

	ni_pi->enable_power_containment = false;
	ni_pi->enable_cac = false;
	ni_pi->enable_sq_ramping = false;
	si_pi->enable_dte = false;

	/* Feature enables derive from the selected powertune/DTE tables. */
	if (si_pi->powertune_data->enable_powertune_by_default) {
		ni_pi->enable_power_containment = true;
		ni_pi->enable_cac = true;
		if (si_pi->dte_data.enable_dte_by_default) {
			si_pi->enable_dte = true;
			if (update_dte_from_pl2)
				si_update_dte_from_pl2(adev, &si_pi->dte_data);

		}
		ni_pi->enable_sq_ramping = true;
	}

	ni_pi->driver_calculate_cac_leakage = true;
	ni_pi->cac_configuration_required = true;

	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		si_pi->dyn_powertune_data.l2_lta_window_size =
			si_pi->powertune_data->l2_lta_window_size_default;
		si_pi->dyn_powertune_data.lts_truncate =
			si_pi->powertune_data->lts_truncate_default;
	} else {
		ni_pi->support_cac_long_term_average = false;
		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
		si_pi->dyn_powertune_data.lts_truncate = 0;
	}

	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
}
2194
2195
/*
 * SMC power scaling factor for SI: always 1 (identity) — see
 * si_scale_power_for_smc(), which correspondingly passes power through
 * unchanged. Callers still treat 0 as an error.
 */
static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
{
	return 1;
}
2199
2200
static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
2201
{
2202
u32 xclk;
2203
u32 wintime;
2204
u32 cac_window;
2205
u32 cac_window_size;
2206
2207
xclk = amdgpu_asic_get_xclk(adev);
2208
2209
if (xclk == 0)
2210
return 0;
2211
2212
cac_window = RREG32(mmCG_CAC_CTRL) & CG_CAC_CTRL__CAC_WINDOW_MASK;
2213
cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
2214
2215
wintime = (cac_window_size * 100) / xclk;
2216
2217
return wintime;
2218
}
2219
2220
/*
 * Scale a power value for the SMC. SI's scaling factor is 1 (see
 * si_get_smc_power_scaling_factor()), so this is the identity;
 * scaling_factor is kept for interface symmetry but intentionally unused.
 */
static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return power_in_watts;
}
2224
2225
static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
2226
bool adjust_polarity,
2227
u32 tdp_adjustment,
2228
u32 *tdp_limit,
2229
u32 *near_tdp_limit)
2230
{
2231
u32 adjustment_delta, max_tdp_limit;
2232
2233
if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
2234
return -EINVAL;
2235
2236
max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
2237
2238
if (adjust_polarity) {
2239
*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2240
*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
2241
} else {
2242
*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2243
adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
2244
if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
2245
*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
2246
else
2247
*near_tdp_limit = 0;
2248
}
2249
2250
if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
2251
return -EINVAL;
2252
if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
2253
return -EINVAL;
2254
2255
return 0;
2256
}
2257
2258
/*
 * Program the DPM2 TDP limits into the SMC state table and, when PPM is
 * enabled, upload the PAPM parameters as well.  No-op (returns 0) unless
 * power containment is enabled.
 */
static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
				      struct amdgpu_ps *amdgpu_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (ni_pi->enable_power_containment) {
		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
		PP_SIslands_PAPMParameters *papm_parm;
		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
		u32 tdp_limit;
		u32 near_tdp_limit;
		int ret;

		if (scaling_factor == 0)
			return -EINVAL;

		ret = si_calculate_adjusted_tdp_limits(adev,
						       false, /* ??? polarity hard-coded to "lower" — origin unclear, confirm */
						       adev->pm.dpm.tdp_adjustment,
						       &tdp_limit,
						       &near_tdp_limit);
		if (ret)
			return ret;

		if (adev->pdev->device == 0x6611 && adev->pdev->revision == 0x87) {
			/* Workaround buggy powertune on Radeon 430 and 520. */
			tdp_limit = 32;
			near_tdp_limit = 28;
		}

		/* limits are uploaded big-endian and multiplied by 1000
		 * (watts -> milliwatts) */
		smc_table->dpm2Params.TDPLimit =
			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.NearTDPLimit =
			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
		smc_table->dpm2Params.SafePowerLimit =
			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);

		/* copy three consecutive u32s starting at TDPLimit — assumes
		 * NearTDPLimit and SafePowerLimit immediately follow it in
		 * PP_SIslands_DPM2Parameters */
		ret = amdgpu_si_copy_bytes_to_smc(adev,
						  (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
						  sizeof(u32) * 3,
						  si_pi->sram_end);
		if (ret)
			return ret;

		if (si_pi->enable_ppm) {
			papm_parm = &si_pi->papm_parm;
			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
			/* all-ones: platform limits effectively disabled */
			papm_parm->PlatformPowerLimit = 0xffffffff;
			papm_parm->NearTDPLimitPAPM = 0xffffffff;

			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
							  (u8 *)papm_parm,
							  sizeof(PP_SIslands_PAPMParameters),
							  si_pi->sram_end);
			if (ret)
				return ret;
		}
	}
	return 0;
}
static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
2328
struct amdgpu_ps *amdgpu_state)
2329
{
2330
struct ni_power_info *ni_pi = ni_get_pi(adev);
2331
struct si_power_info *si_pi = si_get_pi(adev);
2332
2333
if (ni_pi->enable_power_containment) {
2334
SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2335
int ret;
2336
2337
ret = amdgpu_si_copy_bytes_to_smc(adev,
2338
(si_pi->state_table_start +
2339
offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2340
offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
2341
(u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
2342
sizeof(u32) * 2,
2343
si_pi->sram_end);
2344
if (ret)
2345
return ret;
2346
}
2347
2348
return 0;
2349
}
2350
2351
static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
2352
const u16 prev_std_vddc,
2353
const u16 curr_std_vddc)
2354
{
2355
u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
2356
u64 prev_vddc = (u64)prev_std_vddc;
2357
u64 curr_vddc = (u64)curr_std_vddc;
2358
u64 pwr_efficiency_ratio, n, d;
2359
2360
if ((prev_vddc == 0) || (curr_vddc == 0))
2361
return 0;
2362
2363
n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
2364
d = prev_vddc * prev_vddc;
2365
pwr_efficiency_ratio = div64_u64(n, d);
2366
2367
if (pwr_efficiency_ratio > (u64)0xFFFF)
2368
return 0;
2369
2370
return (u16)pwr_efficiency_ratio;
2371
}
2372
2373
static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
2374
struct amdgpu_ps *amdgpu_state)
2375
{
2376
struct si_power_info *si_pi = si_get_pi(adev);
2377
2378
if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
2379
amdgpu_state->vclk && amdgpu_state->dclk)
2380
return true;
2381
2382
return false;
2383
}
2384
2385
struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
2386
{
2387
struct evergreen_power_info *pi = adev->pm.dpm.priv;
2388
2389
return pi;
2390
}
2391
2392
/*
 * Fill the per-performance-level DPM2 power containment fields (MaxPS,
 * NearTDPDec, AboveSafeInc, BelowSafeInc, PwrEfficiencyRatio) of an SMC
 * software state.  Level 0 is left zeroed; each higher level is derived
 * from the sclk/vddc step relative to the previous level.
 * Returns 0 on success (or when power containment is disabled),
 * -EINVAL on inconsistent state tables.
 */
static int si_populate_power_containment_values(struct amdgpu_device *adev,
						struct amdgpu_ps *amdgpu_state,
						SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	SISLANDS_SMC_VOLTAGE_VALUE vddc;
	u32 prev_sclk;
	u32 max_sclk;
	u32 min_sclk;
	u16 prev_std_vddc;
	u16 curr_std_vddc;
	int i;
	u16 pwr_efficiency_ratio;
	u8 max_ps_percent;
	bool disable_uvd_power_tune;
	int ret;

	if (ni_pi->enable_power_containment == false)
		return 0;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);

	/* the lowest level never pulse-skips or throttles */
	smc_state->levels[0].dpm2.MaxPS = 0;
	smc_state->levels[0].dpm2.NearTDPDec = 0;
	smc_state->levels[0].dpm2.AboveSafeInc = 0;
	smc_state->levels[0].dpm2.BelowSafeInc = 0;
	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;

	for (i = 1; i < state->performance_level_count; i++) {
		prev_sclk = state->performance_levels[i-1].sclk;
		max_sclk = state->performance_levels[i].sclk;
		/* the first step uses a different MaxPS percentage than
		 * the later ones */
		if (i == 1)
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
		else
			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;

		/* levels must be sorted by ascending sclk */
		if (prev_sclk > max_sclk)
			return -EINVAL;

		if ((max_ps_percent == 0) ||
		    (prev_sclk == max_sclk) ||
		    disable_uvd_power_tune)
			min_sclk = max_sclk;
		else if (i == 1)
			min_sclk = prev_sclk;
		else
			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;

		/* never drop below the lowest level's sclk */
		if (min_sclk < state->performance_levels[0].sclk)
			min_sclk = state->performance_levels[0].sclk;

		if (min_sclk == 0)
			return -EINVAL;

		/* standardized voltage of the previous level */
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i-1].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
		if (ret)
			return ret;

		/* standardized voltage of the current level */
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						state->performance_levels[i].vddc, &vddc);
		if (ret)
			return ret;

		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
		if (ret)
			return ret;

		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
									   prev_std_vddc, curr_std_vddc);

		/* MaxPS: fraction of pulses that may be skipped, scaled
		 * by the sclk span of this level */
		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
	}

	return 0;
}
/*
 * Program the per-level SQ power throttle register images of a software
 * state.  If any compile-time ramp parameter does not fit its register
 * field, SQ ramping is disabled and the throttle fields are set to
 * all-ones (field maximum) instead.
 */
static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 sq_power_throttle, sq_power_throttle2;
	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
	int i;

	if (state->performance_level_count == 0)
		return -EINVAL;

	if (smc_state->levelCount != state->performance_level_count)
		return -EINVAL;

	if (adev->pm.dpm.sq_ramping_threshold == 0)
		return -EINVAL;

	/* sanity-check each parameter against the width of its field;
	 * any overflow disables ramping entirely */
	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (SQ_POWER_THROTTLE__MAX_POWER_MASK >> SQ_POWER_THROTTLE__MAX_POWER__SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (SQ_POWER_THROTTLE__MIN_POWER_MASK >> SQ_POWER_THROTTLE__MIN_POWER__SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK >> SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK >> SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT))
		enable_sq_ramping = false;

	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK >> SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT))
		enable_sq_ramping = false;

	for (i = 0; i < state->performance_level_count; i++) {
		sq_power_throttle = 0;
		sq_power_throttle2 = 0;

		/* ramping only applies to levels at or above the
		 * configured sclk threshold */
		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
		    enable_sq_ramping) {
			sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER << SQ_POWER_THROTTLE__MAX_POWER__SHIFT;
			sq_power_throttle |= SISLANDS_DPM2_SQ_RAMP_MIN_POWER << SQ_POWER_THROTTLE__MIN_POWER__SHIFT;
			sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA << SQ_POWER_THROTTLE2__MAX_POWER_DELTA__SHIFT;
			sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_STI_SIZE << SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE__SHIFT;
			sq_power_throttle2 |= SISLANDS_DPM2_SQ_RAMP_LTI_RATIO << SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO__SHIFT;
		} else {
			/* all-ones in every field when ramping is off */
			sq_power_throttle |= SQ_POWER_THROTTLE__MAX_POWER_MASK |
					     SQ_POWER_THROTTLE__MIN_POWER_MASK;
			sq_power_throttle2 |= SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
					      SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
					      SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
		}

		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
	}

	return 0;
}
static int si_enable_power_containment(struct amdgpu_device *adev,
2546
struct amdgpu_ps *amdgpu_new_state,
2547
bool enable)
2548
{
2549
struct ni_power_info *ni_pi = ni_get_pi(adev);
2550
PPSMC_Result smc_result;
2551
int ret = 0;
2552
2553
if (ni_pi->enable_power_containment) {
2554
if (enable) {
2555
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
2556
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
2557
if (smc_result != PPSMC_Result_OK)
2558
ret = -EINVAL;
2559
}
2560
} else {
2561
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
2562
if (smc_result != PPSMC_Result_OK)
2563
ret = -EINVAL;
2564
}
2565
}
2566
2567
return ret;
2568
}
2569
2570
/*
 * Build the DTE configuration table from the driver-side dte_data and
 * upload it to the SMC.  Returns 0 (and disables DTE) when DTE is not
 * enabled; -EINVAL for an invalid filter-stage count; -ENOMEM on
 * allocation failure.
 */
static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret = 0;
	struct si_dte_data *dte_data = &si_pi->dte_data;
	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
	u32 table_size;
	u8 tdep_count;
	u32 i;

	/* dte_data points into si_pi, so this check is defensive only */
	if (dte_data == NULL)
		si_pi->enable_dte = false;

	if (si_pi->enable_dte == false)
		return 0;

	if (dte_data->k <= 0)
		return -EINVAL;

	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
	if (dte_tables == NULL) {
		si_pi->enable_dte = false;
		return -ENOMEM;
	}

	/* clamp the filter-stage count to what the SMC table can hold */
	table_size = dte_data->k;

	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;

	tdep_count = dte_data->tdep_count;
	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;

	/* multi-byte fields are stored big-endian for the SMC */
	dte_tables->K = cpu_to_be32(table_size);
	dte_tables->T0 = cpu_to_be32(dte_data->t0);
	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
	dte_tables->WindowSize = dte_data->window_size;
	dte_tables->temp_select = dte_data->temp_select;
	dte_tables->DTE_mode = dte_data->dte_mode;
	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);

	/* with temperature-dependent entries, the last stage slot is
	 * reserved for them */
	if (tdep_count > 0)
		table_size--;

	for (i = 0; i < table_size; i++) {
		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
		dte_tables->R[i] = cpu_to_be32(dte_data->r[i]);
	}

	dte_tables->Tdep_count = tdep_count;

	for (i = 0; i < (u32)tdep_count; i++) {
		dte_tables->T_limits[i] = dte_data->t_limits[i];
		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
	}

	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
					  (u8 *)dte_tables,
					  sizeof(Smc_SIslands_DTE_Configuration),
					  si_pi->sram_end);
	kfree(dte_tables);

	return ret;
}
/*
 * Find the extreme VDDC values in the CAC leakage table and lower the
 * minimum by the configured load-line percentage.  Returns -EINVAL when
 * the table or percentage is unusable, or the resulting range is empty.
 */
static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
					  u16 *max, u16 *min)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_cac_leakage_table *table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 loadline_min;
	u32 i;

	if (!table)
		return -EINVAL;

	*max = 0;
	*min = 0xFFFF;

	/* single pass for both extremes */
	for (i = 0; i < table->count; i++) {
		if (*max < table->entries[i].vddc)
			*max = table->entries[i].vddc;
		if (*min > table->entries[i].vddc)
			*min = table->entries[i].vddc;
	}

	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
		return -EINVAL;

	/* pull V0 down by the load-line percentage */
	loadline_min = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;

	if (loadline_min > 0xFFFFUL)
		return -EINVAL;

	*min = (u16)loadline_min;

	if (*min > *max || !(*max) || !(*min))
		return -EINVAL;

	return 0;
}
/* Split the [min, max] voltage range across the LUT's voltage entries,
 * rounding up so the whole range is covered.
 */
static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
{
	return (max - min + SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1) /
		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
}
/*
 * Fill the SMC leakage LUT with temperature-dependent values: one entry
 * per (temperature step, voltage step) pair.  Voltages are generated
 * high-to-low but written into the LUT in ascending order (via the
 * NUM_OF_VOLT_ENTRIES-1-j index).  @vddc_min is unused here; the range
 * is derived from @vddc_max and @vddc_step.
 */
static int si_init_dte_leakage_table(struct amdgpu_device *adev,
				     PP_SIslands_CacConfig *cac_tables,
				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
				     u16 t0, u16 t_step)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 leakage;
	unsigned int i, j;
	s32 t;
	u32 smc_leakage;
	u32 scaling_factor;
	u16 voltage;

	scaling_factor = si_get_smc_power_scaling_factor(adev);

	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
		/* temperature scaled by 1000 — presumably millidegrees;
		 * confirm against si_calculate_leakage_for_v_and_t */
		t = (1000 * (i * t_step + t0));

		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
			voltage = vddc_max - (vddc_step * j);

			si_calculate_leakage_for_v_and_t(adev,
							 &si_pi->powertune_data->leakage_coefficients,
							 voltage,
							 t,
							 si_pi->dyn_powertune_data.cac_leakage,
							 &leakage);

			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;

			/* clamp to the 16-bit LUT entry */
			if (smc_leakage > 0xFFFF)
				smc_leakage = 0xFFFF;

			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
				cpu_to_be16((u16)smc_leakage);
		}
	}
	return 0;
}
/*
 * Fill the SMC leakage LUT using the simplified (fixed-temperature)
 * model: leakage depends only on voltage, so the same value is
 * replicated across every temperature row.  Voltages are generated
 * high-to-low but stored ascending (NUM_OF_VOLT_ENTRIES-1-j).
 * @vddc_min is unused; the range comes from @vddc_max and @vddc_step.
 */
static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
					    PP_SIslands_CacConfig *cac_tables,
					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 leakage;
	unsigned int i, j;
	u32 smc_leakage;
	u32 scaling_factor;
	u16 voltage;

	scaling_factor = si_get_smc_power_scaling_factor(adev);

	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
		voltage = vddc_max - (vddc_step * j);

		/* fixed_kt: temperature term is a powertune constant */
		si_calculate_leakage_for_v(adev,
					   &si_pi->powertune_data->leakage_coefficients,
					   si_pi->powertune_data->fixed_kt,
					   voltage,
					   si_pi->dyn_powertune_data.cac_leakage,
					   &leakage);

		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;

		/* clamp to the 16-bit LUT entry */
		if (smc_leakage > 0xFFFF)
			smc_leakage = 0xFFFF;

		/* same value for every temperature row */
		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
				cpu_to_be16((u16)smc_leakage);
	}
	return 0;
}
/*
 * Build and upload the SMC CAC configuration: program the CAC window,
 * derive the leakage LUT voltage/temperature grid, fill the LUT (full
 * DTE model or simplified model), then copy the table to SMC SRAM and
 * set the ticks-per-us soft register.  On any failure, CAC and power
 * containment are both disabled.  No-op when CAC is disabled.
 */
static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	PP_SIslands_CacConfig *cac_tables = NULL;
	u16 vddc_max, vddc_min, vddc_step;
	u16 t0, t_step;
	u32 load_line_slope, reg;
	int ret = 0;
	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;

	if (ni_pi->enable_cac == false)
		return 0;

	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
	if (!cac_tables)
		return -ENOMEM;

	/* program the CAC window field of CG_CAC_CTRL */
	reg = RREG32(mmCG_CAC_CTRL) & ~CG_CAC_CTRL__CAC_WINDOW_MASK;
	reg |= (si_pi->powertune_data->cac_window << CG_CAC_CTRL__CAC_WINDOW__SHIFT);
	WREG32(mmCG_CAC_CTRL, reg);

	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
	si_pi->dyn_powertune_data.dc_pwr_value =
		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;

	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;

	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
	if (ret)
		goto done_free;

	/* re-derive vddc_min so the LUT grid exactly spans the range */
	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
	t_step = 4;
	t0 = 60;

	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
		ret = si_init_dte_leakage_table(adev, cac_tables,
						vddc_max, vddc_min, vddc_step,
						t0, t_step);
	else
		ret = si_init_simplified_leakage_table(adev, cac_tables,
						       vddc_max, vddc_min, vddc_step);
	if (ret)
		goto done_free;

	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;

	/* multi-byte fields are big-endian for the SMC */
	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
	cac_tables->R_LL = cpu_to_be32(load_line_slope);
	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
	cac_tables->calculation_repeats = cpu_to_be32(2);
	cac_tables->dc_cac = cpu_to_be32(0);
	cac_tables->log2_PG_LKG_SCALE = 12;
	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);

	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
					  (u8 *)cac_tables,
					  sizeof(PP_SIslands_CacConfig),
					  si_pi->sram_end);

	if (ret)
		goto done_free;

	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);

done_free:
	if (ret) {
		/* CAC setup failed: turn off CAC and power containment */
		ni_pi->enable_cac = false;
		ni_pi->enable_power_containment = false;
	}

	kfree(cac_tables);

	return ret;
}
static int si_program_cac_config_registers(struct amdgpu_device *adev,
2843
const struct si_cac_config_reg *cac_config_regs)
2844
{
2845
const struct si_cac_config_reg *config_regs = cac_config_regs;
2846
u32 data = 0, offset;
2847
2848
if (!config_regs)
2849
return -EINVAL;
2850
2851
while (config_regs->offset != 0xFFFFFFFF) {
2852
switch (config_regs->type) {
2853
case SISLANDS_CACCONFIG_CGIND:
2854
offset = SMC_CG_IND_START + config_regs->offset;
2855
if (offset < SMC_CG_IND_END)
2856
data = RREG32_SMC(offset);
2857
break;
2858
default:
2859
data = RREG32(config_regs->offset);
2860
break;
2861
}
2862
2863
data &= ~config_regs->mask;
2864
data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
2865
2866
switch (config_regs->type) {
2867
case SISLANDS_CACCONFIG_CGIND:
2868
offset = SMC_CG_IND_START + config_regs->offset;
2869
if (offset < SMC_CG_IND_END)
2870
WREG32_SMC(offset, data);
2871
break;
2872
default:
2873
WREG32(config_regs->offset, data);
2874
break;
2875
}
2876
config_regs++;
2877
}
2878
return 0;
2879
}
2880
2881
static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
2882
{
2883
struct ni_power_info *ni_pi = ni_get_pi(adev);
2884
struct si_power_info *si_pi = si_get_pi(adev);
2885
int ret;
2886
2887
if ((ni_pi->enable_cac == false) ||
2888
(ni_pi->cac_configuration_required == false))
2889
return 0;
2890
2891
ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
2892
if (ret)
2893
return ret;
2894
ret = si_program_cac_config_registers(adev, si_pi->cac_override);
2895
if (ret)
2896
return ret;
2897
ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
2898
if (ret)
2899
return ret;
2900
2901
return 0;
2902
}
2903
2904
/*
 * Enable or disable SMC-side CAC (and DTE when configured) via SMC
 * messages.  Enabling is skipped when UVD powertune is disabled for the
 * new state.  Long-term-average support is demoted (not an error) if
 * the SMC rejects it; a rejected EnableCac/EnableDTE yields -EINVAL.
 */
static int si_enable_smc_cac(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     bool enable)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (ni_pi->enable_cac) {
		if (enable) {
			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
				/* best-effort: fall back silently if the SMC
				 * refuses long-term averaging */
				if (ni_pi->support_cac_long_term_average) {
					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
					if (smc_result != PPSMC_Result_OK)
						ni_pi->support_cac_long_term_average = false;
				}

				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
					ni_pi->cac_enabled = false;
				} else {
					ni_pi->cac_enabled = true;
				}

				if (si_pi->enable_dte) {
					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
					if (smc_result != PPSMC_Result_OK)
						ret = -EINVAL;
				}
			}
		} else if (ni_pi->cac_enabled) {
			/* disable in reverse order; results are ignored */
			if (si_pi->enable_dte)
				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);

			ni_pi->cac_enabled = false;

			if (ni_pi->support_cac_long_term_average)
				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
		}
	}
	return ret;
}
/*
 * Build the 256-entry SPLL divider table (sclk sampled in 512 steps)
 * and upload it to SMC SRAM.  For each sample the PLL dividers and
 * spread-spectrum values are computed, range-checked against the
 * SMC table field widths, and packed.  On failure, power containment
 * is disabled.
 */
static int si_init_smc_spll_table(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
	SISLANDS_SMC_SCLK_VALUE sclk_params;
	u32 fb_div, p_div;
	u32 clk_s, clk_v;
	u32 sclk = 0;
	int ret = 0;
	u32 tmp;
	int i;

	if (si_pi->spll_table_start == 0)
		return -EINVAL;

	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
	if (spll_table == NULL)
		return -ENOMEM;

	for (i = 0; i < 256; i++) {
		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
		if (ret)
			break;
		/* extract divider/spread-spectrum fields from the
		 * computed register images */
		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK) >> CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;
		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK) >> CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK) >> CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK) >> CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;

		fb_div &= ~0x00001FFF;
		fb_div >>= 1;
		clk_v >>= 6;

		/* bail out if any value exceeds its SMC table field */
		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
			ret = -EINVAL;
		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
			ret = -EINVAL;
		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
			ret = -EINVAL;
		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
			ret = -EINVAL;

		if (ret)
			break;

		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
		spll_table->freq[i] = cpu_to_be32(tmp);

		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
		spll_table->ss[i] = cpu_to_be32(tmp);

		sclk += 512;
	}


	if (!ret)
		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
						  (u8 *)spll_table,
						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
						  si_pi->sram_end);

	if (ret)
		ni_pi->enable_power_containment = false;

	kfree(spll_table);

	return ret;
}
/*
 * Return the lower of @vce_voltage and the highest recorded leakage
 * voltage.  When no leakage entries exist, @vce_voltage is returned
 * unchanged.
 */
static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
						   u16 vce_voltage)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u16 peak = 0;
	int i;

	/* find the largest leakage voltage on record */
	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
		if (peak < si_pi->leakage_voltage.entries[i].voltage)
			peak = si_pi->leakage_voltage.entries[i].voltage;
	}

	if (si_pi->leakage_voltage.count && peak < vce_voltage)
		return peak;

	return vce_voltage;
}
/*
 * Look up the VDDC required for the requested VCE clocks.  Picks the
 * first dependency-table entry satisfying both evclk and ecclk; falls
 * back to the table's last (highest) entry when none matches, in which
 * case -EINVAL is still returned.  The result is always capped by the
 * highest known leakage voltage.
 */
static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
				    u32 evclk, u32 ecclk, u16 *voltage)
{
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret = -EINVAL;
	u32 i;

	/* VCE idle, or nothing to look up */
	if ((evclk == 0 && ecclk == 0) ||
	    (table && table->count == 0)) {
		*voltage = 0;
		return 0;
	}

	for (i = 0; i < table->count; i++) {
		if (evclk <= table->entries[i].evclk &&
		    ecclk <= table->entries[i].ecclk) {
			*voltage = table->entries[i].v;
			ret = 0;
			break;
		}
	}

	/* if no match return the highest voltage */
	if (ret)
		*voltage = table->entries[table->count - 1].v;

	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);

	return ret;
}
static bool si_dpm_vblank_too_short(void *handle)
3073
{
3074
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3075
u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
3076
/* we never hit the non-gddr5 limit so disable it */
3077
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
3078
3079
/* Consider zero vblank time too short and disable MCLK switching.
3080
* Note that the vblank time is set to maximum when no displays are attached,
3081
* so we'll still enable MCLK switching in that case.
3082
*/
3083
if (vblank_time == 0)
3084
return true;
3085
else if (vblank_time < switch_limit)
3086
return true;
3087
else
3088
return false;
3089
3090
}
3091
3092
/*
 * Copy the MC arbiter DRAM timing registers from one register set
 * (F0-F3) to another and then request the arbiter to switch to the
 * destination set.  Returns -EINVAL for an unknown set index.
 */
static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	/* read timing and burst-time of the source set */
	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
		break;
	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
		break;
	default:
		return -EINVAL;
	}

	/* write them into the destination set */
	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
		break;
	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
		break;
	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
		break;
	default:
		return -EINVAL;
	}

	/* kick the arbiter over to the destination set */
	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

	return 0;
}
static void ni_update_current_ps(struct amdgpu_device *adev,
3158
struct amdgpu_ps *rps)
3159
{
3160
struct si_ps *new_ps = si_get_ps(rps);
3161
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3162
struct ni_power_info *ni_pi = ni_get_pi(adev);
3163
3164
eg_pi->current_rps = *rps;
3165
ni_pi->current_ps = *new_ps;
3166
eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
3167
adev->pm.dpm.current_ps = &eg_pi->current_rps;
3168
}
3169
3170
static void ni_update_requested_ps(struct amdgpu_device *adev,
3171
struct amdgpu_ps *rps)
3172
{
3173
struct si_ps *new_ps = si_get_ps(rps);
3174
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3175
struct ni_power_info *ni_pi = ni_get_pi(adev);
3176
3177
eg_pi->requested_rps = *rps;
3178
ni_pi->requested_ps = *new_ps;
3179
eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
3180
adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
3181
}
3182
3183
static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
3184
struct amdgpu_ps *new_ps,
3185
struct amdgpu_ps *old_ps)
3186
{
3187
struct si_ps *new_state = si_get_ps(new_ps);
3188
struct si_ps *current_state = si_get_ps(old_ps);
3189
3190
if ((new_ps->vclk == old_ps->vclk) &&
3191
(new_ps->dclk == old_ps->dclk))
3192
return;
3193
3194
if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
3195
current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3196
return;
3197
3198
amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
3199
}
3200
3201
/*
 * Program UVD clocks after the engine-clock switch, but only when the
 * top-level sclk did not decrease (the falling case runs before the switch).
 */
static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
						 struct amdgpu_ps *new_ps,
						 struct amdgpu_ps *old_ps)
{
	struct si_ps *new_state = si_get_ps(new_ps);
	struct si_ps *current_state = si_get_ps(old_ps);

	/* No work if UVD clocks are unchanged. */
	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))
		return;

	/* sclk decreasing: the "before" hook already handled it. */
	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
		return;

	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
}
3218
3219
/*
 * Return the smallest table entry >= @voltage; fall back to the last
 * (highest) entry when no entry covers the request.
 */
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
{
	unsigned int idx = 0;

	while (idx < table->count) {
		if (table->entries[idx].value >= voltage)
			return table->entries[idx].value;
		idx++;
	}

	return table->entries[table->count - 1].value;
}
3229
3230
/*
 * Snap @requested_clock to the first valid clock >= it (or the highest
 * valid clock if none), always capped at @max_clock.  With no valid-clock
 * table, just clamp the request.
 */
static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
				u32 max_clock, u32 requested_clock)
{
	unsigned int idx;
	u32 candidate;

	if (!clocks || !clocks->count)
		return (requested_clock < max_clock) ? requested_clock : max_clock;

	/* Default to the table's last entry, then look for a better match. */
	candidate = clocks->values[clocks->count - 1];
	for (idx = 0; idx < clocks->count; idx++) {
		if (clocks->values[idx] >= requested_clock) {
			candidate = clocks->values[idx];
			break;
		}
	}

	return (candidate < max_clock) ? candidate : max_clock;
}
3246
3247
/* Snap @requested_mclk to a valid memory clock, capped at @max_mclk. */
static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
			      u32 max_mclk, u32 requested_mclk)
{
	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
				    max_mclk, requested_mclk);
}
3253
3254
/* Snap @requested_sclk to a valid engine clock, capped at @max_sclk. */
static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
			      u32 max_sclk, u32 requested_sclk)
{
	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
				    max_sclk, requested_sclk);
}
3260
3261
static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
3262
u32 *max_clock)
3263
{
3264
u32 i, clock = 0;
3265
3266
if ((table == NULL) || (table->count == 0)) {
3267
*max_clock = clock;
3268
return;
3269
}
3270
3271
for (i = 0; i < table->count; i++) {
3272
if (clock < table->entries[i].clk)
3273
clock = table->entries[i].clk;
3274
}
3275
*max_clock = clock;
3276
}
3277
3278
static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
3279
u32 clock, u16 max_voltage, u16 *voltage)
3280
{
3281
u32 i;
3282
3283
if ((table == NULL) || (table->count == 0))
3284
return;
3285
3286
for (i= 0; i < table->count; i++) {
3287
if (clock <= table->entries[i].clk) {
3288
if (*voltage < table->entries[i].v)
3289
*voltage = (u16)((table->entries[i].v < max_voltage) ?
3290
table->entries[i].v : max_voltage);
3291
return;
3292
}
3293
}
3294
3295
*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
3296
}
3297
3298
/*
 * Keep a performance level's mclk and sclk within the platform-allowed
 * ratio/delta of each other, bumping the lagging clock (bounded by
 * @max_limits) when the constraint is violated.
 */
static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
					  const struct amdgpu_clock_and_voltage_limits *max_limits,
					  struct rv7xx_pl *pl)
{

	if ((pl->mclk == 0) || (pl->sclk == 0))
		return;

	if (pl->mclk == pl->sclk)
		return;

	if (pl->mclk > pl->sclk) {
		/* ceil(mclk/sclk) must not exceed mclk_sclk_ratio; if it
		 * does, raise sclk to ceil(mclk/ratio).
		 */
		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
			pl->sclk = btc_get_valid_sclk(adev,
						      max_limits->sclk,
						      (pl->mclk +
						       (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
	} else {
		/* sclk may exceed mclk by at most sclk_mclk_delta. */
		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
			pl->mclk = btc_get_valid_mclk(adev,
						      max_limits->mclk,
						      pl->sclk -
						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
	}
}
3324
3325
/*
 * Keep vddc and vddci within vddc_vddci_delta of each other by raising
 * the lower rail to a valid table voltage (capped at its max).
 */
static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
					  u16 max_vddc, u16 max_vddci,
					  u16 *vddc, u16 *vddci)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	u16 new_voltage;

	/* A zero voltage means "not specified" — nothing to balance. */
	if ((0 == *vddc) || (0 == *vddci))
		return;

	if (*vddc > *vddci) {
		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			/* Pull vddci up to within delta of vddc. */
			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
		}
	} else {
		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			/* Pull vddc up to within delta of vddci. */
			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
		}
	}
}
3349
3350
/*
 * Derive the (p, u) pair used by the power-management counters from
 * interval @i (percent), reference clock @r_c and pre-divider bits @p_b.
 */
static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
				   u32 *p, u32 *u)
{
	u32 i_c = (i * r_c) / 100;
	u32 shifted = i_c >> p_b;
	u32 bits = 0;

	/* bits = index of the highest set bit of (i_c >> p_b), plus one. */
	while (shifted) {
		bits++;
		shifted >>= 1;
	}

	*u = (bits + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
3368
3369
/*
 * Split target period @t into low/high activity thresholds (*@tl/*@th)
 * for hysteresis @h over the frequency range @fl..@fh, using fixed-point
 * integer arithmetic.  Returns -EINVAL for a degenerate frequency range.
 */
static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	/* k = fh/fl scaled by 100 (percent). */
	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;	/* round to nearest */
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
3389
3390
/* True if the ATOM classification flags mark this as a UVD (video) state. */
static bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & (ATOM_PPLIB_CLASSIFICATION_UVDSTATE |
		     ATOM_PPLIB_CLASSIFICATION_HD2STATE |
		     ATOM_PPLIB_CLASSIFICATION_HDSTATE |
		     ATOM_PPLIB_CLASSIFICATION_SDSTATE))
		return true;

	return (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) != 0;
}
3404
3405
/* Memory module index, latched by the VBIOS in BIOS_SCRATCH_4 bits [23:16]. */
static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
}
3409
3410
static void rv770_get_max_vddc(struct amdgpu_device *adev)
3411
{
3412
struct rv7xx_power_info *pi = rv770_get_pi(adev);
3413
u16 vddc;
3414
3415
if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
3416
pi->max_vddc = 0;
3417
else
3418
pi->max_vddc = vddc;
3419
}
3420
3421
static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
3422
{
3423
struct rv7xx_power_info *pi = rv770_get_pi(adev);
3424
struct amdgpu_atom_ss ss;
3425
3426
pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3427
ASIC_INTERNAL_ENGINE_SS, 0);
3428
pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3429
ASIC_INTERNAL_MEMORY_SS, 0);
3430
3431
if (pi->sclk_ss || pi->mclk_ss)
3432
pi->dynamic_ss = true;
3433
else
3434
pi->dynamic_ss = false;
3435
}
3436
3437
3438
/*
 * Clamp and massage the requested power state @rps in place so every
 * performance level satisfies board quirks, voltage-dependency tables,
 * display/VCE requirements and AC/DC limits.  The clamping steps are
 * order-sensitive; do not reorder them.
 */
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	const struct amd_pp_display_configuration *display_cfg =
		&adev->pm.pm_display_cfg;
	struct si_ps *ps = si_get_ps(rps);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching = false;
	bool disable_sclk_switching = false;
	u32 mclk, sclk;
	u16 vddc, vddci, min_vce_voltage = 0;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	u32 max_sclk = 0, max_mclk = 0;
	u32 high_pixelclock_count = 0;
	int i;

	/* Per-SKU clock caps for boards with known stability issues. */
	if (adev->asic_type == CHIP_HAINAN) {
		if ((adev->pdev->revision == 0x81) ||
		    (adev->pdev->revision == 0xC3) ||
		    (adev->pdev->device == 0x6664) ||
		    (adev->pdev->device == 0x6665) ||
		    (adev->pdev->device == 0x6667)) {
			max_sclk = 75000;
		}
		if ((adev->pdev->revision == 0xC3) ||
		    (adev->pdev->device == 0x6665)) {
			max_sclk = 60000;
			max_mclk = 80000;
		}
	} else if (adev->asic_type == CHIP_OLAND) {
		if ((adev->pdev->revision == 0xC7) ||
		    (adev->pdev->revision == 0x80) ||
		    (adev->pdev->revision == 0x81) ||
		    (adev->pdev->revision == 0x83) ||
		    (adev->pdev->revision == 0x87 &&
		     adev->pdev->device != 0x6611) ||
		    (adev->pdev->device == 0x6604) ||
		    (adev->pdev->device == 0x6605)) {
			max_sclk = 75000;
		} else if (adev->pdev->revision == 0x87 &&
			   adev->pdev->device == 0x6611) {
			/* Radeon 430 and 520 */
			max_sclk = 78000;
		}
	}

	/* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
	 * For example, 4K 60Hz and 1080p 144Hz fall into this category.
	 * Find number of such displays connected.
	 */
	for (i = 0; i < display_cfg->num_display; i++) {
		/* The array only contains active displays. */
		if (display_cfg->displays[i].pixel_clock > 297000)
			high_pixelclock_count++;
	}

	/* These are some ad-hoc fixes to some issues observed with SI GPUs.
	 * They are necessary because we don't have something like dce_calcs
	 * for these GPUs to calculate bandwidth requirements.
	 */
	if (high_pixelclock_count) {
		/* Work around flickering lines at the bottom edge
		 * of the screen when using a single 4K 60Hz monitor.
		 */
		disable_mclk_switching = true;

		/* On Oland, we observe some flickering when two 4K 60Hz
		 * displays are connected, possibly because voltage is too low.
		 * Raise the voltage by requiring a higher SCLK.
		 * (Voltage cannot be adjusted independently without also SCLK.)
		 */
		if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
			disable_sclk_switching = true;
	}

	/* VCE active: adopt the VCE level's clocks and query its minimum vddc. */
	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
					 &min_vce_voltage);
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	/* mclk switching is unsafe with multiple displays or short vblanks. */
	if ((adev->pm.pm_display_cfg.num_display > 1) ||
	    si_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;

	/* UVD active (vclk/dclk set): pin both clock domains. */
	if (rps->vclk || rps->dclk) {
		disable_mclk_switching = true;
		disable_sclk_switching = true;
	}

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	/* Make vddc monotonically non-decreasing across levels. */
	for (i = ps->performance_level_count - 2; i >= 0; i--) {
		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
	}
	/* On battery, clamp every level to the DC limits. */
	if (adev->pm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
		/* Apply the per-SKU quirk caps last. */
		if (max_mclk) {
			if (ps->performance_levels[i].mclk > max_mclk)
				ps->performance_levels[i].mclk = max_mclk;
		}
		if (max_sclk) {
			if (ps->performance_levels[i].sclk > max_sclk)
				ps->performance_levels[i].sclk = max_sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	/* Pick the base (level-0) clocks: highest level when switching is
	 * disabled, lowest level otherwise.
	 */
	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
	} else {
		mclk = ps->performance_levels[0].mclk;
		vddci = ps->performance_levels[0].vddci;
	}

	if (disable_sclk_switching) {
		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
	} else {
		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;
	}

	/* Ensure the base clocks meet the VCE level's requirements. */
	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	if (disable_sclk_switching) {
		/* Flatten all levels to the single highest sclk/vddc. */
		sclk = ps->performance_levels[0].sclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (sclk < ps->performance_levels[i].sclk)
				sclk = ps->performance_levels[i].sclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].sclk = sclk;
			ps->performance_levels[i].vddc = vddc;
		}
	} else {
		/* Make sclk/vddc monotonically non-decreasing. */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
		}
	}

	if (disable_mclk_switching) {
		/* Flatten all levels to the single highest mclk/vddci. */
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;
		}
		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;
		}
	} else {
		/* Make mclk/vddci monotonically non-decreasing. */
		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
		}
	}

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(adev, max_limits,
					      &ps->performance_levels[i]);

	/* Raise voltages to satisfy VCE and the dependency tables. */
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc < min_vce_voltage)
			ps->performance_levels[i].vddc = min_vce_voltage;
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   display_cfg->display_clk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
	}

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(adev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);
	}

	/* The state is DC-compatible only if all vddc values fit the DC limit. */
	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;
	}
}
3688
3689
#if 0
3690
static int si_read_smc_soft_register(struct amdgpu_device *adev,
3691
u16 reg_offset, u32 *value)
3692
{
3693
struct si_power_info *si_pi = si_get_pi(adev);
3694
3695
return amdgpu_si_read_smc_sram_dword(adev,
3696
si_pi->soft_regs_start + reg_offset, value,
3697
si_pi->sram_end);
3698
}
3699
#endif
3700
3701
/*
 * Write @value into the driver<->SMC soft-register area at @reg_offset.
 * Returns 0 on success or the SRAM-write error code.
 */
static int si_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	return amdgpu_si_write_smc_sram_dword(adev,
					      si_pi->soft_regs_start + reg_offset,
					      value, si_pi->sram_end);
}
3710
3711
static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
3712
{
3713
bool ret = false;
3714
u32 tmp, width, row, column, bank, density;
3715
bool is_memory_gddr5, is_special;
3716
3717
tmp = RREG32(MC_SEQ_MISC0);
3718
is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
3719
is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
3720
& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
3721
3722
WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
3723
width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
3724
3725
tmp = RREG32(mmMC_ARB_RAMCFG);
3726
row = ((tmp & MC_ARB_RAMCFG__NOOFROWS_MASK) >> MC_ARB_RAMCFG__NOOFROWS__SHIFT) + 10;
3727
column = ((tmp & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT) + 8;
3728
bank = ((tmp & MC_ARB_RAMCFG__NOOFBANK_MASK) >> MC_ARB_RAMCFG__NOOFBANK__SHIFT) + 2;
3729
3730
density = (1 << (row + column - 20 + bank)) * width;
3731
3732
if ((adev->pdev->device == 0x6819) &&
3733
is_memory_gddr5 && is_special && (density == 0x400))
3734
ret = true;
3735
3736
return ret;
3737
}
3738
3739
/*
 * Build si_pi->leakage_voltage: the table mapping ATOM leakage indices
 * (SISLANDS_LEAKAGE_INDEX0 + i) to their real voltages.
 */
static void si_get_leakage_vddc(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u16 vddc, count = 0;
	int i, ret;

	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);

		/* Skip failed lookups, zero voltages, and entries that just
		 * echo the leakage index back.
		 */
		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
			si_pi->leakage_voltage.entries[count].voltage = vddc;
			si_pi->leakage_voltage.entries[count].leakage_index =
				SISLANDS_LEAKAGE_INDEX0 + i;
			count++;
		}
	}
	si_pi->leakage_voltage.count = count;
}
3757
3758
/*
 * Translate an ATOM leakage @index (0xffXX encoding) into its real voltage
 * via the table built by si_get_leakage_vddc().
 * Returns 0 on success, -EINVAL for a malformed index, -EAGAIN when the
 * index is valid but not in the table.
 */
static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
						     u32 index, u16 *leakage_voltage)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	int i;

	if (leakage_voltage == NULL)
		return -EINVAL;

	/* Leakage indices always carry 0xff in the high byte. */
	if ((index & 0xff00) != 0xff00)
		return -EINVAL;

	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
		return -EINVAL;

	if (index < SISLANDS_LEAKAGE_INDEX0)
		return -EINVAL;

	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
			return 0;
		}
	}
	return -EAGAIN;
}
3784
3785
/*
 * Configure the thermal-protection DPM event source from the bitmask of
 * active throttle @sources, and enable/disable thermal protection in
 * GENERAL_PWRMGT accordingly.
 */
static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	bool want_thermal_protection;
	/* Only meaningful (and only read) when want_thermal_protection is set. */
	enum si_dpm_event_src dpm_event_src;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
		WREG32_P(mmCG_THERMAL_CTRL, dpm_event_src << CG_THERMAL_CTRL__DPM_EVENT_SRC__SHIFT, ~CG_THERMAL_CTRL__DPM_EVENT_SRC_MASK);
		/* Clearing the DIS bit enables thermal protection. */
		if (pi->thermal_protection)
			WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
	} else {
		WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
	}
}
3819
3820
static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
3821
enum si_dpm_auto_throttle_src source,
3822
bool enable)
3823
{
3824
struct rv7xx_power_info *pi = rv770_get_pi(adev);
3825
3826
if (enable) {
3827
if (!(pi->active_auto_throttle_sources & (1 << source))) {
3828
pi->active_auto_throttle_sources |= 1 << source;
3829
si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3830
}
3831
} else {
3832
if (pi->active_auto_throttle_sources & (1 << source)) {
3833
pi->active_auto_throttle_sources &= ~(1 << source);
3834
si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3835
}
3836
}
3837
}
3838
3839
/* Globally enable dynamic power management in GENERAL_PWRMGT. */
static void si_start_dpm(struct amdgpu_device *adev)
{
	WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
3843
3844
/* Globally disable dynamic power management in GENERAL_PWRMGT. */
static void si_stop_dpm(struct amdgpu_device *adev)
{
	WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK);
}
3848
3849
/* Enable/disable engine-clock power management (bit is active-low: "OFF"). */
static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);
	else
		WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK, ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK);

}
3857
3858
#if 0
3859
static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
3860
u32 thermal_level)
3861
{
3862
PPSMC_Result ret;
3863
3864
if (thermal_level == 0) {
3865
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
3866
if (ret == PPSMC_Result_OK)
3867
return 0;
3868
else
3869
return -EINVAL;
3870
}
3871
return 0;
3872
}
3873
3874
static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
3875
{
3876
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
3877
}
3878
#endif
3879
3880
#if 0
3881
static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
3882
{
3883
if (ac_power)
3884
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
3885
0 : -EINVAL;
3886
3887
return 0;
3888
}
3889
#endif
3890
3891
/* Pass @parameter to the SMC via SMC_SCRATCH0, then issue @msg. */
static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_SCRATCH0, parameter);
	return amdgpu_si_send_msg_to_smc(adev, msg);
}
3897
3898
/*
 * Drop any forced level and restrict the SMC to a single enabled level
 * before a state switch.  Returns 0 on success, -EINVAL on SMC failure.
 */
static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
		return -EINVAL;

	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3906
3907
/*
 * Force the DPM performance level via SMC messages: HIGH pins the top
 * level, LOW pins level 0, AUTO releases forcing.  The order of the two
 * messages differs per branch intentionally.  Returns 0 on success,
 * -EINVAL when the SMC rejects a message.
 */
static int si_dpm_force_performance_level(void *handle,
				enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
	struct si_ps *ps = si_get_ps(rps);
	u32 levels = ps->performance_level_count;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
			return -EINVAL;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
			return -EINVAL;

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
			return -EINVAL;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}
3939
3940
#if 0
3941
static int si_set_boot_state(struct amdgpu_device *adev)
3942
{
3943
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
3944
0 : -EINVAL;
3945
}
3946
#endif
3947
3948
/* Ask the SMC to switch to the driver-programmed (software) state. */
static int si_set_sw_state(struct amdgpu_device *adev)
{
	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3953
3954
/* Halt the SMC and wait until it reports inactive. */
static int si_halt_smc(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
		return -EINVAL;

	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3962
3963
/* Flush the SMC data cache, then resume SMC execution. */
static int si_resume_smc(struct amdgpu_device *adev)
{
	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
		return -EINVAL;

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
3971
3972
/* Set the SMC entry point, release it from reset, and start its clock. */
static void si_dpm_start_smc(struct amdgpu_device *adev)
{
	amdgpu_si_program_jump_on_start(adev);
	amdgpu_si_start_smc(adev);
	amdgpu_si_smc_clock(adev, true);
}
3978
3979
/* Put the SMC into reset and gate its clock. */
static void si_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_si_reset_smc(adev);
	amdgpu_si_smc_clock(adev, false);
}
3984
3985
static int si_process_firmware_header(struct amdgpu_device *adev)
3986
{
3987
struct si_power_info *si_pi = si_get_pi(adev);
3988
u32 tmp;
3989
int ret;
3990
3991
ret = amdgpu_si_read_smc_sram_dword(adev,
3992
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3993
SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
3994
&tmp, si_pi->sram_end);
3995
if (ret)
3996
return ret;
3997
3998
si_pi->state_table_start = tmp;
3999
4000
ret = amdgpu_si_read_smc_sram_dword(adev,
4001
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4002
SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
4003
&tmp, si_pi->sram_end);
4004
if (ret)
4005
return ret;
4006
4007
si_pi->soft_regs_start = tmp;
4008
4009
ret = amdgpu_si_read_smc_sram_dword(adev,
4010
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4011
SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
4012
&tmp, si_pi->sram_end);
4013
if (ret)
4014
return ret;
4015
4016
si_pi->mc_reg_table_start = tmp;
4017
4018
ret = amdgpu_si_read_smc_sram_dword(adev,
4019
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4020
SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
4021
&tmp, si_pi->sram_end);
4022
if (ret)
4023
return ret;
4024
4025
si_pi->fan_table_start = tmp;
4026
4027
ret = amdgpu_si_read_smc_sram_dword(adev,
4028
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4029
SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
4030
&tmp, si_pi->sram_end);
4031
if (ret)
4032
return ret;
4033
4034
si_pi->arb_table_start = tmp;
4035
4036
ret = amdgpu_si_read_smc_sram_dword(adev,
4037
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4038
SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
4039
&tmp, si_pi->sram_end);
4040
if (ret)
4041
return ret;
4042
4043
si_pi->cac_table_start = tmp;
4044
4045
ret = amdgpu_si_read_smc_sram_dword(adev,
4046
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4047
SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
4048
&tmp, si_pi->sram_end);
4049
if (ret)
4050
return ret;
4051
4052
si_pi->dte_table_start = tmp;
4053
4054
ret = amdgpu_si_read_smc_sram_dword(adev,
4055
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4056
SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
4057
&tmp, si_pi->sram_end);
4058
if (ret)
4059
return ret;
4060
4061
si_pi->spll_table_start = tmp;
4062
4063
ret = amdgpu_si_read_smc_sram_dword(adev,
4064
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4065
SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
4066
&tmp, si_pi->sram_end);
4067
if (ret)
4068
return ret;
4069
4070
si_pi->papm_cfg_table_start = tmp;
4071
4072
return ret;
4073
}
4074
4075
/* Snapshot the SPLL/MPLL clock registers so they can be restored later. */
static void si_read_clock_registers(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	si_pi->clock_registers.cg_spll_func_cntl = RREG32(mmCG_SPLL_FUNC_CNTL);
	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(mmCG_SPLL_FUNC_CNTL_2);
	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(mmCG_SPLL_FUNC_CNTL_3);
	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(mmCG_SPLL_FUNC_CNTL_4);
	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(mmCG_SPLL_SPREAD_SPECTRUM);
	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(mmCG_SPLL_SPREAD_SPECTRUM_2);
	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}
4095
4096
/* Toggle thermal protection (the register bit is a *disable* flag). */
static void si_enable_thermal_protection(struct amdgpu_device *adev,
					 bool enable)
{
	if (enable)
		WREG32_P(mmGENERAL_PWRMGT, 0, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
	else
		WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK, ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK);
}
4104
4105
/* Enable static (ACPI) power management in GENERAL_PWRMGT. */
static void si_enable_acpi_power_management(struct amdgpu_device *adev)
{
	WREG32_P(mmGENERAL_PWRMGT, GENERAL_PWRMGT__STATIC_PM_EN_MASK, ~GENERAL_PWRMGT__STATIC_PM_EN_MASK);
}
4109
4110
#if 0
4111
static int si_enter_ulp_state(struct amdgpu_device *adev)
4112
{
4113
WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
4114
4115
udelay(25000);
4116
4117
return 0;
4118
}
4119
4120
static int si_exit_ulp_state(struct amdgpu_device *adev)
4121
{
4122
int i;
4123
4124
WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
4125
4126
udelay(7000);
4127
4128
for (i = 0; i < adev->usec_timeout; i++) {
4129
if (RREG32(SMC_RESP_0) == 1)
4130
break;
4131
udelay(1000);
4132
}
4133
4134
return 0;
4135
}
4136
#endif
4137
4138
/* Tell the SMC whether any display is active. Returns 0 or -EINVAL. */
static int si_notify_smc_display_change(struct amdgpu_device *adev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ?
		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
4147
4148
static void si_program_response_times(struct amdgpu_device *adev)
4149
{
4150
u32 voltage_response_time, acpi_delay_time, vbi_time_out;
4151
u32 vddc_dly, acpi_dly, vbi_dly;
4152
u32 reference_clock;
4153
4154
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
4155
4156
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
4157
4158
if (voltage_response_time == 0)
4159
voltage_response_time = 1000;
4160
4161
acpi_delay_time = 15000;
4162
vbi_time_out = 100000;
4163
4164
reference_clock = amdgpu_asic_get_xclk(adev);
4165
4166
vddc_dly = (voltage_response_time * reference_clock) / 100;
4167
acpi_dly = (acpi_delay_time * reference_clock) / 100;
4168
vbi_dly = (vbi_time_out * reference_clock) / 100;
4169
4170
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
4171
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
4172
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
4173
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
4174
}
4175
4176
static void si_program_ds_registers(struct amdgpu_device *adev)
4177
{
4178
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4179
u32 tmp;
4180
4181
/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4182
if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
4183
tmp = 0x10;
4184
else
4185
tmp = 0x1;
4186
4187
if (eg_pi->sclk_deep_sleep) {
4188
WREG32_P(mmMISC_CLK_CNTL, (tmp << MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL__SHIFT), ~MISC_CLK_CNTL__DEEP_SLEEP_CLK_SEL_MASK);
4189
WREG32_P(mmCG_SPLL_AUTOSCALE_CNTL, CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK,
4190
~CG_SPLL_AUTOSCALE_CNTL__AUTOSCALE_ON_SS_CLEAR_MASK);
4191
}
4192
}
4193
4194
static void si_program_display_gap(struct amdgpu_device *adev)
4195
{
4196
const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
4197
u32 tmp, pipe;
4198
4199
tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
4200
if (cfg->num_display > 0)
4201
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
4202
else
4203
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
4204
4205
if (cfg->num_display > 1)
4206
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
4207
else
4208
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
4209
4210
WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
4211
4212
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
4213
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
4214
4215
if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
4216
pipe = cfg->crtc_index;
4217
4218
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
4219
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
4220
WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
4221
}
4222
4223
/* Setting this to false forces the performance state to low if the crtcs are disabled.
4224
* This can be a problem on PowerXpress systems or if you want to use the card
4225
* for offscreen rendering or compute if there are no crtcs enabled.
4226
*/
4227
si_notify_smc_display_change(adev, cfg->num_display > 0);
4228
}
4229
4230
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	if (!enable) {
		/* Turn off both the SPLL SS enable and dynamic SS. */
		WREG32_P(mmCG_SPLL_SPREAD_SPECTRUM, 0,
			 ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
		WREG32_P(mmGENERAL_PWRMGT, 0,
			 ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
		return;
	}

	/* Only enable dynamic SS when sclk spread spectrum is supported. */
	if (pi->sclk_ss)
		WREG32_P(mmGENERAL_PWRMGT,
			 GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK,
			 ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK);
}
4242
4243
static void si_setup_bsp(struct amdgpu_device *adev)
4244
{
4245
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4246
u32 xclk = amdgpu_asic_get_xclk(adev);
4247
4248
r600_calculate_u_and_p(pi->asi,
4249
xclk,
4250
16,
4251
&pi->bsp,
4252
&pi->bsu);
4253
4254
r600_calculate_u_and_p(pi->pasi,
4255
xclk,
4256
16,
4257
&pi->pbsp,
4258
&pi->pbsu);
4259
4260
4261
pi->dsp = (pi->bsp << CG_BSP__BSP__SHIFT) | (pi->bsu << CG_BSP__BSU__SHIFT);
4262
pi->psp = (pi->pbsp << CG_BSP__BSP__SHIFT) | (pi->pbsu << CG_BSP__BSU__SHIFT);
4263
4264
WREG32(mmCG_BSP, pi->dsp);
4265
}
4266
4267
static void si_program_git(struct amdgpu_device *adev)
4268
{
4269
WREG32_P(mmCG_GIT, R600_GICST_DFLT << CG_GIT__CG_GICST__SHIFT, ~CG_GIT__CG_GICST_MASK);
4270
}
4271
4272
static void si_program_tp(struct amdgpu_device *adev)
4273
{
4274
int i;
4275
enum r600_td td = R600_TD_DFLT;
4276
4277
for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
4278
WREG32(mmCG_FFCT_0 + i, (r600_utc[i] << CG_FFCT_0__UTC_0__SHIFT | r600_dtc[i] << CG_FFCT_0__DTC_0__SHIFT));
4279
4280
if (td == R600_TD_AUTO)
4281
WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
4282
else
4283
WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK, ~SCLK_PWRMGT_CNTL__FIR_FORCE_TREND_SEL_MASK);
4284
4285
if (td == R600_TD_UP)
4286
WREG32_P(mmSCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
4287
4288
if (td == R600_TD_DOWN)
4289
WREG32_P(mmSCLK_PWRMGT_CNTL, SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK, ~SCLK_PWRMGT_CNTL__FIR_TREND_MODE_MASK);
4290
}
4291
4292
static void si_program_tpp(struct amdgpu_device *adev)
4293
{
4294
WREG32(mmCG_TPC, R600_TPC_DFLT);
4295
}
4296
4297
static void si_program_sstp(struct amdgpu_device *adev)
4298
{
4299
WREG32(mmCG_SSP, (R600_SSTU_DFLT << CG_SSP__SSTU__SHIFT| R600_SST_DFLT << CG_SSP__SST__SHIFT));
4300
}
4301
4302
static void si_enable_display_gap(struct amdgpu_device *adev)
4303
{
4304
u32 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL);
4305
4306
tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
4307
tmp |= (R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT |
4308
R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT);
4309
4310
tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG_MASK);
4311
tmp |= (R600_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP1_GAP_MCHG__SHIFT |
4312
R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP_MCHG__SHIFT);
4313
WREG32(mmCG_DISPLAY_GAP_CNTL, tmp);
4314
}
4315
4316
static void si_program_vc(struct amdgpu_device *adev)
4317
{
4318
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4319
4320
WREG32(mmCG_FTV, pi->vrc);
4321
}
4322
4323
static void si_clear_vc(struct amdgpu_device *adev)
4324
{
4325
WREG32(mmCG_FTV, 0);
4326
}
4327
4328
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	/* Map a DDR3 memory clock onto the 0x0-0xf frequency-ratio index:
	 * one step per 5000 clock units above 10000, clamped at both ends.
	 */
	if (memory_clock < 10000)
		return 0;
	if (memory_clock >= 80000)
		return 0x0f;
	return (u8)((memory_clock - 10000) / 5000 + 1);
}
4340
4341
static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	if (strobe_mode) {
		/* Strobe-mode table: one step per 2500 units above 10000,
		 * clamped to [0x00, 0x0f].
		 */
		if (memory_clock < 12500)
			return 0x00;
		if (memory_clock > 47500)
			return 0x0f;
		return (u8)((memory_clock - 10000) / 2500);
	}

	/* Normal-mode table: one step per 5000 units above 60000. */
	if (memory_clock < 65000)
		return 0x00;
	if (memory_clock > 135000)
		return 0x0f;
	return (u8)((memory_clock - 60000) / 5000);
}
4362
4363
static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	/* Strobe mode applies at or below the configured threshold. */
	bool strobe_mode = (mclk <= pi->mclk_strobe_mode_threshold);
	u8 result;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
	else
		result = si_get_ddr3_mclk_frequency_ratio(mclk);

	if (strobe_mode)
		result |= SISLANDS_SMC_STROBE_ENABLE;

	return result;
}
4382
4383
static int si_upload_firmware(struct amdgpu_device *adev)
4384
{
4385
struct si_power_info *si_pi = si_get_pi(adev);
4386
4387
amdgpu_si_reset_smc(adev);
4388
amdgpu_si_smc_clock(adev, false);
4389
4390
return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
4391
}
4392
4393
static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
4394
const struct atom_voltage_table *table,
4395
const struct amdgpu_phase_shedding_limits_table *limits)
4396
{
4397
u32 data, num_bits, num_levels;
4398
4399
if ((table == NULL) || (limits == NULL))
4400
return false;
4401
4402
data = table->mask_low;
4403
4404
num_bits = hweight32(data);
4405
4406
if (num_bits == 0)
4407
return false;
4408
4409
num_levels = (1 << num_bits);
4410
4411
if (table->count != num_levels)
4412
return false;
4413
4414
if (limits->count != (num_levels - 1))
4415
return false;
4416
4417
return true;
4418
}
4419
4420
static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
4421
u32 max_voltage_steps,
4422
struct atom_voltage_table *voltage_table)
4423
{
4424
unsigned int i, diff;
4425
4426
if (voltage_table->count <= max_voltage_steps)
4427
return;
4428
4429
diff = voltage_table->count - max_voltage_steps;
4430
4431
for (i= 0; i < max_voltage_steps; i++)
4432
voltage_table->entries[i] = voltage_table->entries[i + diff];
4433
4434
voltage_table->count = max_voltage_steps;
4435
}
4436
4437
static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
4438
struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
4439
struct atom_voltage_table *voltage_table)
4440
{
4441
u32 i;
4442
4443
if (voltage_dependency_table == NULL)
4444
return -EINVAL;
4445
4446
voltage_table->mask_low = 0;
4447
voltage_table->phase_delay = 0;
4448
4449
voltage_table->count = voltage_dependency_table->count;
4450
for (i = 0; i < voltage_table->count; i++) {
4451
voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
4452
voltage_table->entries[i].smio_low = 0;
4453
}
4454
4455
return 0;
4456
}
4457
4458
static int si_construct_voltage_tables(struct amdgpu_device *adev)
4459
{
4460
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4461
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4462
struct si_power_info *si_pi = si_get_pi(adev);
4463
int ret;
4464
4465
if (pi->voltage_control) {
4466
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4467
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
4468
if (ret)
4469
return ret;
4470
4471
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4472
si_trim_voltage_table_to_fit_state_table(adev,
4473
SISLANDS_MAX_NO_VREG_STEPS,
4474
&eg_pi->vddc_voltage_table);
4475
} else if (si_pi->voltage_control_svi2) {
4476
ret = si_get_svi2_voltage_table(adev,
4477
&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
4478
&eg_pi->vddc_voltage_table);
4479
if (ret)
4480
return ret;
4481
} else {
4482
return -EINVAL;
4483
}
4484
4485
if (eg_pi->vddci_control) {
4486
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
4487
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
4488
if (ret)
4489
return ret;
4490
4491
if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4492
si_trim_voltage_table_to_fit_state_table(adev,
4493
SISLANDS_MAX_NO_VREG_STEPS,
4494
&eg_pi->vddci_voltage_table);
4495
}
4496
if (si_pi->vddci_control_svi2) {
4497
ret = si_get_svi2_voltage_table(adev,
4498
&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
4499
&eg_pi->vddci_voltage_table);
4500
if (ret)
4501
return ret;
4502
}
4503
4504
if (pi->mvdd_control) {
4505
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
4506
VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
4507
4508
if (ret) {
4509
pi->mvdd_control = false;
4510
return ret;
4511
}
4512
4513
if (si_pi->mvdd_voltage_table.count == 0) {
4514
pi->mvdd_control = false;
4515
return -EINVAL;
4516
}
4517
4518
if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4519
si_trim_voltage_table_to_fit_state_table(adev,
4520
SISLANDS_MAX_NO_VREG_STEPS,
4521
&si_pi->mvdd_voltage_table);
4522
}
4523
4524
if (si_pi->vddc_phase_shed_control) {
4525
ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4526
VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
4527
if (ret)
4528
si_pi->vddc_phase_shed_control = false;
4529
4530
if ((si_pi->vddc_phase_shed_table.count == 0) ||
4531
(si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
4532
si_pi->vddc_phase_shed_control = false;
4533
}
4534
4535
return 0;
4536
}
4537
4538
static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
4539
const struct atom_voltage_table *voltage_table,
4540
SISLANDS_SMC_STATETABLE *table)
4541
{
4542
unsigned int i;
4543
4544
for (i = 0; i < voltage_table->count; i++)
4545
table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
4546
}
4547
4548
static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
4549
SISLANDS_SMC_STATETABLE *table)
4550
{
4551
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4552
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4553
struct si_power_info *si_pi = si_get_pi(adev);
4554
u8 i;
4555
4556
if (si_pi->voltage_control_svi2) {
4557
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
4558
si_pi->svc_gpio_id);
4559
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
4560
si_pi->svd_gpio_id);
4561
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
4562
2);
4563
} else {
4564
if (eg_pi->vddc_voltage_table.count) {
4565
si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
4566
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
4567
cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
4568
4569
for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
4570
if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
4571
table->maxVDDCIndexInPPTable = i;
4572
break;
4573
}
4574
}
4575
}
4576
4577
if (eg_pi->vddci_voltage_table.count) {
4578
si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
4579
4580
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
4581
cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
4582
}
4583
4584
4585
if (si_pi->mvdd_voltage_table.count) {
4586
si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
4587
4588
table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
4589
cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
4590
}
4591
4592
if (si_pi->vddc_phase_shed_control) {
4593
if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
4594
&adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
4595
si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
4596
4597
table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
4598
cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
4599
4600
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
4601
(u32)si_pi->vddc_phase_shed_table.phase_delay);
4602
} else {
4603
si_pi->vddc_phase_shed_control = false;
4604
}
4605
}
4606
}
4607
4608
return 0;
4609
}
4610
4611
static int si_populate_voltage_value(struct amdgpu_device *adev,
4612
const struct atom_voltage_table *table,
4613
u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4614
{
4615
unsigned int i;
4616
4617
for (i = 0; i < table->count; i++) {
4618
if (value <= table->entries[i].value) {
4619
voltage->index = (u8)i;
4620
voltage->value = cpu_to_be16(table->entries[i].value);
4621
break;
4622
}
4623
}
4624
4625
if (i >= table->count)
4626
return -EINVAL;
4627
4628
return 0;
4629
}
4630
4631
static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
4632
SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4633
{
4634
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4635
struct si_power_info *si_pi = si_get_pi(adev);
4636
4637
if (pi->mvdd_control) {
4638
if (mclk <= pi->mvdd_split_frequency)
4639
voltage->index = 0;
4640
else
4641
voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
4642
4643
voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
4644
}
4645
return 0;
4646
}
4647
4648
/*
 * Translate an SMC voltage entry into its "standard" voltage by consulting
 * the CAC leakage table.  Falls back to the raw voltage when no leakage
 * data is available.  Returns 0, or -EINVAL if the new-CAC path is enabled
 * but the vddc-vs-sclk dependency table is missing.
 */
static int si_get_std_voltage_value(struct amdgpu_device *adev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage)
{
	u16 v_index;
	bool voltage_found = false;
	/* Default: report the entry's own (big-endian stored) voltage. */
	*std_voltage = be16_to_cpu(voltage->value);

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
				return -EINVAL;

			/* First pass: look for an exact vddc match in the
			 * sclk dependency table; the matching index selects
			 * the leakage entry (clamped to the last one).
			 */
			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (be16_to_cpu(voltage->value) ==
				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
						*std_voltage =
							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
					else
						*std_voltage =
							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
					break;
				}
			}

			/* Second pass: no exact match, take the first entry
			 * at or above the requested voltage instead.
			 */
			if (!voltage_found) {
				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
					if (be16_to_cpu(voltage->value) <=
					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
						voltage_found = true;
						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
							*std_voltage =
								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
						else
							*std_voltage =
								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
						break;
					}
				}
			}
		} else {
			/* Legacy path: the SMC voltage index addresses the
			 * leakage table directly (when in range).
			 */
			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
				*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
		}
	}

	return 0;
}
4698
4699
static int si_populate_std_voltage_value(struct amdgpu_device *adev,
4700
u16 value, u8 index,
4701
SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4702
{
4703
voltage->index = index;
4704
voltage->value = cpu_to_be16(value);
4705
4706
return 0;
4707
}
4708
4709
static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
4710
const struct amdgpu_phase_shedding_limits_table *limits,
4711
u16 voltage, u32 sclk, u32 mclk,
4712
SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
4713
{
4714
unsigned int i;
4715
4716
for (i = 0; i < limits->count; i++) {
4717
if ((voltage <= limits->entries[i].voltage) &&
4718
(sclk <= limits->entries[i].sclk) &&
4719
(mclk <= limits->entries[i].mclk))
4720
break;
4721
}
4722
4723
smc_voltage->phase_settings = (u8)i;
4724
4725
return 0;
4726
}
4727
4728
static int si_init_arb_table_index(struct amdgpu_device *adev)
4729
{
4730
struct si_power_info *si_pi = si_get_pi(adev);
4731
u32 tmp;
4732
int ret;
4733
4734
ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4735
&tmp, si_pi->sram_end);
4736
if (ret)
4737
return ret;
4738
4739
tmp &= 0x00FFFFFF;
4740
tmp |= MC_CG_ARB_FREQ_F1 << 24;
4741
4742
return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
4743
tmp, si_pi->sram_end);
4744
}
4745
4746
static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
4747
{
4748
return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
4749
}
4750
4751
static int si_reset_to_default(struct amdgpu_device *adev)
4752
{
4753
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
4754
0 : -EINVAL;
4755
}
4756
4757
static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
4758
{
4759
struct si_power_info *si_pi = si_get_pi(adev);
4760
u32 tmp;
4761
int ret;
4762
4763
ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4764
&tmp, si_pi->sram_end);
4765
if (ret)
4766
return ret;
4767
4768
tmp = (tmp >> 24) & 0xff;
4769
4770
if (tmp == MC_CG_ARB_FREQ_F0)
4771
return 0;
4772
4773
return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
4774
}
4775
4776
static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
					    u32 engine_clock)
{
	u32 dram_rows;
	u32 dram_refresh_rate;
	u32 tmp = (RREG32(mmMC_ARB_RAMCFG) & MC_ARB_RAMCFG__NOOFROWS_MASK) >>
		  MC_ARB_RAMCFG__NOOFROWS__SHIFT;

	/* NOOFROWS encodes the row count as 1 << (field + 10), capped. */
	if (tmp >= 4)
		dram_rows = 16384;
	else
		dram_rows = 1 << (tmp + 10);

	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);

	return ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
}
4794
4795
static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
4796
struct rv7xx_pl *pl,
4797
SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
4798
{
4799
u32 dram_timing;
4800
u32 dram_timing2;
4801
u32 burst_time;
4802
int ret;
4803
4804
arb_regs->mc_arb_rfsh_rate =
4805
(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
4806
4807
ret = amdgpu_atombios_set_engine_dram_timings(adev, pl->sclk,
4808
pl->mclk);
4809
if (ret)
4810
return ret;
4811
4812
dram_timing = RREG32(MC_ARB_DRAM_TIMING);
4813
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
4814
burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
4815
4816
arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing);
4817
arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
4818
arb_regs->mc_arb_burst_time = (u8)burst_time;
4819
4820
return 0;
4821
}
4822
4823
static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
4824
struct amdgpu_ps *amdgpu_state,
4825
unsigned int first_arb_set)
4826
{
4827
struct si_power_info *si_pi = si_get_pi(adev);
4828
struct si_ps *state = si_get_ps(amdgpu_state);
4829
SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4830
int i, ret = 0;
4831
4832
for (i = 0; i < state->performance_level_count; i++) {
4833
ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
4834
if (ret)
4835
break;
4836
ret = amdgpu_si_copy_bytes_to_smc(adev,
4837
si_pi->arb_table_start +
4838
offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4839
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
4840
(u8 *)&arb_regs,
4841
sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4842
si_pi->sram_end);
4843
if (ret)
4844
break;
4845
}
4846
4847
return ret;
4848
}
4849
4850
static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
4851
struct amdgpu_ps *amdgpu_new_state)
4852
{
4853
return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
4854
SISLANDS_DRIVER_STATE_ARB_INDEX);
4855
}
4856
4857
static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
4858
struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4859
{
4860
struct rv7xx_power_info *pi = rv770_get_pi(adev);
4861
struct si_power_info *si_pi = si_get_pi(adev);
4862
4863
if (pi->mvdd_control)
4864
return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
4865
si_pi->mvdd_bootup_value, voltage);
4866
4867
return 0;
4868
}
4869
4870
/*
 * Fill in the SMC state table's "initial" state from the boot power state
 * and the clock registers captured at init time.  Always returns 0.
 */
static int si_populate_smc_initial_state(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_initial_state,
					 SISLANDS_SMC_STATETABLE *table)
{
	struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 reg;
	int ret;

	/* mclk PLL registers, converted to the SMC's big-endian layout. */
	table->initialState.level.mclk.vDLL_CNTL =
		cpu_to_be32(si_pi->clock_registers.dll_cntl);
	table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.level.mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
	table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
	table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
	table->initialState.level.mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->initialState.level.mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->initialState.level.mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	/* sclk PLL registers. */
	table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);

	table->initialState.level.sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);

	table->initialState.level.arbRefreshState =
		SISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.level.ACIndex = 0;

	/* Resolve the boot vddc and its standard-voltage counterpart. */
	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.level.vddc);

	if (!ret) {
		u16 std_vddc;

		ret = si_get_std_voltage_value(adev,
					       &table->initialState.level.vddc,
					       &std_vddc);
		if (!ret)
			si_populate_std_voltage_value(adev, std_vddc,
						      table->initialState.level.vddc.index,
						      &table->initialState.level.std_vddc);
	}

	if (eg_pi->vddci_control)
		si_populate_voltage_value(adev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.level.vddci);

	if (si_pi->vddc_phase_shed_control)
		si_populate_phase_shedding_value(adev,
						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						 initial_state->performance_levels[0].vddc,
						 initial_state->performance_levels[0].sclk,
						 initial_state->performance_levels[0].mclk,
						 &table->initialState.level.vddc);

	si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);

	/* CG_R full, CG_L zero for the initial state's aT value. */
	reg = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
	table->initialState.level.aT = cpu_to_be32(reg);
	table->initialState.level.bSP = cpu_to_be32(pi->dsp);
	table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		table->initialState.level.strobeMode =
			si_get_strobe_mode_settings(adev,
						    initial_state->performance_levels[0].mclk);

		/* Enable EDC read/write above the edc enable threshold. */
		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.level.mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	/* DPM2 parameters are unused for the initial state. */
	table->initialState.level.dpm2.MaxPS = 0;
	table->initialState.level.dpm2.NearTDPDec = 0;
	table->initialState.level.dpm2.AboveSafeInc = 0;
	table->initialState.level.dpm2.BelowSafeInc = 0;
	table->initialState.level.dpm2.PwrEfficiencyRatio = 0;

	reg = SQ_POWER_THROTTLE__MIN_POWER_MASK |
	      SQ_POWER_THROTTLE__MAX_POWER_MASK;
	table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);

	reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK |
	      SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK |
	      SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
	table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
4993
4994
static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev,
4995
u32 sys_mask,
4996
enum si_pcie_gen asic_gen,
4997
enum si_pcie_gen default_gen)
4998
{
4999
switch (asic_gen) {
5000
case SI_PCIE_GEN1:
5001
return SI_PCIE_GEN1;
5002
case SI_PCIE_GEN2:
5003
return SI_PCIE_GEN2;
5004
case SI_PCIE_GEN3:
5005
return SI_PCIE_GEN3;
5006
default:
5007
if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
5008
(default_gen == SI_PCIE_GEN3))
5009
return SI_PCIE_GEN3;
5010
else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
5011
(default_gen == SI_PCIE_GEN2))
5012
return SI_PCIE_GEN2;
5013
else
5014
return SI_PCIE_GEN1;
5015
}
5016
return SI_PCIE_GEN1;
5017
}
5018
5019
/*
 * Build the ACPI (lowest power) state in the SMC state table.  It starts
 * as a copy of the initial state and is then re-targeted: vddc drops to
 * the ACPI/minimum voltage, mclk is powered down and sclk is muxed away
 * from the SPLL.  Always returns 0.
 */
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
				      SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 reg;
	int ret;

	table->ACPIState = table->initialState;

	/* ACPI state does not carry the DC (battery) flag. */
	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		/* Use the dedicated ACPI vddc when the platform defines one. */
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.level.vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.level.vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.level.vddc.index,
							      &table->ACPIState.level.std_vddc);
		}
		table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;

		if (si_pi->vddc_phase_shed_control) {
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->acpi_vddc,
							 0,
							 0,
							 &table->ACPIState.level.vddc);
		}
	} else {
		/* No ACPI vddc: fall back to the minimum table voltage. */
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table, &table->ACPIState.level.vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.level.vddc, &std_vddc);

			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.level.vddc.index,
							      &table->ACPIState.level.std_vddc);
		}
		table->ACPIState.level.gen2PCIE =
			(u8)si_gen_pcie_gen_support(adev,
						    si_pi->sys_pcie_mask,
						    si_pi->boot_pcie_gen,
						    SI_PCIE_GEN1);

		if (si_pi->vddc_phase_shed_control)
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->min_vddc_in_table,
							 0,
							 0,
							 &table->ACPIState.level.vddc);
	}

	if (pi->acpi_vddc) {
		if (eg_pi->acpi_vddci)
			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.level.vddci);
	}

	/* Power down the memory clock: hold MRDCKs in reset, not powered. */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	/* Mux sclk away from the SPLL (select source 4). */
	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= 4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;

	table->ACPIState.level.mclk.vDLL_CNTL =
		cpu_to_be32(dll_cntl);
	table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(mpll_func_cntl);
	table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(mpll_func_cntl_1);
	table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.level.mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->ACPIState.level.mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(spll_func_cntl_4);

	/* Zero clock values signal the lowest-power configuration. */
	table->ACPIState.level.mclk.mclk_value = 0;
	table->ACPIState.level.sclk.sclk_value = 0;

	si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.level.ACIndex = 0;

	table->ACPIState.level.dpm2.MaxPS = 0;
	table->ACPIState.level.dpm2.NearTDPDec = 0;
	table->ACPIState.level.dpm2.AboveSafeInc = 0;
	table->ACPIState.level.dpm2.BelowSafeInc = 0;
	table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;

	reg = SQ_POWER_THROTTLE__MIN_POWER_MASK | SQ_POWER_THROTTLE__MAX_POWER_MASK;
	table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);

	reg = SQ_POWER_THROTTLE2__MAX_POWER_DELTA_MASK | SQ_POWER_THROTTLE2__SHORT_TERM_INTERVAL_SIZE_MASK | SQ_POWER_THROTTLE2__LONG_TERM_INTERVAL_RATIO_MASK;
	table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
5160
5161
/*
 * si_populate_ulv_state - fill the SMC single-level swstate used for ULV
 * @adev: amdgpu device
 * @state: SMC ULV state-table entry to populate
 *
 * Converts the precomputed ULV power level (si_pi->ulv.pl) into SMC
 * format, then tags the level with the ULV arbitration index, AC-timing
 * index 1 and the DC state flag.  Returns 0 on success or the error from
 * si_convert_power_level_to_smc().
 */
static int si_populate_ulv_state(struct amdgpu_device *adev,
				 struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	u32 sclk_in_sr = 1350; /* ??? */
	int ret;

	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
					    &state->level);
	if (!ret) {
		if (eg_pi->sclk_deep_sleep) {
			/* pick bypass vs throttle around the minimum deep-sleep sclk */
			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
				state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
			else
				state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
		}
		/* optionally drop to a single PCIe lane while in ULV */
		if (ulv->one_pcie_lane_in_ulv)
			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
		state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
		/* AC-timing slot 1 is reserved for ULV (slot 0 is the initial state) */
		state->level.ACIndex = 1;
		state->level.std_vddc = state->level.vddc;
		state->levelCount = 1;

		state->flags |= PPSMC_SWSTATE_FLAG_DC;
	}

	return ret;
}
/*
 * si_program_ulv_memory_timing_parameters - upload ULV MC arb timings
 * @adev: amdgpu device
 *
 * Builds the MC arbiter DRAM timing register set for the ULV power level
 * and copies it into the SMC arb table at the ULV slot.  Also tells the
 * SMC how long to wait for the voltage change when entering ULV.
 * Returns 0 on success or a negative error code.
 */
static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int ret;

	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
						   &arb_regs);
	if (ret)
		return ret;

	/* return value deliberately ignored — soft register write is best-effort here */
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
				   ulv->volt_change_delay);

	/* write the set into the ULV slot of the SMC arb table */
	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->arb_table_start +
					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
					  (u8 *)&arb_regs,
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
					  si_pi->sram_end);

	return ret;
}
/*
 * si_get_mvdd_configuration - set the MVDD split frequency
 * @adev: amdgpu device
 *
 * NOTE(review): 30000 is presumably a memory clock in the 10 kHz units
 * used elsewhere in this driver, splitting low/high MVDD levels in
 * si_populate_mvdd_value() — confirm against that consumer.
 */
static void si_get_mvdd_configuration(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	pi->mvdd_split_frequency = 30000;
}
/*
 * si_init_smc_table - build and upload the full SMC state table
 * @adev: amdgpu device
 *
 * Populates voltage tables, thermal-protection type, platform capability
 * flags, the initial (boot), ACPI, driver and ULV states, then copies the
 * assembled SISLANDS_SMC_STATETABLE into SMC SRAM.
 * Returns 0 on success or a negative error code.
 */
static int si_init_smc_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	const struct si_ulv_param *ulv = &si_pi->ulv;
	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
	int ret;
	u32 lane_width;
	u32 vr_hot_gpio;

	si_populate_smc_voltage_tables(adev, table);

	/* tell the SMC where the thermal sensor lives */
	switch (adev->pm.int_thermal_type) {
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	/* translate platform capability bits into SMC system flags */
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
		/* regulator-hot is skipped on these two device IDs */
		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
		/* NOTE(review): backbias_response_time doubles as the VR-hot GPIO here — confirm */
		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
					   vr_hot_gpio);
	}

	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
	if (ret)
		return ret;

	ret = si_populate_smc_acpi_state(adev, table);
	if (ret)
		return ret;

	/* seed the driver state from the initial (boot) state */
	table->driverState.flags = table->initialState.flags;
	table->driverState.levelCount = table->initialState.levelCount;
	table->driverState.levels[0] = table->initialState.level;

	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
						     SISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	if (ulv->supported && ulv->pl.vddc) {
		ret = si_populate_ulv_state(adev, &table->ULVState);
		if (ret)
			return ret;

		ret = si_program_ulv_memory_timing_parameters(adev);
		if (ret)
			return ret;

		WREG32(mmCG_ULV_CONTROL, ulv->cg_ulv_control);
		WREG32(mmCG_ULV_PARAMETER, ulv->cg_ulv_parameter);

		/* the SMC restores this link width when leaving ULV */
		lane_width = amdgpu_get_pcie_lanes(adev);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
	} else {
		/* no usable ULV: alias it to the initial state */
		table->ULVState = table->initialState;
	}

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
					   si_pi->sram_end);
}
/*
 * si_calculate_sclk_params - compute SPLL register values for an sclk
 * @adev: amdgpu device
 * @engine_clock: target engine clock (10 kHz units used throughout dpm)
 * @sclk: output SMC sclk value block (native-endian; callers byte-swap)
 *
 * Asks the VBIOS for reference/post dividers, derives the fractional
 * feedback divider, and edits the cached SPLL control registers,
 * optionally programming spread spectrum.
 * Returns 0 on success or the atombios error.
 */
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* feedback divider with 14 fractional bits (x16384), 64-bit intermediate */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(CG_SPLL_FUNC_CNTL__SPLL_PDIV_A_MASK | CG_SPLL_FUNC_CNTL__SPLL_REF_DIV_MASK);
	spll_func_cntl |= dividers.ref_div << CG_SPLL_FUNC_CNTL__SPLL_REF_DIV__SHIFT;
	spll_func_cntl |= dividers.post_div << CG_SPLL_FUNC_CNTL__SPLL_PDIV_A__SHIFT;

	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= 2 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT;

	/* fractional fbdiv requires dithering */
	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT;
	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

	if (pi->sclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		/* enable spread spectrum only if the VBIOS provides parameters */
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CG_SPLL_SPREAD_SPECTRUM__CLK_S_MASK;
			cg_spll_spread_spectrum |= clk_s << CG_SPLL_SPREAD_SPECTRUM__CLK_S__SHIFT;
			cg_spll_spread_spectrum |= CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;

			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLK_V__SHIFT;
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}
static int si_populate_sclk_value(struct amdgpu_device *adev,
5385
u32 engine_clock,
5386
SISLANDS_SMC_SCLK_VALUE *sclk)
5387
{
5388
SISLANDS_SMC_SCLK_VALUE sclk_tmp;
5389
int ret;
5390
5391
ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
5392
if (!ret) {
5393
sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
5394
sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
5395
sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
5396
sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
5397
sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
5398
sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
5399
sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
5400
}
5401
5402
return ret;
5403
}
5404
5405
/*
 * si_populate_mclk_value - compute MPLL register values for a memory clock
 * @adev: amdgpu device
 * @engine_clock: unused here; kept for symmetry with sibling helpers
 * @memory_clock: target memory clock
 * @mclk: output SMC mclk block, written big-endian (SMC byte order)
 * @strobe_mode: enable memory strobe mode in the divider computation
 * @dll_state_on: whether the memory DLLs stay powered for this level
 *
 * Queries the VBIOS for MPLL dividers, edits the cached MPLL control
 * registers, optionally programs memory spread spectrum, and fills @mclk.
 * Returns 0 on success or the atombios error.
 */
static int si_populate_mclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  u32 memory_clock,
				  SISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	/* the DQ branch only matters for GDDR5 */
	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->mclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		/* nominal frequency: GDDR5 is quad-pumped, DDR is double-pumped */
		if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = freq_nom / reference_clock;
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* SMC tables are big-endian */
	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
static void si_populate_smc_sp(struct amdgpu_device *adev,
5495
struct amdgpu_ps *amdgpu_state,
5496
SISLANDS_SMC_SWSTATE *smc_state)
5497
{
5498
struct si_ps *ps = si_get_ps(amdgpu_state);
5499
struct rv7xx_power_info *pi = rv770_get_pi(adev);
5500
int i;
5501
5502
for (i = 0; i < ps->performance_level_count - 1; i++)
5503
smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
5504
5505
smc_state->levels[ps->performance_level_count - 1].bSP =
5506
cpu_to_be32(pi->psp);
5507
}
5508
5509
/*
 * si_convert_power_level_to_smc - translate one driver power level to SMC form
 * @adev: amdgpu device
 * @pl: source performance level (sclk/mclk/voltages/pcie gen)
 * @level: destination SMC hardware performance level
 *
 * Fills PCIe gen, sclk/mclk PLL settings, MC flags (stutter/EDC/strobe),
 * and the vddc/vddci/mvdd voltage entries, consulting MC_SEQ_MISC5/6/7
 * straps to decide whether the memory DLL stays on.
 * Returns 0 on success or a negative error code.
 */
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;

	/* a forced PCIe gen (from the perf request path) wins over the level's own */
	if (eg_pi->pcie_performance_request &&
	    (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID))
		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
	else
		level->gen2PCIE = (u8)pl->pcie_gen;

	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags =  0;

	/* allow MCLK stutter only below the threshold, with UVD idle,
	 * display stutter enabled and at most two displays lit */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
	    (adev->pm.pm_display_cfg.num_display <= 2)) {
		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
	}

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);

		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
			/* NOTE(review): MC_SEQ_MISC5/6/7 appear to be per-board
			 * memory straps selecting the DLL policy — confirm */
			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
		}
	} else {
		level->strobeMode = si_get_strobe_mode_settings(adev,
								pl->mclk);

		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = si_populate_mclk_value(adev,
				     pl->sclk,
				     pl->mclk,
				     &level->mclk,
				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
	if (ret)
		return ret;

	ret = si_populate_voltage_value(adev,
					&eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;


	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ret = si_populate_std_voltage_value(adev, std_vddc,
					    level->vddc.index, &level->std_vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = si_populate_phase_shedding_value(adev,
						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						       pl->vddc,
						       pl->sclk,
						       pl->mclk,
						       &level->vddc);
		if (ret)
			return ret;
	}

	level->MaxPoweredUpCU = si_pi->max_cu;

	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);

	return ret;
}
/*
 * si_populate_smc_t - program per-level up/down switch thresholds (aT)
 * @adev: amdgpu device
 * @amdgpu_state: source power state
 * @smc_state: SMC swstate whose levels[].aT fields are filled
 *
 * For each adjacent level pair, computes the activity thresholds via
 * r600_calculate_at() (with a linear fallback), scales them by the
 * sampling period and packs them into the CG_AT CG_R/CG_L fields.
 * Returns 0 on success, -EINVAL if there are too many levels.
 */
static int si_populate_smc_t(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_state,
			     SISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	/* single level: never switch down (CG_R saturated, CG_L zero) */
	if (state->performance_level_count < 2) {
		a_t = 0xffff << CG_AT__CG_R__SHIFT | 0 << CG_AT__CG_L__SHIFT;
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		ret = r600_calculate_at(
			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
			100 * R600_AH_DFLT,
			state->performance_levels[i + 1].sclk,
			state->performance_levels[i].sclk,
			&t_l,
			&t_h);

		/* fall back to a simple linear ramp around (i+1)*1000 */
		if (ret) {
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		/* merge the down-threshold (CG_R) into level i */
		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_AT__CG_R_MASK;
		a_t |= (t_l * pi->bsp / 20000) << CG_AT__CG_R__SHIFT;
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		/* the top transition uses the performance sampling period */
		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;
		a_t = (0xffff) << CG_AT__CG_R__SHIFT | (t_h * high_bsp / 20000) << CG_AT__CG_L__SHIFT;
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}
static int si_disable_ulv(struct amdgpu_device *adev)
5664
{
5665
PPSMC_Result r;
5666
5667
r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
5668
return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
5669
}
5670
5671
/*
 * si_is_state_ulv_compatible - can this power state coexist with ULV?
 * @adev: amdgpu device
 * @amdgpu_state: candidate power state
 *
 * ULV is allowed only when the state's lowest mclk matches the ULV level,
 * the ULV vddc satisfies every dispclk-dependent voltage requirement at
 * the current display clock, and UVD (vclk/dclk) is idle.
 */
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
				       struct amdgpu_ps *amdgpu_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;
	const struct si_ps *state = si_get_ps(amdgpu_state);
	int i;

	if (state->performance_levels[0].mclk != ulv->pl.mclk)
		return false;

	/* XXX validate against display requirements! */

	/* for display clk, set a reasonable min voltage requirement */
	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
		if (adev->pm.pm_display_cfg.display_clk <=
		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
			if (ulv->pl.vddc <
			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
				return false;
		}
	}

	/* any UVD activity rules ULV out */
	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
		return false;

	return true;
}
static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
5700
struct amdgpu_ps *amdgpu_new_state)
5701
{
5702
const struct si_power_info *si_pi = si_get_pi(adev);
5703
const struct si_ulv_param *ulv = &si_pi->ulv;
5704
5705
if (ulv->supported) {
5706
if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
5707
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
5708
0 : -EINVAL;
5709
}
5710
return 0;
5711
}
5712
5713
/*
 * si_convert_power_state_to_smc - translate a full power state to SMC form
 * @adev: amdgpu device
 * @amdgpu_state: source power state
 * @smc_state: destination SMC swstate
 *
 * Converts each performance level, assigns arbitration/AC-timing indices,
 * display watermarks and deep-sleep flags, then fills in the sampling
 * periods, power containment, SQ ramping and switch thresholds.
 * Returns 0 on success or a negative error code.
 */
static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	int i, ret;
	u32 threshold;
	u32 sclk_in_sr = 1350; /* ??? */

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	/* watermark threshold = 100% of the top level's sclk (the *100/100 is a no-op) */
	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	/* track UVD activity; the SMC flag is only set on hs-capable SMUs */
	if (amdgpu_state->vclk && amdgpu_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		if (eg_pi->sclk_deep_sleep) {
			/* deep sleep on level 0 always; above it only when allowed */
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		/* arbRefreshState is set before the error check; ret is honored just below */
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		/* driver levels use AC-timing slots after the initial/ULV slots */
		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(adev, amdgpu_state, smc_state);

	/* failures below degrade features instead of failing the state switch */
	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(adev, amdgpu_state, smc_state);
}
/*
 * si_upload_sw_state - convert and upload the driver state to SMC SRAM
 * @adev: amdgpu device
 * @amdgpu_new_state: the power state to upload
 *
 * Clears the cached driverState, converts @amdgpu_new_state into it and
 * copies only the used portion (header + actual level count) into the
 * SMC state table.  Returns 0 on success or a negative error code.
 */
static int si_upload_sw_state(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_new_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
	int ret;
	u32 address = si_pi->state_table_start +
		offsetof(SISLANDS_SMC_STATETABLE, driverState);
	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
	/* only header + the levels actually used are written */
	size_t state_size = struct_size(smc_state, levels,
					new_state->performance_level_count);
	memset(smc_state, 0, state_size);

	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
	if (ret)
		return ret;

	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
					   state_size, si_pi->sram_end);
}
/*
 * si_upload_ulv_state - (re)populate and upload the ULV state to SMC SRAM
 * @adev: amdgpu device
 *
 * No-op (returns 0) when ULV is unsupported or has no valid vddc;
 * otherwise rebuilds the cached ULVState and copies it into the SMC
 * state table.  Returns 0 on success or a negative error code.
 */
static int si_upload_ulv_state(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	int ret = 0;

	if (ulv->supported && ulv->pl.vddc) {
		u32 address = si_pi->state_table_start +
			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
		struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
		u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);

		memset(smc_state, 0, state_size);

		ret = si_populate_ulv_state(adev, smc_state);
		if (!ret)
			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
							  state_size, si_pi->sram_end);
	}

	return ret;
}
/*
 * si_upload_smc_data - program display-related SMC soft registers
 * @adev: amdgpu device
 *
 * Always returns 0; the individual soft-register writes are best-effort.
 */
static int si_upload_smc_data(struct amdgpu_device *adev)
{
	const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
	u32 crtc_index = 0;
	u32 mclk_change_block_cp_min = 0;
	u32 mclk_change_block_cp_max = 0;

	/* When a display is plugged in, program these so that the SMC
	 * performs MCLK switching when it doesn't cause flickering.
	 * When no display is plugged in, there is no need to restrict
	 * MCLK switching, so program them to zero.
	 */
	if (cfg->num_display) {
		crtc_index = cfg->crtc_index;

		if (cfg->line_time_in_us) {
			/* window (in lines) around vblank where MCLK changes are blocked */
			mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
			mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
		}
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_crtc_index,
				   crtc_index);

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
				   mclk_change_block_cp_min);

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
				   mclk_change_block_cp_max);

	return 0;
}
/*
 * si_set_mc_special_registers - expand derived MC registers in the table
 * @adev: amdgpu device
 * @table: MC register table to extend in place
 *
 * Some MC registers in the VBIOS table (MC_SEQ_MISC1, MC_SEQ_RESERVE_M)
 * imply extra command registers (EMRS/MRS/MRS1 and, for non-GDDR5,
 * MC_PMG_AUTO_CMD) whose per-entry values are derived from the original
 * column and the current hardware value.  Appended entries start at
 * index table->last (j); table->last is updated at the end.
 * Returns 0 on success, -EINVAL if the table would overflow.
 */
static int si_set_mc_special_registers(struct amdgpu_device *adev,
				       struct si_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1) {
		case MC_SEQ_MISC1:
			/* derive EMRS: keep current high half, fold in entry's high half */
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			j++;

			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			/* derive MRS: keep current high half, take entry's low half */
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			/* non-GDDR5 additionally needs MC_PMG_AUTO_CMD */
			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++)
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				j++;
			}
			break;
		case MC_SEQ_RESERVE_M:
			/* derive MRS1 the same way as MRS above */
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
			for(k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			j++;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}
/*
 * si_check_s0_mc_reg_index - map an MC register to its LP (shadow) twin
 * @in_reg: hardware MC register offset (the table's s1 address)
 * @out_reg: receives the matching *_LP shadow register offset when found
 *
 * Returns true and writes *out_reg when @in_reg has an LP shadow,
 * false otherwise (*out_reg is left untouched in that case).
 */
static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	static const struct {
		u16 hw_reg;	/* register the VBIOS table names */
		u16 lp_reg;	/* its low-power shadow */
	} lp_map[] = {
		{ MC_SEQ_RAS_TIMING,   MC_SEQ_RAS_TIMING_LP },
		{ MC_SEQ_CAS_TIMING,   MC_SEQ_CAS_TIMING_LP },
		{ MC_SEQ_MISC_TIMING,  MC_SEQ_MISC_TIMING_LP },
		{ MC_SEQ_MISC_TIMING2, MC_SEQ_MISC_TIMING2_LP },
		{ MC_SEQ_RD_CTL_D0,    MC_SEQ_RD_CTL_D0_LP },
		{ MC_SEQ_RD_CTL_D1,    MC_SEQ_RD_CTL_D1_LP },
		{ MC_SEQ_WR_CTL_D0,    MC_SEQ_WR_CTL_D0_LP },
		{ MC_SEQ_WR_CTL_D1,    MC_SEQ_WR_CTL_D1_LP },
		{ MC_PMG_CMD_EMRS,     MC_SEQ_PMG_CMD_EMRS_LP },
		{ MC_PMG_CMD_MRS,      MC_SEQ_PMG_CMD_MRS_LP },
		{ MC_PMG_CMD_MRS1,     MC_SEQ_PMG_CMD_MRS1_LP },
		{ MC_SEQ_PMG_TIMING,   MC_SEQ_PMG_TIMING_LP },
		{ MC_PMG_CMD_MRS2,     MC_SEQ_PMG_CMD_MRS2_LP },
		{ MC_SEQ_WR_CTL_2,     MC_SEQ_WR_CTL_2_LP },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lp_map); i++) {
		if (lp_map[i].hw_reg == in_reg) {
			*out_reg = lp_map[i].lp_reg;
			return true;
		}
	}

	return false;
}
static void si_set_valid_flag(struct si_mc_reg_table *table)
5993
{
5994
u8 i, j;
5995
5996
for (i = 0; i < table->last; i++) {
5997
for (j = 1; j < table->num_entries; j++) {
5998
if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
5999
table->valid_flag |= 1 << i;
6000
break;
6001
}
6002
}
6003
}
6004
}
6005
6006
static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
6007
{
6008
u32 i;
6009
u16 address;
6010
6011
for (i = 0; i < table->last; i++)
6012
table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
6013
address : table->mc_reg_address[i].s1;
6014
6015
}
6016
6017
static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
6018
struct si_mc_reg_table *si_table)
6019
{
6020
u8 i, j;
6021
6022
if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
6023
return -EINVAL;
6024
if (table->num_entries > MAX_AC_TIMING_ENTRIES)
6025
return -EINVAL;
6026
6027
for (i = 0; i < table->last; i++)
6028
si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
6029
si_table->last = table->last;
6030
6031
for (i = 0; i < table->num_entries; i++) {
6032
si_table->mc_reg_table_entry[i].mclk_max =
6033
table->mc_reg_table_entry[i].mclk_max;
6034
for (j = 0; j < table->last; j++) {
6035
si_table->mc_reg_table_entry[i].mc_data[j] =
6036
table->mc_reg_table_entry[i].mc_data[j];
6037
}
6038
}
6039
si_table->num_entries = table->num_entries;
6040
6041
return 0;
6042
}
6043
6044
/*
 * si_initialize_mc_reg_table - build the driver MC AC-timing register table
 * @adev: amdgpu device
 *
 * Seeds every LP shadow register from its live counterpart, parses the
 * VBIOS MC register table for the installed memory module, copies it,
 * resolves LP addresses, expands derived command registers and marks the
 * columns that vary.  Returns 0 on success or a negative error code.
 */
static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* mirror the current hardware values into the LP shadow registers */
	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = si_copy_vbios_mc_reg_table(table, si_table);
	if (ret)
		goto init_mc_done;

	si_set_s0_mc_reg_index(si_table);

	ret = si_set_mc_special_registers(adev, si_table);
	if (ret)
		goto init_mc_done;

	si_set_valid_flag(si_table);

init_mc_done:
	kfree(table);

	return ret;

}
static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
6095
SMC_SIslands_MCRegisters *mc_reg_table)
6096
{
6097
struct si_power_info *si_pi = si_get_pi(adev);
6098
u32 i, j;
6099
6100
for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
6101
if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
6102
if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
6103
break;
6104
mc_reg_table->address[i].s0 =
6105
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
6106
mc_reg_table->address[i].s1 =
6107
cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
6108
i++;
6109
}
6110
}
6111
mc_reg_table->last = (u8)i;
6112
}
6113
6114
static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
6115
SMC_SIslands_MCRegisterSet *data,
6116
u32 num_entries, u32 valid_flag)
6117
{
6118
u32 i, j;
6119
6120
for(i = 0, j = 0; j < num_entries; j++) {
6121
if (valid_flag & (1 << j)) {
6122
data->value[i] = cpu_to_be32(entry->mc_data[j]);
6123
i++;
6124
}
6125
}
6126
}
6127
6128
static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
6129
struct rv7xx_pl *pl,
6130
SMC_SIslands_MCRegisterSet *mc_reg_table_data)
6131
{
6132
struct si_power_info *si_pi = si_get_pi(adev);
6133
u32 i = 0;
6134
6135
for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
6136
if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
6137
break;
6138
}
6139
6140
if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
6141
--i;
6142
6143
si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
6144
mc_reg_table_data, si_pi->mc_reg_table.last,
6145
si_pi->mc_reg_table.valid_flag);
6146
}
6147
6148
static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
6149
struct amdgpu_ps *amdgpu_state,
6150
SMC_SIslands_MCRegisters *mc_reg_table)
6151
{
6152
struct si_ps *state = si_get_ps(amdgpu_state);
6153
int i;
6154
6155
for (i = 0; i < state->performance_level_count; i++) {
6156
si_convert_mc_reg_table_entry_to_smc(adev,
6157
&state->performance_levels[i],
6158
&mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
6159
}
6160
}
6161
6162
/*
 * Build the complete MC register table in SMC layout and upload it to SMC
 * memory.  Slot assignment: INITIAL mirrors the boot state's lowest
 * performance level; ACPI uses the first (lowest-mclk) table entry; ULV
 * uses the real ULV level when available, otherwise the first entry; the
 * driver-state slots get one set per performance level of the boot state.
 * Returns 0 on success or the error from the SMC copy.
 */
static int si_populate_mc_reg_table(struct amdgpu_device *adev,
				    struct amdgpu_ps *amdgpu_boot_state)
{
	struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;

	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));

	/* Point the SMC sequencer at register-set index 1. */
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);

	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);

	/* Initial slot: boot state's lowest performance level. */
	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);

	/* ACPI slot: always the first MC table entry. */
	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
				si_pi->mc_reg_table.last,
				si_pi->mc_reg_table.valid_flag);

	/* ULV slot: real ULV level when supported and programmed, else
	 * fall back to the first MC table entry. */
	if (ulv->supported && ulv->pl.vddc != 0)
		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
	else
		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
					si_pi->mc_reg_table.last,
					si_pi->mc_reg_table.valid_flag);

	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
					   (u8 *)smc_mc_reg_table,
					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
}
6199
6200
static int si_upload_mc_reg_table(struct amdgpu_device *adev,
6201
struct amdgpu_ps *amdgpu_new_state)
6202
{
6203
struct si_ps *new_state = si_get_ps(amdgpu_new_state);
6204
struct si_power_info *si_pi = si_get_pi(adev);
6205
u32 address = si_pi->mc_reg_table_start +
6206
offsetof(SMC_SIslands_MCRegisters,
6207
data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
6208
SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
6209
6210
memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
6211
6212
si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
6213
6214
return amdgpu_si_copy_bytes_to_smc(adev, address,
6215
(u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
6216
sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
6217
si_pi->sram_end);
6218
}
6219
6220
/* Toggle the VOLT_PWRMGT_EN bit in GENERAL_PWRMGT. */
static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
	u32 bits = enable ? GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK : 0;

	WREG32_P(mmGENERAL_PWRMGT, bits, ~GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK);
}
6227
6228
static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
6229
struct amdgpu_ps *amdgpu_state)
6230
{
6231
struct si_ps *state = si_get_ps(amdgpu_state);
6232
int i;
6233
u16 pcie_speed, max_speed = 0;
6234
6235
for (i = 0; i < state->performance_level_count; i++) {
6236
pcie_speed = state->performance_levels[i].pcie_gen;
6237
if (max_speed < pcie_speed)
6238
max_speed = pcie_speed;
6239
}
6240
return max_speed;
6241
}
6242
6243
static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
6244
{
6245
u32 speed_cntl;
6246
6247
speed_cntl = RREG32_PCIE_PORT(ixPCIE_LC_SPEED_CNTL) & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
6248
speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
6249
6250
return (u16)speed_cntl;
6251
}
6252
6253
/*
 * Ask the platform (via ACPI _PSPP-style requests) to raise the PCIe link
 * speed before switching to a faster power state.  The switch falls
 * through from GEN3 to GEN2 to the default case so that each failed
 * request degrades to the next lower gen; on total failure the current
 * speed is latched into force_pcie_gen.  A downward speed change is only
 * flagged here (pspp_notify_required) and performed after the state
 * switch by si_notify_link_speed_change_after_state_change().
 */
static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
	enum si_pcie_gen current_link_speed;

	/* A previously forced gen overrides what the current state says. */
	if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID)
		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = si_pi->force_pcie_gen;

	si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
	si_pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#if defined(CONFIG_ACPI)
		case SI_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			/* GEN3 denied: try GEN2 unless we are already there. */
			si_pi->force_pcie_gen = SI_PCIE_GEN2;
			if (current_link_speed == SI_PCIE_GEN2)
				break;
			fallthrough;
		case SI_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			fallthrough;
#endif
		default:
			/* All requests failed (or no ACPI): keep current speed. */
			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
			break;
		}
	} else {
		/* Slower target: defer the notification until after switch. */
		if (target_link_speed < current_link_speed)
			si_pi->pspp_notify_required = true;
	}
}
6292
6293
static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
6294
struct amdgpu_ps *amdgpu_new_state,
6295
struct amdgpu_ps *amdgpu_current_state)
6296
{
6297
struct si_power_info *si_pi = si_get_pi(adev);
6298
enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
6299
u8 request;
6300
6301
if (si_pi->pspp_notify_required) {
6302
if (target_link_speed == SI_PCIE_GEN3)
6303
request = PCIE_PERF_REQ_PECI_GEN3;
6304
else if (target_link_speed == SI_PCIE_GEN2)
6305
request = PCIE_PERF_REQ_PECI_GEN2;
6306
else
6307
request = PCIE_PERF_REQ_PECI_GEN1;
6308
6309
if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
6310
(si_get_current_pcie_speed(adev) > 0))
6311
return;
6312
6313
#if defined(CONFIG_ACPI)
6314
amdgpu_acpi_pcie_performance_request(adev, request, false);
6315
#endif
6316
}
6317
}
6318
6319
#if 0
6320
static int si_ds_request(struct amdgpu_device *adev,
6321
bool ds_status_on, u32 count_write)
6322
{
6323
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6324
6325
if (eg_pi->sclk_deep_sleep) {
6326
if (ds_status_on)
6327
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
6328
PPSMC_Result_OK) ?
6329
0 : -EINVAL;
6330
else
6331
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
6332
PPSMC_Result_OK) ? 0 : -EINVAL;
6333
}
6334
return 0;
6335
}
6336
#endif
6337
6338
static void si_set_max_cu_value(struct amdgpu_device *adev)
6339
{
6340
struct si_power_info *si_pi = si_get_pi(adev);
6341
6342
if (adev->asic_type == CHIP_VERDE) {
6343
switch (adev->pdev->device) {
6344
case 0x6820:
6345
case 0x6825:
6346
case 0x6821:
6347
case 0x6823:
6348
case 0x6827:
6349
si_pi->max_cu = 10;
6350
break;
6351
case 0x682D:
6352
case 0x6824:
6353
case 0x682F:
6354
case 0x6826:
6355
si_pi->max_cu = 8;
6356
break;
6357
case 0x6828:
6358
case 0x6830:
6359
case 0x6831:
6360
case 0x6838:
6361
case 0x6839:
6362
case 0x683D:
6363
si_pi->max_cu = 10;
6364
break;
6365
case 0x683B:
6366
case 0x683F:
6367
case 0x6829:
6368
si_pi->max_cu = 8;
6369
break;
6370
default:
6371
si_pi->max_cu = 0;
6372
break;
6373
}
6374
} else {
6375
si_pi->max_cu = 0;
6376
}
6377
}
6378
6379
static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
6380
struct amdgpu_clock_voltage_dependency_table *table)
6381
{
6382
u32 i;
6383
int j;
6384
u16 leakage_voltage;
6385
6386
if (table) {
6387
for (i = 0; i < table->count; i++) {
6388
switch (si_get_leakage_voltage_from_leakage_index(adev,
6389
table->entries[i].v,
6390
&leakage_voltage)) {
6391
case 0:
6392
table->entries[i].v = leakage_voltage;
6393
break;
6394
case -EAGAIN:
6395
return -EINVAL;
6396
case -EINVAL:
6397
default:
6398
break;
6399
}
6400
}
6401
6402
for (j = (table->count - 2); j >= 0; j--) {
6403
table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
6404
table->entries[j].v : table->entries[j + 1].v;
6405
}
6406
}
6407
return 0;
6408
}
6409
6410
static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
6411
{
6412
int ret = 0;
6413
6414
ret = si_patch_single_dependency_table_based_on_leakage(adev,
6415
&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
6416
if (ret)
6417
DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6418
ret = si_patch_single_dependency_table_based_on_leakage(adev,
6419
&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
6420
if (ret)
6421
DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6422
ret = si_patch_single_dependency_table_based_on_leakage(adev,
6423
&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
6424
if (ret)
6425
DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6426
return ret;
6427
}
6428
6429
static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
6430
struct amdgpu_ps *amdgpu_new_state,
6431
struct amdgpu_ps *amdgpu_current_state)
6432
{
6433
u32 lane_width;
6434
u32 new_lane_width =
6435
((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6436
u32 current_lane_width =
6437
((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6438
6439
if (new_lane_width != current_lane_width) {
6440
amdgpu_set_pcie_lanes(adev, new_lane_width);
6441
lane_width = amdgpu_get_pcie_lanes(adev);
6442
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
6443
}
6444
}
6445
6446
/*
 * One-time ASIC-side DPM preparation: snapshot the current clock
 * registers and enable ACPI power management.
 */
static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
	si_read_clock_registers(adev);
	si_enable_acpi_power_management(adev);
}
6451
6452
static int si_thermal_enable_alert(struct amdgpu_device *adev,
6453
bool enable)
6454
{
6455
u32 thermal_int = RREG32(mmCG_THERMAL_INT);
6456
6457
if (enable) {
6458
PPSMC_Result result;
6459
6460
thermal_int &= ~(CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK);
6461
WREG32(mmCG_THERMAL_INT, thermal_int);
6462
result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
6463
if (result != PPSMC_Result_OK) {
6464
DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6465
return -EINVAL;
6466
}
6467
} else {
6468
thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK | CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
6469
WREG32(mmCG_THERMAL_INT, thermal_int);
6470
}
6471
6472
return 0;
6473
}
6474
6475
static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
6476
int min_temp, int max_temp)
6477
{
6478
int low_temp = 0 * 1000;
6479
int high_temp = 255 * 1000;
6480
6481
if (low_temp < min_temp)
6482
low_temp = min_temp;
6483
if (high_temp > max_temp)
6484
high_temp = max_temp;
6485
if (high_temp < low_temp) {
6486
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
6487
return -EINVAL;
6488
}
6489
6490
WREG32_P(mmCG_THERMAL_INT, (high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTH_MASK);
6491
WREG32_P(mmCG_THERMAL_INT, (low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT, ~CG_THERMAL_INT__DIG_THERM_INTL_MASK);
6492
WREG32_P(mmCG_THERMAL_CTRL, (high_temp / 1000) << CG_THERMAL_CTRL__DIG_THERM_DPM__SHIFT, ~CG_THERMAL_CTRL__DIG_THERM_DPM_MASK);
6493
6494
adev->pm.dpm.thermal.min_temp = low_temp;
6495
adev->pm.dpm.thermal.max_temp = high_temp;
6496
6497
return 0;
6498
}
6499
6500
/*
 * Put the fan controller into a static (driver-chosen) PWM mode.  On the
 * first call the hardware's default mode and TMIN are saved so that
 * si_fan_ctrl_set_default_mode() can restore them later; then TMIN is
 * zeroed and the requested mode is programmed.
 */
static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (si_pi->fan_ctrl_is_in_default_mode) {
		/* Save the hardware defaults before overriding them. */
		tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		si_pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK) >> CG_FDO_CTRL2__TMIN__SHIFT;
		si_pi->t_min = tmp;
		si_pi->fan_ctrl_is_in_default_mode = false;
	}

	/* TMIN = 0 so the static duty applies at any temperature. */
	tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32(mmCG_FDO_CTRL2, tmp);

	tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32(mmCG_FDO_CTRL2, tmp);
}
6521
6522
/*
 * Build the SMC fan-control table (PWM curve through the t_min/t_med/
 * t_high points) from the BIOS fan profile and upload it.  On any
 * precondition failure (no table address, zero duty100) SMC fan control
 * is disabled and 0 is returned; an SMC copy failure also disables fan
 * control but propagates the error.
 *
 * NOTE(review): t_diff1/t_diff2 divide the slope computation; this
 * assumes the BIOS provides t_min < t_med < t_high — presumably
 * guaranteed by the pptable parser, verify if touching this path.
 */
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!si_pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* duty cycle value corresponding to 100% fan speed */
	duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	/* pwm_min is in 0.01% units; scale to the duty100 range. */
	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	/* PWM-per-degree slopes of the two curve segments (rounded). */
	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	/* All table fields are big-endian for the SMC. */
	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
	fan_table.slope1 = cpu_to_be16(slope1);
	fan_table.slope2 = cpu_to_be16(slope2);
	fan_table.fdo_min = cpu_to_be16(fdo_min);
	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
	fan_table.hys_up = cpu_to_be16(1);
	fan_table.hys_slope = cpu_to_be16(1);
	fan_table.temp_resp_lim = cpu_to_be16(5);
	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
						reference_clock) / 1600);
	fan_table.fdo_max = cpu_to_be16((u16)duty100);

	/* Temperature source the fan controller should follow. */
	tmp = (RREG32(mmCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK) >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.temp_src = (uint8_t)tmp;

	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  si_pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return ret;
}
6590
6591
static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
6592
{
6593
struct si_power_info *si_pi = si_get_pi(adev);
6594
PPSMC_Result ret;
6595
6596
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
6597
if (ret == PPSMC_Result_OK) {
6598
si_pi->fan_is_controlled_by_smc = true;
6599
return 0;
6600
} else {
6601
return -EINVAL;
6602
}
6603
}
6604
6605
static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
6606
{
6607
struct si_power_info *si_pi = si_get_pi(adev);
6608
PPSMC_Result ret;
6609
6610
ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
6611
6612
if (ret == PPSMC_Result_OK) {
6613
si_pi->fan_is_controlled_by_smc = false;
6614
return 0;
6615
} else {
6616
return -EINVAL;
6617
}
6618
}
6619
6620
static int si_dpm_get_fan_speed_pwm(void *handle,
6621
u32 *speed)
6622
{
6623
u32 duty, duty100;
6624
u64 tmp64;
6625
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6626
6627
if (!speed)
6628
return -EINVAL;
6629
6630
if (adev->pm.no_fan)
6631
return -ENOENT;
6632
6633
duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
6634
duty = (RREG32(mmCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK) >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
6635
6636
if (duty100 == 0)
6637
return -EINVAL;
6638
6639
tmp64 = (u64)duty * 255;
6640
do_div(tmp64, duty100);
6641
*speed = min_t(u32, tmp64, 255);
6642
6643
return 0;
6644
}
6645
6646
static int si_dpm_set_fan_speed_pwm(void *handle,
6647
u32 speed)
6648
{
6649
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6650
struct si_power_info *si_pi = si_get_pi(adev);
6651
u32 tmp;
6652
u32 duty, duty100;
6653
u64 tmp64;
6654
6655
if (adev->pm.no_fan)
6656
return -ENOENT;
6657
6658
if (si_pi->fan_is_controlled_by_smc)
6659
return -EINVAL;
6660
6661
if (speed > 255)
6662
return -EINVAL;
6663
6664
duty100 = (RREG32(mmCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
6665
6666
if (duty100 == 0)
6667
return -EINVAL;
6668
6669
tmp64 = (u64)speed * duty100;
6670
do_div(tmp64, 255);
6671
duty = (u32)tmp64;
6672
6673
tmp = RREG32(mmCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
6674
tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
6675
WREG32(mmCG_FDO_CTRL0, tmp);
6676
6677
return 0;
6678
}
6679
6680
/*
 * Switch between manual fan modes (non-zero `mode`, stopping SMC
 * auto-management first) and automatic control (mode 0, restarting SMC
 * fan control when available, otherwise restoring hardware defaults).
 */
static int si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (mode == U32_MAX)
		return -EINVAL;

	if (!mode) {
		/* back to auto-management */
		if (adev->pm.dpm.fan.ucode_fan_control)
			si_thermal_start_smc_fan_control(adev);
		else
			si_fan_ctrl_set_default_mode(adev);
		return 0;
	}

	/* manual mode: take the fan away from the SMC first */
	if (adev->pm.dpm.fan.ucode_fan_control)
		si_fan_ctrl_stop_smc_fan_control(adev);
	si_fan_ctrl_set_static_mode(adev, mode);

	return 0;
}
6702
6703
/*
 * Report the fan controller's current PWM mode.
 *
 * NOTE(review): when the SMC owns the fan this returns 0 without writing
 * *fan_mode — callers presumably pre-initialize it; verify at call sites
 * before relying on the output.
 */
static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (!fan_mode)
		return -EINVAL;

	if (si_pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32(mmCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	*fan_mode = (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);

	return 0;
}
6720
6721
#if 0
6722
static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
6723
u32 *speed)
6724
{
6725
u32 tach_period;
6726
u32 xclk = amdgpu_asic_get_xclk(adev);
6727
6728
if (adev->pm.no_fan)
6729
return -ENOENT;
6730
6731
if (adev->pm.fan_pulses_per_revolution == 0)
6732
return -ENOENT;
6733
6734
tach_period = (RREG32(mmCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
6735
if (tach_period == 0)
6736
return -ENOENT;
6737
6738
*speed = 60 * xclk * 10000 / tach_period;
6739
6740
return 0;
6741
}
6742
6743
static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
6744
u32 speed)
6745
{
6746
u32 tach_period, tmp;
6747
u32 xclk = amdgpu_asic_get_xclk(adev);
6748
6749
if (adev->pm.no_fan)
6750
return -ENOENT;
6751
6752
if (adev->pm.fan_pulses_per_revolution == 0)
6753
return -ENOENT;
6754
6755
if ((speed < adev->pm.fan_min_rpm) ||
6756
(speed > adev->pm.fan_max_rpm))
6757
return -EINVAL;
6758
6759
if (adev->pm.dpm.fan.ucode_fan_control)
6760
si_fan_ctrl_stop_smc_fan_control(adev);
6761
6762
tach_period = 60 * xclk * 10000 / (8 * speed);
6763
tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
6764
tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
6765
WREG32(mmCG_TACH_CTRL, tmp);
6766
6767
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
6768
6769
return 0;
6770
}
6771
#endif
6772
6773
/*
 * Restore the fan controller's PWM mode and TMIN to the hardware
 * defaults saved by si_fan_ctrl_set_static_mode().  No-op if we are
 * already in default mode.
 */
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (!si_pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= si_pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32(mmCG_FDO_CTRL2, tmp);

		tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= si_pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32(mmCG_FDO_CTRL2, tmp);
		si_pi->fan_ctrl_is_in_default_mode = true;
	}
}
6789
6790
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
6791
{
6792
if (adev->pm.dpm.fan.ucode_fan_control) {
6793
si_fan_ctrl_start_smc_fan_control(adev);
6794
si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
6795
}
6796
}
6797
6798
/*
 * Basic fan/thermal controller initialization: program the tachometer
 * edges-per-revolution (when the board reports pulses per revolution)
 * and the PWM response rate.
 */
static void si_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		/* register field is (pulses per revolution - 1) */
		tmp = RREG32(mmCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution -1) << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32(mmCG_TACH_CTRL, tmp);
	}

	tmp = RREG32(mmCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32(mmCG_FDO_CTRL2, tmp);
}
6812
6813
/*
 * Bring up the thermal controller: initialize fan/tach hardware, program
 * the default temperature window, enable the thermal alert, and (when
 * the BIOS fan table is usable) upload the fan table with the SMC halted
 * around the copy, then start SMC fan control.  Returns the first error.
 */
static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	si_thermal_initialize(adev);
	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = si_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		/* SMC must be halted while its fan table is rewritten. */
		ret = si_halt_smc(adev);
		if (ret)
			return ret;
		ret = si_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ret = si_resume_smc(adev);
		if (ret)
			return ret;
		si_thermal_start_smc_fan_control(adev);
	}

	return 0;
}
6839
6840
static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
6841
{
6842
if (!adev->pm.no_fan) {
6843
si_fan_ctrl_set_default_mode(adev);
6844
si_fan_ctrl_stop_smc_fan_control(adev);
6845
}
6846
}
6847
6848
/*
 * Full DPM bring-up sequence.  Order matters throughout: voltage control
 * and tables first, then timing/display programming, then firmware
 * upload and SMC table initialization, CAC/DTE/TDP setup, and finally
 * SMC start and DPM start.  Fails with -EINVAL if the SMC is already
 * running; otherwise returns the first error from any setup step.
 */
static int si_dpm_enable(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (amdgpu_si_is_smc_running(adev))
		return -EINVAL;

	/* Voltage control and voltage/MC tables. */
	if (pi->voltage_control || si_pi->voltage_control_svi2)
		si_enable_voltage_control(adev, true);
	if (pi->mvdd_control)
		si_get_mvdd_configuration(adev);
	if (pi->voltage_control || si_pi->voltage_control_svi2) {
		ret = si_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("si_construct_voltage_tables failed\n");
			return ret;
		}
	}
	/* Dynamic AC timing is optional: fall back quietly on failure. */
	if (eg_pi->dynamic_ac_timing) {
		ret = si_initialize_mc_reg_table(adev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		si_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		si_enable_thermal_protection(adev, true);

	/* Timing, throttle and display-gap programming. */
	si_setup_bsp(adev);
	si_program_git(adev);
	si_program_tp(adev);
	si_program_tpp(adev);
	si_program_sstp(adev);
	si_enable_display_gap(adev);
	si_program_vc(adev);

	/* SMC firmware and state tables. */
	ret = si_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("si_upload_firmware failed\n");
		return ret;
	}
	ret = si_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("si_process_firmware_header failed\n");
		return ret;
	}
	ret = si_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = si_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_table failed\n");
		return ret;
	}
	ret = si_init_smc_spll_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_spll_table failed\n");
		return ret;
	}
	ret = si_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("si_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_populate_mc_reg_table(adev, boot_ps);
		if (ret) {
			DRM_ERROR("si_populate_mc_reg_table failed\n");
			return ret;
		}
	}

	/* CAC / DTE / TDP power management tables. */
	ret = si_initialize_smc_cac_tables(adev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = si_initialize_hardware_cac_manager(adev);
	if (ret) {
		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = si_initialize_smc_dte_tables(adev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits(adev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
		return ret;
	}

	/* Start the SMC and DPM state machine. */
	si_program_response_times(adev);
	si_program_ds_registers(adev);
	si_dpm_start_smc(adev);
	ret = si_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("si_notify_smc_display_change failed\n");
		return ret;
	}
	si_enable_sclk_control(adev, true);
	si_start_dpm(adev);

	si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
	si_thermal_start_thermal_controller(adev);

	ni_update_current_ps(adev, boot_ps);

	return 0;
}
6965
6966
static int si_set_temperature_range(struct amdgpu_device *adev)
6967
{
6968
int ret;
6969
6970
ret = si_thermal_enable_alert(adev, false);
6971
if (ret)
6972
return ret;
6973
ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6974
if (ret)
6975
return ret;
6976
ret = si_thermal_enable_alert(adev, true);
6977
if (ret)
6978
return ret;
6979
6980
return ret;
6981
}
6982
6983
/*
 * Tear down DPM — roughly the reverse of si_dpm_enable(): stop the
 * thermal controller and ULV, disable power features, stop DPM, reset
 * the SMC to defaults, and return the memory arbiter to the F0 set.
 * No-op when the SMC is not running.
 */
static void si_dpm_disable(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	if (!amdgpu_si_is_smc_running(adev))
		return;
	si_thermal_stop_thermal_controller(adev);
	si_disable_ulv(adev);
	si_clear_vc(adev);
	if (pi->thermal_protection)
		si_enable_thermal_protection(adev, false);
	si_enable_power_containment(adev, boot_ps, false);
	si_enable_smc_cac(adev, boot_ps, false);
	si_enable_spread_spectrum(adev, false);
	si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	si_stop_dpm(adev);
	si_reset_to_default(adev);
	si_dpm_stop_smc(adev);
	si_force_switch_to_arb_f0(adev);

	/* Boot state becomes the current state again. */
	ni_update_current_ps(adev, boot_ps);
}
7006
7007
static int si_dpm_pre_set_power_state(void *handle)
7008
{
7009
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7010
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7011
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
7012
struct amdgpu_ps *new_ps = &requested_ps;
7013
7014
ni_update_requested_ps(adev, new_ps);
7015
si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
7016
7017
return 0;
7018
}
7019
7020
static int si_power_control_set_level(struct amdgpu_device *adev)
7021
{
7022
struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
7023
int ret;
7024
7025
ret = si_restrict_performance_levels_before_switch(adev);
7026
if (ret)
7027
return ret;
7028
ret = si_halt_smc(adev);
7029
if (ret)
7030
return ret;
7031
ret = si_populate_smc_tdp_limits(adev, new_ps);
7032
if (ret)
7033
return ret;
7034
ret = si_populate_smc_tdp_limits_2(adev, new_ps);
7035
if (ret)
7036
return ret;
7037
ret = si_resume_smc(adev);
7038
if (ret)
7039
return ret;
7040
return si_set_sw_state(adev);
7041
}
7042
7043
/*
 * Update VCE clocks on a power-state change.  When the new state needs
 * VCE, the clocks are set before ungating (clockgating then powergating);
 * when it does not, gating happens first (powergating then clockgating)
 * and the clocks are zeroed last.  No-op when the clocks are unchanged.
 */
static void si_set_vce_clock(struct amdgpu_device *adev,
			     struct amdgpu_ps *new_rps,
			     struct amdgpu_ps *old_rps)
{
	if ((old_rps->evclk != new_rps->evclk) ||
	    (old_rps->ecclk != new_rps->ecclk)) {
		/* Turn the clocks on when encoding, off otherwise */
		dev_dbg(adev->dev, "set VCE clocks: %u, %u\n", new_rps->evclk, new_rps->ecclk);

		if (new_rps->evclk || new_rps->ecclk) {
			amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);
			amdgpu_device_ip_set_clockgating_state(
				adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(
				adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE);
		} else {
			amdgpu_device_ip_set_powergating_state(
				adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(
				adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE);
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
		}
	}
}
7067
7068
/*
 * Perform the actual power-state switch from current_rps to the
 * requested_rps prepared by si_dpm_pre_set_power_state().  The sequence
 * (disable ULV, restrict levels, raise PCIe speed if needed, drop
 * CAC/power containment, halt SMC, upload state/MC tables, resume SMC,
 * request the new state, then re-enable CAC/containment and re-apply TDP
 * limits) is order-critical; each step aborts the switch on error.
 */
static int si_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = si_disable_ulv(adev);
	if (ret) {
		DRM_ERROR("si_disable_ulv failed\n");
		return ret;
	}
	ret = si_restrict_performance_levels_before_switch(adev);
	if (ret) {
		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	/* Upward PCIe speed changes must happen before the switch. */
	if (eg_pi->pcie_performance_request)
		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
	/* Drop CAC/power containment across the switch. */
	ret = si_enable_power_containment(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	/* SMC must be halted while its tables are rewritten. */
	ret = si_halt_smc(adev);
	if (ret) {
		DRM_ERROR("si_halt_smc failed\n");
		return ret;
	}
	ret = si_upload_sw_state(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_upload_sw_state failed\n");
		return ret;
	}
	ret = si_upload_smc_data(adev);
	if (ret) {
		DRM_ERROR("si_upload_smc_data failed\n");
		return ret;
	}
	ret = si_upload_ulv_state(adev);
	if (ret) {
		DRM_ERROR("si_upload_ulv_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_upload_mc_reg_table(adev, new_ps);
		if (ret) {
			DRM_ERROR("si_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_program_memory_timing_parameters(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_program_memory_timing_parameters failed\n");
		return ret;
	}
	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);

	ret = si_resume_smc(adev);
	if (ret) {
		DRM_ERROR("si_resume_smc failed\n");
		return ret;
	}
	ret = si_set_sw_state(adev);
	if (ret) {
		DRM_ERROR("si_set_sw_state failed\n");
		return ret;
	}
	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
	si_set_vce_clock(adev, new_ps, old_ps);
	/* Downward PCIe speed changes are deferred until after the switch. */
	if (eg_pi->pcie_performance_request)
		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_enable_power_containment(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}

	ret = si_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("si_power_control_set_level failed\n");
		return ret;
	}

	return 0;
}
7171
7172
static void si_dpm_post_set_power_state(void *handle)
7173
{
7174
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7175
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7176
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
7177
7178
ni_update_current_ps(adev, new_ps);
7179
}
7180
7181
#if 0
7182
void si_dpm_reset_asic(struct amdgpu_device *adev)
7183
{
7184
si_restrict_performance_levels_before_switch(adev);
7185
si_disable_ulv(adev);
7186
si_set_boot_state(adev);
7187
}
7188
#endif
7189
7190
static void si_dpm_display_configuration_changed(void *handle)
{
	/* Reprogram the display gap whenever the display topology changes. */
	si_program_display_gap((struct amdgpu_device *)handle);
}
7196
7197
7198
static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
7199
struct amdgpu_ps *rps,
7200
struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
7201
u8 table_rev)
7202
{
7203
rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
7204
rps->class = le16_to_cpu(non_clock_info->usClassification);
7205
rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
7206
7207
if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
7208
rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
7209
rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
7210
} else if (r600_is_uvd_state(rps->class, rps->class2)) {
7211
rps->vclk = RV770_DEFAULT_VCLK_FREQ;
7212
rps->dclk = RV770_DEFAULT_DCLK_FREQ;
7213
} else {
7214
rps->vclk = 0;
7215
rps->dclk = 0;
7216
}
7217
7218
if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
7219
adev->pm.dpm.boot_ps = rps;
7220
if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
7221
adev->pm.dpm.uvd_ps = rps;
7222
}
7223
7224
/* Decode one PPLib clock-info entry into performance level @index of the
 * power state @rps, patching the level with leakage/boot corrections and
 * updating driver-wide min/max bookkeeping along the way.
 */
static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *ps = si_get_ps(rps);
	u16 leakage_voltage;
	struct rv7xx_pl *pl = &ps->performance_levels[index];
	int ret;

	/* Levels are parsed in order, so index + 1 is the running count. */
	ps->performance_level_count = index + 1;

	/* Clocks are split low-16/high-8 in the BIOS table. */
	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
	pl->pcie_gen = si_gen_pcie_gen_support(adev,
					       si_pi->sys_pcie_mask,
					       si_pi->boot_pcie_gen,
					       clock_info->si.ucPCIEGen);

	/* patch up vddc if necessary: the table may store a leakage index
	 * instead of a real voltage
	 */
	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
							&leakage_voltage);
	if (ret == 0)
		pl->vddc = leakage_voltage;

	/* Record the ACPI state's settings for later use. */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		si_pi->acpi_pcie_gen = pl->pcie_gen;
	}

	/* The lowest level of a ULV-classified state seeds the ULV config. */
	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
	    index == 0) {
		/* XXX disable for A0 tahiti */
		si_pi->ulv.supported = false;
		si_pi->ulv.pl = *pl;
		si_pi->ulv.one_pcie_lane_in_ulv = false;
		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
	}

	/* Track the overall VDDC range seen in the table. */
	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state: use the firmware-reported defaults rather
	 * than whatever the table entry says
	 */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
		pl->mclk = adev->clock.default_mclk;
		pl->sclk = adev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
		si_pi->mvdd_bootup_value = mvdd;
	}

	/* The performance-classified state defines the AC clock/voltage caps. */
	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}
7299
7300
/* Overlay of the two PPLib power-state layouts found in the BIOS table;
 * this driver only reads the v2 layout (see si_parse_power_table).
 */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
7304
7305
/* Parse the ATOM PowerPlay table out of the VBIOS into the driver's
 * adev->pm.dpm.ps array and fill in the VCE state clocks.
 *
 * Returns 0 on success, -EINVAL if the table header cannot be parsed,
 * -ENOMEM on allocation failure (partially-built state is released by
 * si_dpm_fini() via the caller's error path).
 */
static int si_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct si_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	/* The three sub-tables are located via byte offsets from the
	 * PowerPlay header.
	 */
	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps),
				  GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (adev->pm.dpm.num_ps = 0, i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
		if (ps == NULL)
			/* states allocated so far are freed by si_dpm_fini()
			 * since num_ps tracks them
			 */
			return -ENOMEM;
		adev->pm.dpm.ps[i].ps_priv = ps;
		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			/* skip invalid indices, cap at the hw level limit */
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			si_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		/* v2 state entries are 2 header bytes plus one index byte
		 * per DPM level
		 */
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
		adev->pm.dpm.num_ps++;
	}

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
		sclk |= clock_info->si.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
		mclk |= clock_info->si.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}
7394
7395
/* One-time DPM software initialization: allocate the power-info
 * hierarchy (si -> ni -> evergreen -> rv7xx), parse the BIOS power
 * tables and establish all driver defaults/thresholds.
 *
 * Returns 0 on success or a negative errno; on failure the caller
 * (si_dpm_sw_init) releases partial state via si_dpm_fini().
 */
static int si_dpm_init(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct ni_power_info *ni_pi;
	struct si_power_info *si_pi;
	struct atom_clock_dividers dividers;
	int ret;

	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
	if (si_pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = si_pi;
	/* the generation-specific infos are nested inside si_power_info */
	ni_pi = &si_pi->ni;
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	si_pi->sys_pcie_mask =
		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
	si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);

	si_set_max_cu_value(adev);

	rv770_get_max_vddc(adev);
	si_get_leakage_vddc(adev);
	si_patch_dependency_tables_based_on_leakage(adev);

	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	ret = si_parse_power_table(adev);
	if (ret)
		return ret;

	/* fixed 4-entry VDDC-vs-display-clock dependency table */
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct amdgpu_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries)
		return -ENOMEM;

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	if (adev->pm.dpm.voltage_response_time == 0)
		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (adev->pm.dpm.backbias_response_time == 0)
		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 0, false, &dividers);
	/* NOTE(review): the dividers result is used on *failure* of the
	 * query and the default on success — looks inverted, but the same
	 * pattern appears in the other legacy dpm drivers; confirm intent
	 * before changing.
	 */
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	eg_pi->smu_uvd_hs = false;

	pi->mclk_strobe_mode_threshold = 40000;
	/* mclk stutter is disabled on the special 1GB boards */
	if (si_is_special_1gb_platform(adev))
		pi->mclk_stutter_mode_threshold = 0;
	else
		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
	pi->mclk_edc_enable_threshold = 40000;
	eg_pi->mclk_edc_wr_enable_threshold = 40000;

	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

	/* probe which voltage-control mechanisms the board supports:
	 * GPIO LUT first, SVID2 as fallback for VDDC/VDDCI
	 */
	pi->voltage_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!pi->voltage_control) {
		si_pi->voltage_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
							VOLTAGE_OBJ_SVID2);
		if (si_pi->voltage_control_svi2)
			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						      &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
	}

	pi->mvdd_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
						VOLTAGE_OBJ_GPIO_LUT);

	eg_pi->vddci_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!eg_pi->vddci_control)
		si_pi->vddci_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
							VOLTAGE_OBJ_SVID2);

	si_pi->vddc_phase_shed_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_PHASE_LUT);

	rv770_get_engine_memory_ss(adev);

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = SISLANDS_VRC_DFLT;

	eg_pi->sclk_deep_sleep = true;
	si_pi->sclk_deep_sleep_above_low = false;

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	eg_pi->dynamic_ac_timing = true;

#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	si_pi->sram_end = SMC_RAM_END;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	si_initialize_powertune_defaults(adev);

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	si_pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}
7554
7555
static void si_dpm_fini(struct amdgpu_device *adev)
7556
{
7557
int i;
7558
7559
if (adev->pm.dpm.ps)
7560
for (i = 0; i < adev->pm.dpm.num_ps; i++)
7561
kfree(adev->pm.dpm.ps[i].ps_priv);
7562
kfree(adev->pm.dpm.ps);
7563
kfree(adev->pm.dpm.priv);
7564
kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
7565
amdgpu_free_extended_power_table(adev);
7566
}
7567
7568
static void si_dpm_debugfs_print_current_performance_level(void *handle,
7569
struct seq_file *m)
7570
{
7571
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7572
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7573
struct amdgpu_ps *rps = &eg_pi->current_rps;
7574
struct si_ps *ps = si_get_ps(rps);
7575
struct rv7xx_pl *pl;
7576
u32 current_index =
7577
(RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
7578
TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
7579
7580
if (current_index >= ps->performance_level_count) {
7581
seq_printf(m, "invalid dpm profile %d\n", current_index);
7582
} else {
7583
pl = &ps->performance_levels[current_index];
7584
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7585
seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
7586
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7587
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7588
}
7589
}
7590
7591
static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
7592
struct amdgpu_irq_src *source,
7593
unsigned type,
7594
enum amdgpu_interrupt_state state)
7595
{
7596
u32 cg_thermal_int;
7597
7598
switch (type) {
7599
case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
7600
switch (state) {
7601
case AMDGPU_IRQ_STATE_DISABLE:
7602
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
7603
cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
7604
WREG32(mmCG_THERMAL_INT, cg_thermal_int);
7605
break;
7606
case AMDGPU_IRQ_STATE_ENABLE:
7607
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
7608
cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_HIGH_MASK;
7609
WREG32(mmCG_THERMAL_INT, cg_thermal_int);
7610
break;
7611
default:
7612
break;
7613
}
7614
break;
7615
7616
case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
7617
switch (state) {
7618
case AMDGPU_IRQ_STATE_DISABLE:
7619
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
7620
cg_thermal_int |= CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
7621
WREG32(mmCG_THERMAL_INT, cg_thermal_int);
7622
break;
7623
case AMDGPU_IRQ_STATE_ENABLE:
7624
cg_thermal_int = RREG32_SMC(mmCG_THERMAL_INT);
7625
cg_thermal_int &= ~CG_THERMAL_INT__THERM_INT_MASK_LOW_MASK;
7626
WREG32(mmCG_THERMAL_INT, cg_thermal_int);
7627
break;
7628
default:
7629
break;
7630
}
7631
break;
7632
7633
default:
7634
break;
7635
}
7636
return 0;
7637
}
7638
7639
static int si_dpm_process_interrupt(struct amdgpu_device *adev,
7640
struct amdgpu_irq_src *source,
7641
struct amdgpu_iv_entry *entry)
7642
{
7643
bool queue_thermal = false;
7644
7645
if (entry == NULL)
7646
return -EINVAL;
7647
7648
switch (entry->src_id) {
7649
case 230: /* thermal low to high */
7650
DRM_DEBUG("IH: thermal low to high\n");
7651
adev->pm.dpm.thermal.high_to_low = false;
7652
queue_thermal = true;
7653
break;
7654
case 231: /* thermal high to low */
7655
DRM_DEBUG("IH: thermal high to low\n");
7656
adev->pm.dpm.thermal.high_to_low = true;
7657
queue_thermal = true;
7658
break;
7659
default:
7660
break;
7661
}
7662
7663
if (queue_thermal)
7664
schedule_work(&adev->pm.dpm.thermal.work);
7665
7666
return 0;
7667
}
7668
7669
static int si_dpm_late_init(struct amdgpu_ip_block *ip_block)
7670
{
7671
int ret;
7672
struct amdgpu_device *adev = ip_block->adev;
7673
7674
if (!adev->pm.dpm_enabled)
7675
return 0;
7676
7677
ret = si_set_temperature_range(adev);
7678
if (ret)
7679
return ret;
7680
#if 0 //TODO ?
7681
si_dpm_powergate_uvd(adev, true);
7682
#endif
7683
return 0;
7684
}
7685
7686
/**
 * si_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int si_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	int err;

	DRM_DEBUG("\n");
	/* Certain SKUs (identified by PCI device/revision ID) need
	 * dedicated "_k" firmware variants.
	 */
	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		if ((adev->pdev->revision == 0x81) &&
		    ((adev->pdev->device == 0x6810) ||
		     (adev->pdev->device == 0x6811)))
			chip_name = "pitcairn_k";
		else
			chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		if (((adev->pdev->device == 0x6820) &&
		     ((adev->pdev->revision == 0x81) ||
		      (adev->pdev->revision == 0x83))) ||
		    ((adev->pdev->device == 0x6821) &&
		     ((adev->pdev->revision == 0x83) ||
		      (adev->pdev->revision == 0x87))) ||
		    ((adev->pdev->revision == 0x87) &&
		     ((adev->pdev->device == 0x6823) ||
		      (adev->pdev->device == 0x682b))))
			chip_name = "verde_k";
		else
			chip_name = "verde";
		break;
	case CHIP_OLAND:
		if (((adev->pdev->revision == 0x81) &&
		     ((adev->pdev->device == 0x6600) ||
		      (adev->pdev->device == 0x6604) ||
		      (adev->pdev->device == 0x6605) ||
		      (adev->pdev->device == 0x6610))) ||
		    ((adev->pdev->revision == 0x83) &&
		     (adev->pdev->device == 0x6610)))
			chip_name = "oland_k";
		else
			chip_name = "oland";
		break;
	case CHIP_HAINAN:
		if (((adev->pdev->revision == 0x81) &&
		     (adev->pdev->device == 0x6660)) ||
		    ((adev->pdev->revision == 0x83) &&
		     ((adev->pdev->device == 0x6660) ||
		      (adev->pdev->device == 0x6663) ||
		      (adev->pdev->device == 0x6665) ||
		      (adev->pdev->device == 0x6667))))
			chip_name = "hainan_k";
		else if ((adev->pdev->revision == 0xc3) &&
			 (adev->pdev->device == 0x6665))
			chip_name = "banks_k_2";
		else
			chip_name = "hainan";
		break;
	/* this function is only reached for the SI parts listed above */
	default: BUG();
	}

	err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_smc.bin", chip_name);
	if (err) {
		DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s_smc.bin\"\n",
			  err, chip_name);
		/* drop the partially-acquired firmware reference */
		amdgpu_ucode_release(&adev->pm.fw);
	}
	return err;
}
7766
7767
static int si_dpm_sw_init(struct amdgpu_ip_block *ip_block)
7768
{
7769
int ret;
7770
struct amdgpu_device *adev = ip_block->adev;
7771
7772
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
7773
if (ret)
7774
return ret;
7775
7776
ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
7777
if (ret)
7778
return ret;
7779
7780
/* default to balanced state */
7781
adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
7782
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
7783
adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
7784
adev->pm.default_sclk = adev->clock.default_sclk;
7785
adev->pm.default_mclk = adev->clock.default_mclk;
7786
adev->pm.current_sclk = adev->clock.default_sclk;
7787
adev->pm.current_mclk = adev->clock.default_mclk;
7788
adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
7789
7790
if (amdgpu_dpm == 0)
7791
return 0;
7792
7793
ret = si_dpm_init_microcode(adev);
7794
if (ret)
7795
return ret;
7796
7797
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
7798
ret = si_dpm_init(adev);
7799
if (ret)
7800
goto dpm_failed;
7801
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7802
if (amdgpu_dpm == 1)
7803
amdgpu_pm_print_power_states(adev);
7804
drm_info(adev_to_drm(adev), "si dpm initialized\n");
7805
return 0;
7806
7807
dpm_failed:
7808
si_dpm_fini(adev);
7809
drm_err(adev_to_drm(adev), "dpm initialization failed\n");
7810
return ret;
7811
}
7812
7813
static int si_dpm_sw_fini(struct amdgpu_ip_block *ip_block)
7814
{
7815
struct amdgpu_device *adev = ip_block->adev;
7816
7817
flush_work(&adev->pm.dpm.thermal.work);
7818
7819
si_dpm_fini(adev);
7820
7821
return 0;
7822
}
7823
7824
static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
7825
{
7826
int ret;
7827
7828
struct amdgpu_device *adev = ip_block->adev;
7829
7830
if (!amdgpu_dpm)
7831
return 0;
7832
7833
mutex_lock(&adev->pm.mutex);
7834
si_dpm_setup_asic(adev);
7835
ret = si_dpm_enable(adev);
7836
if (ret)
7837
adev->pm.dpm_enabled = false;
7838
else
7839
adev->pm.dpm_enabled = true;
7840
amdgpu_legacy_dpm_compute_clocks(adev);
7841
mutex_unlock(&adev->pm.mutex);
7842
return ret;
7843
}
7844
7845
static int si_dpm_hw_fini(struct amdgpu_ip_block *ip_block)
7846
{
7847
struct amdgpu_device *adev = ip_block->adev;
7848
7849
if (adev->pm.dpm_enabled)
7850
si_dpm_disable(adev);
7851
7852
return 0;
7853
}
7854
7855
static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
7856
{
7857
struct amdgpu_device *adev = ip_block->adev;
7858
7859
cancel_work_sync(&adev->pm.dpm.thermal.work);
7860
7861
if (adev->pm.dpm_enabled) {
7862
mutex_lock(&adev->pm.mutex);
7863
adev->pm.dpm_enabled = false;
7864
/* disable dpm */
7865
si_dpm_disable(adev);
7866
/* reset the power state */
7867
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7868
mutex_unlock(&adev->pm.mutex);
7869
}
7870
7871
return 0;
7872
}
7873
7874
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
7875
{
7876
int ret = 0;
7877
struct amdgpu_device *adev = ip_block->adev;
7878
7879
if (!amdgpu_dpm)
7880
return 0;
7881
7882
if (!adev->pm.dpm_enabled) {
7883
/* asic init will reset to the boot state */
7884
mutex_lock(&adev->pm.mutex);
7885
si_dpm_setup_asic(adev);
7886
ret = si_dpm_enable(adev);
7887
if (ret) {
7888
adev->pm.dpm_enabled = false;
7889
} else {
7890
adev->pm.dpm_enabled = true;
7891
amdgpu_legacy_dpm_compute_clocks(adev);
7892
}
7893
mutex_unlock(&adev->pm.mutex);
7894
}
7895
7896
return ret;
7897
}
7898
7899
static bool si_dpm_is_idle(struct amdgpu_ip_block *ip_block)
7900
{
7901
/* XXX */
7902
return true;
7903
}
7904
7905
static int si_dpm_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* XXX: nothing to wait on; si_dpm_is_idle() always reports idle */
	return 0;
}
7910
7911
static int si_dpm_set_clockgating_state(struct amdgpu_ip_block *ip_block,
7912
enum amd_clockgating_state state)
7913
{
7914
return 0;
7915
}
7916
7917
static int si_dpm_set_powergating_state(struct amdgpu_ip_block *ip_block,
7918
enum amd_powergating_state state)
7919
{
7920
return 0;
7921
}
7922
7923
/* get temperature in millidegrees */
7924
static int si_dpm_get_temp(void *handle)
7925
{
7926
u32 temp;
7927
int actual_temp = 0;
7928
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7929
7930
temp = (RREG32(mmCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
7931
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
7932
7933
if (temp & 0x200)
7934
actual_temp = 255;
7935
else
7936
actual_temp = temp & 0x1ff;
7937
7938
actual_temp = (actual_temp * 1000);
7939
7940
return actual_temp;
7941
}
7942
7943
/* Return the engine clock of the requested state's lowest (@low) or
 * highest performance level.
 */
static u32 si_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
	u32 idx = low ? 0 : requested_state->performance_level_count - 1;

	return requested_state->performance_levels[idx].sclk;
}
7954
7955
/* Return the memory clock of the requested state's lowest (@low) or
 * highest performance level.
 */
static u32 si_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
	u32 idx = low ? 0 : requested_state->performance_level_count - 1;

	return requested_state->performance_levels[idx].mclk;
}
7966
7967
static void si_dpm_print_power_state(void *handle,
7968
void *current_ps)
7969
{
7970
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7971
struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
7972
struct si_ps *ps = si_get_ps(rps);
7973
struct rv7xx_pl *pl;
7974
int i;
7975
7976
amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
7977
amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
7978
drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7979
drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
7980
for (i = 0; i < ps->performance_level_count; i++) {
7981
pl = &ps->performance_levels[i];
7982
drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7983
i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7984
}
7985
amdgpu_dpm_dbg_print_ps_status(adev, rps);
7986
}
7987
7988
static int si_dpm_early_init(struct amdgpu_ip_block *ip_block)
7989
{
7990
7991
struct amdgpu_device *adev = ip_block->adev;
7992
7993
adev->powerplay.pp_funcs = &si_dpm_funcs;
7994
adev->powerplay.pp_handle = adev;
7995
si_dpm_set_irq_funcs(adev);
7996
return 0;
7997
}
7998
7999
static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
8000
const struct rv7xx_pl *si_cpl2)
8001
{
8002
return ((si_cpl1->mclk == si_cpl2->mclk) &&
8003
(si_cpl1->sclk == si_cpl2->sclk) &&
8004
(si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
8005
(si_cpl1->vddc == si_cpl2->vddc) &&
8006
(si_cpl1->vddci == si_cpl2->vddci));
8007
}
8008
8009
static int si_check_state_equal(void *handle,
8010
void *current_ps,
8011
void *request_ps,
8012
bool *equal)
8013
{
8014
struct si_ps *si_cps;
8015
struct si_ps *si_rps;
8016
int i;
8017
struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
8018
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
8019
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
8020
8021
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
8022
return -EINVAL;
8023
8024
si_cps = si_get_ps((struct amdgpu_ps *)cps);
8025
si_rps = si_get_ps((struct amdgpu_ps *)rps);
8026
8027
if (si_cps == NULL) {
8028
printk("si_cps is NULL\n");
8029
*equal = false;
8030
return 0;
8031
}
8032
8033
if (si_cps->performance_level_count != si_rps->performance_level_count) {
8034
*equal = false;
8035
return 0;
8036
}
8037
8038
for (i = 0; i < si_cps->performance_level_count; i++) {
8039
if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
8040
&(si_rps->performance_levels[i]))) {
8041
*equal = false;
8042
return 0;
8043
}
8044
}
8045
8046
/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
8047
*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
8048
*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
8049
8050
return 0;
8051
}
8052
8053
static int si_dpm_read_sensor(void *handle, int idx,
8054
void *value, int *size)
8055
{
8056
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
8057
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
8058
struct amdgpu_ps *rps = &eg_pi->current_rps;
8059
struct si_ps *ps = si_get_ps(rps);
8060
uint32_t sclk, mclk;
8061
u32 pl_index =
8062
(RREG32(mmTARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX_MASK) >>
8063
TARGET_AND_CURRENT_PROFILE_INDEX__CURRENT_STATE_INDEX__SHIFT;
8064
8065
/* size must be at least 4 bytes for all sensors */
8066
if (*size < 4)
8067
return -EINVAL;
8068
8069
switch (idx) {
8070
case AMDGPU_PP_SENSOR_GFX_SCLK:
8071
if (pl_index < ps->performance_level_count) {
8072
sclk = ps->performance_levels[pl_index].sclk;
8073
*((uint32_t *)value) = sclk;
8074
*size = 4;
8075
return 0;
8076
}
8077
return -EINVAL;
8078
case AMDGPU_PP_SENSOR_GFX_MCLK:
8079
if (pl_index < ps->performance_level_count) {
8080
mclk = ps->performance_levels[pl_index].mclk;
8081
*((uint32_t *)value) = mclk;
8082
*size = 4;
8083
return 0;
8084
}
8085
return -EINVAL;
8086
case AMDGPU_PP_SENSOR_GPU_TEMP:
8087
*((uint32_t *)value) = si_dpm_get_temp(adev);
8088
*size = 4;
8089
return 0;
8090
default:
8091
return -EOPNOTSUPP;
8092
}
8093
}
8094
8095
/* IP-block lifecycle callbacks for the SI DPM block. */
static const struct amd_ip_funcs si_dpm_ip_funcs = {
	.name = "si_dpm",
	.early_init = si_dpm_early_init,
	.late_init = si_dpm_late_init,
	.sw_init = si_dpm_sw_init,
	.sw_fini = si_dpm_sw_fini,
	.hw_init = si_dpm_hw_init,
	.hw_fini = si_dpm_hw_fini,
	.suspend = si_dpm_suspend,
	.resume = si_dpm_resume,
	.is_idle = si_dpm_is_idle,
	.wait_for_idle = si_dpm_wait_for_idle,
	.set_clockgating_state = si_dpm_set_clockgating_state,
	.set_powergating_state = si_dpm_set_powergating_state,
};
8110
8111
/* SMC IP block descriptor (v6.0) registered by the SI ASIC setup code. */
const struct amdgpu_ip_block_version si_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &si_dpm_ip_funcs,
};
8119
8120
/* Powerplay entry points exposed through adev->powerplay.pp_funcs. */
static const struct amd_pm_funcs si_dpm_funcs = {
	.pre_set_power_state = &si_dpm_pre_set_power_state,
	.set_power_state = &si_dpm_set_power_state,
	.post_set_power_state = &si_dpm_post_set_power_state,
	.display_configuration_changed = &si_dpm_display_configuration_changed,
	.get_sclk = &si_dpm_get_sclk,
	.get_mclk = &si_dpm_get_mclk,
	.print_power_state = &si_dpm_print_power_state,
	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &si_dpm_force_performance_level,
	.vblank_too_short = &si_dpm_vblank_too_short,
	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
	.set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm,
	.get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm,
	.check_state_equal = &si_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.read_sensor = &si_dpm_read_sensor,
	.pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
8140
8141
/* Thermal interrupt source callbacks (mask control + IH dispatch). */
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
	.set = si_dpm_set_interrupt_state,
	.process = si_dpm_process_interrupt,
};
8145
8146
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
8147
{
8148
adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
8149
adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
8150
}
8151
8152
8153