GitHub Repository: awilliam/linux-vfio
Path: blob/master/drivers/gpu/drm/radeon/r600.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024

/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
MODULE_FIRMWARE("radeon/R600_me.bin");
MODULE_FIRMWARE("radeon/RV610_pfp.bin");
MODULE_FIRMWARE("radeon/RV610_me.bin");
MODULE_FIRMWARE("radeon/RV630_pfp.bin");
MODULE_FIRMWARE("radeon/RV630_me.bin");
MODULE_FIRMWARE("radeon/RV620_pfp.bin");
MODULE_FIRMWARE("radeon/RV620_me.bin");
MODULE_FIRMWARE("radeon/RV635_pfp.bin");
MODULE_FIRMWARE("radeon/RV635_me.bin");
MODULE_FIRMWARE("radeon/RV670_pfp.bin");
MODULE_FIRMWARE("radeon/RV670_me.bin");
MODULE_FIRMWARE("radeon/RS780_pfp.bin");
MODULE_FIRMWARE("radeon/RS780_me.bin");
MODULE_FIRMWARE("radeon/RV770_pfp.bin");
MODULE_FIRMWARE("radeon/RV770_me.bin");
MODULE_FIRMWARE("radeon/RV730_pfp.bin");
MODULE_FIRMWARE("radeon/RV730_me.bin");
MODULE_FIRMWARE("radeon/RV710_pfp.bin");
MODULE_FIRMWARE("radeon/RV710_me.bin");
MODULE_FIRMWARE("radeon/R600_rlc.bin");
MODULE_FIRMWARE("radeon/R700_rlc.bin");
MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
MODULE_FIRMWARE("radeon/CEDAR_me.bin");
MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);

/* get temperature in millidegrees */
int rv6xx_get_temp(struct radeon_device *rdev)
{
    u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
        ASIC_T_SHIFT;
    int actual_temp = temp & 0xff;

    if (temp & 0x100)
        actual_temp -= 256;

    return actual_temp * 1000;
}

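/* Pick the power state/clock mode to request for the planned dynpm action.
 * IGPs and R600 step between whole power states (the array is ordered low
 * to high); other asics stay on one power state and step between its clock
 * modes. States flagged SINGLE_DISPLAY_ONLY are skipped while more than
 * one crtc is active.
 */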
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
    int i;

    rdev->pm.dynpm_can_upclock = true;
    rdev->pm.dynpm_can_downclock = true;

    /* power state array is low to high, default is first */
    if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
        int min_power_state_index = 0;

        if (rdev->pm.num_power_states > 2)
            min_power_state_index = 1;

        switch (rdev->pm.dynpm_planned_action) {
        case DYNPM_ACTION_MINIMUM:
            rdev->pm.requested_power_state_index = min_power_state_index;
            rdev->pm.requested_clock_mode_index = 0;
            rdev->pm.dynpm_can_downclock = false;
            break;
        case DYNPM_ACTION_DOWNCLOCK:
            if (rdev->pm.current_power_state_index == min_power_state_index) {
                rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                rdev->pm.dynpm_can_downclock = false;
            } else {
                if (rdev->pm.active_crtc_count > 1) {
                    for (i = 0; i < rdev->pm.num_power_states; i++) {
                        if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                            continue;
                        else if (i >= rdev->pm.current_power_state_index) {
                            rdev->pm.requested_power_state_index =
                                rdev->pm.current_power_state_index;
                            break;
                        } else {
                            rdev->pm.requested_power_state_index = i;
                            break;
                        }
                    }
                } else {
                    if (rdev->pm.current_power_state_index == 0)
                        rdev->pm.requested_power_state_index =
                            rdev->pm.num_power_states - 1;
                    else
                        rdev->pm.requested_power_state_index =
                            rdev->pm.current_power_state_index - 1;
                }
            }
            rdev->pm.requested_clock_mode_index = 0;
            /* don't use the power state if crtcs are active and no display flag is set */
            if ((rdev->pm.active_crtc_count > 0) &&
                (rdev->pm.power_state[rdev->pm.requested_power_state_index].
                 clock_info[rdev->pm.requested_clock_mode_index].flags &
                 RADEON_PM_MODE_NO_DISPLAY)) {
                rdev->pm.requested_power_state_index++;
            }
            break;
        case DYNPM_ACTION_UPCLOCK:
            if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
                rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
                rdev->pm.dynpm_can_upclock = false;
            } else {
                if (rdev->pm.active_crtc_count > 1) {
                    for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
                        if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                            continue;
                        else if (i <= rdev->pm.current_power_state_index) {
                            rdev->pm.requested_power_state_index =
                                rdev->pm.current_power_state_index;
                            break;
                        } else {
                            rdev->pm.requested_power_state_index = i;
                            break;
                        }
                    }
                } else
                    rdev->pm.requested_power_state_index =
                        rdev->pm.current_power_state_index + 1;
            }
            rdev->pm.requested_clock_mode_index = 0;
            break;
        case DYNPM_ACTION_DEFAULT:
            rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
            rdev->pm.requested_clock_mode_index = 0;
            rdev->pm.dynpm_can_upclock = false;
            break;
        case DYNPM_ACTION_NONE:
        default:
            DRM_ERROR("Requested mode for undefined action\n");
            return;
        }
    } else {
        /* XXX select a power state based on AC/DC, single/dualhead, etc. */
        /* for now just select the first power state and switch between clock modes */
        /* power state array is low to high, default is first (0) */
        if (rdev->pm.active_crtc_count > 1) {
            rdev->pm.requested_power_state_index = -1;
            /* start at 1 as we don't want the default mode */
            for (i = 1; i < rdev->pm.num_power_states; i++) {
                if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
                    continue;
                else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
                         (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
                    rdev->pm.requested_power_state_index = i;
                    break;
                }
            }
            /* if nothing selected, grab the default state. */
            if (rdev->pm.requested_power_state_index == -1)
                rdev->pm.requested_power_state_index = 0;
        } else
            rdev->pm.requested_power_state_index = 1;

        switch (rdev->pm.dynpm_planned_action) {
        case DYNPM_ACTION_MINIMUM:
            rdev->pm.requested_clock_mode_index = 0;
            rdev->pm.dynpm_can_downclock = false;
            break;
        case DYNPM_ACTION_DOWNCLOCK:
            if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
                if (rdev->pm.current_clock_mode_index == 0) {
                    rdev->pm.requested_clock_mode_index = 0;
                    rdev->pm.dynpm_can_downclock = false;
                } else
                    rdev->pm.requested_clock_mode_index =
                        rdev->pm.current_clock_mode_index - 1;
            } else {
                rdev->pm.requested_clock_mode_index = 0;
                rdev->pm.dynpm_can_downclock = false;
            }
            /* don't use the power state if crtcs are active and no display flag is set */
            if ((rdev->pm.active_crtc_count > 0) &&
                (rdev->pm.power_state[rdev->pm.requested_power_state_index].
                 clock_info[rdev->pm.requested_clock_mode_index].flags &
                 RADEON_PM_MODE_NO_DISPLAY)) {
                rdev->pm.requested_clock_mode_index++;
            }
            break;
        case DYNPM_ACTION_UPCLOCK:
            if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
                if (rdev->pm.current_clock_mode_index ==
                    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
                    rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
                    rdev->pm.dynpm_can_upclock = false;
                } else
                    rdev->pm.requested_clock_mode_index =
                        rdev->pm.current_clock_mode_index + 1;
            } else {
                rdev->pm.requested_clock_mode_index =
                    rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
                rdev->pm.dynpm_can_upclock = false;
            }
            break;
        case DYNPM_ACTION_DEFAULT:
            rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
            rdev->pm.requested_clock_mode_index = 0;
            rdev->pm.dynpm_can_upclock = false;
            break;
        case DYNPM_ACTION_NONE:
        default:
            DRM_ERROR("Requested mode for undefined action\n");
            return;
        }
    }

    DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
             rdev->pm.power_state[rdev->pm.requested_power_state_index].
             clock_info[rdev->pm.requested_clock_mode_index].sclk,
             rdev->pm.power_state[rdev->pm.requested_power_state_index].
             clock_info[rdev->pm.requested_clock_mode_index].mclk,
             rdev->pm.power_state[rdev->pm.requested_power_state_index].
             pcie_lanes);
}

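/* Return the index of the instance'th power state of the given type,
 * falling back to the default power state index if no match is found.
 */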
static int r600_pm_get_type_index(struct radeon_device *rdev,
                  enum radeon_pm_state_type ps_type,
                  int instance)
{
    int i;
    int found_instance = -1;

    for (i = 0; i < rdev->pm.num_power_states; i++) {
        if (rdev->pm.power_state[i].type == ps_type) {
            found_instance++;
            if (found_instance == instance)
                return i;
        }
    }
    /* return default if no match */
    return rdev->pm.default_power_state_index;
}

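/* Fill the PM profile table for RS780-class IGPs. The power state
 * indices used below depend on how many power states the board's
 * tables expose (2, 3, or more).
 */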
void rs780_pm_init_profile(struct radeon_device *rdev)
{
    if (rdev->pm.num_power_states == 2) {
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
    } else if (rdev->pm.num_power_states == 3) {
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
    } else {
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
    }
}

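/* Fill the PM profile table for r6xx/r7xx asics. R600 itself only uses
 * the default state; other asics pick battery or performance states
 * (via r600_pm_get_type_index) depending on mobility and how many power
 * states are available.
 */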
void r600_pm_init_profile(struct radeon_device *rdev)
{
    if (rdev->family == CHIP_R600) {
        /* XXX */
        /* default */
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
        /* low sh */
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
        /* mid sh */
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
        /* high sh */
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
        /* low mh */
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
        /* mid mh */
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
        /* high mh */
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
        rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
    } else {
        if (rdev->pm.num_power_states < 4) {
            /* default */
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
            /* low sh */
            rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
            /* mid sh */
            rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
            /* high sh */
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
            /* low mh */
            rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
            /* mid mh */
            rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
            /* high mh */
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
        } else {
            /* default */
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
            /* low sh */
            if (rdev->flags & RADEON_IS_MOBILITY) {
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
            } else {
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
            }
            /* mid sh */
            if (rdev->flags & RADEON_IS_MOBILITY) {
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
            } else {
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
            }
            /* high sh */
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
                r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
                r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
            /* low mh */
            if (rdev->flags & RADEON_IS_MOBILITY) {
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
            } else {
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
            }
            /* mid mh */
            if (rdev->flags & RADEON_IS_MOBILITY) {
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
            } else {
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
                    r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
                rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
            }
            /* high mh */
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
                r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
                r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
            rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
        }
    }
}

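/* Apply the non-clock settings for the requested state; currently this
 * only programs VDDC when the requested voltage differs from the cached
 * value.
 */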
void r600_pm_misc(struct radeon_device *rdev)
{
    int req_ps_idx = rdev->pm.requested_power_state_index;
    int req_cm_idx = rdev->pm.requested_clock_mode_index;
    struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
    struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

    if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
        /* 0xff01 is a flag rather than an actual voltage */
        if (voltage->voltage == 0xff01)
            return;
        if (voltage->voltage != rdev->pm.current_vddc) {
            radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
            rdev->pm.current_vddc = voltage->voltage;
            DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
        }
    }
}

bool r600_gui_idle(struct radeon_device *rdev)
{
    if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
        return false;
    else
        return true;
}

/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
    bool connected = false;

    if (ASIC_IS_DCE3(rdev)) {
        switch (hpd) {
        case RADEON_HPD_1:
            if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_2:
            if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_3:
            if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_4:
            if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
            /* DCE 3.2 */
        case RADEON_HPD_5:
            if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_6:
            if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
                connected = true;
            break;
        default:
            break;
        }
    } else {
        switch (hpd) {
        case RADEON_HPD_1:
            if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_2:
            if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                connected = true;
            break;
        case RADEON_HPD_3:
            if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
                connected = true;
            break;
        default:
            break;
        }
    }
    return connected;
}

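/* Program the HPD interrupt polarity so the next interrupt fires on the
 * opposite transition of the currently sensed connector state.
 */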
void r600_hpd_set_polarity(struct radeon_device *rdev,
               enum radeon_hpd_id hpd)
{
    u32 tmp;
    bool connected = r600_hpd_sense(rdev, hpd);

    if (ASIC_IS_DCE3(rdev)) {
        switch (hpd) {
        case RADEON_HPD_1:
            tmp = RREG32(DC_HPD1_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD1_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_2:
            tmp = RREG32(DC_HPD2_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD2_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_3:
            tmp = RREG32(DC_HPD3_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD3_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_4:
            tmp = RREG32(DC_HPD4_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD4_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_5:
            tmp = RREG32(DC_HPD5_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD5_INT_CONTROL, tmp);
            break;
            /* DCE 3.2 */
        case RADEON_HPD_6:
            tmp = RREG32(DC_HPD6_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HPDx_INT_POLARITY;
            else
                tmp |= DC_HPDx_INT_POLARITY;
            WREG32(DC_HPD6_INT_CONTROL, tmp);
            break;
        default:
            break;
        }
    } else {
        switch (hpd) {
        case RADEON_HPD_1:
            tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
            else
                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
            WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_2:
            tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
            else
                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
            WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
            break;
        case RADEON_HPD_3:
            tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
            if (connected)
                tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
            else
                tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
            WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
            break;
        default:
            break;
        }
    }
}

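/* Enable the HPD pins used by the connectors on this board, flag the
 * corresponding hpd interrupt sources, and update the irq registers.
 */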
void r600_hpd_init(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;

    if (ASIC_IS_DCE3(rdev)) {
        u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
        if (ASIC_IS_DCE32(rdev))
            tmp |= DC_HPDx_EN;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
            struct radeon_connector *radeon_connector = to_radeon_connector(connector);
            switch (radeon_connector->hpd.hpd) {
            case RADEON_HPD_1:
                WREG32(DC_HPD1_CONTROL, tmp);
                rdev->irq.hpd[0] = true;
                break;
            case RADEON_HPD_2:
                WREG32(DC_HPD2_CONTROL, tmp);
                rdev->irq.hpd[1] = true;
                break;
            case RADEON_HPD_3:
                WREG32(DC_HPD3_CONTROL, tmp);
                rdev->irq.hpd[2] = true;
                break;
            case RADEON_HPD_4:
                WREG32(DC_HPD4_CONTROL, tmp);
                rdev->irq.hpd[3] = true;
                break;
                /* DCE 3.2 */
            case RADEON_HPD_5:
                WREG32(DC_HPD5_CONTROL, tmp);
                rdev->irq.hpd[4] = true;
                break;
            case RADEON_HPD_6:
                WREG32(DC_HPD6_CONTROL, tmp);
                rdev->irq.hpd[5] = true;
                break;
            default:
                break;
            }
        }
    } else {
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
            struct radeon_connector *radeon_connector = to_radeon_connector(connector);
            switch (radeon_connector->hpd.hpd) {
            case RADEON_HPD_1:
                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                rdev->irq.hpd[0] = true;
                break;
            case RADEON_HPD_2:
                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                rdev->irq.hpd[1] = true;
                break;
            case RADEON_HPD_3:
                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
                rdev->irq.hpd[2] = true;
                break;
            default:
                break;
            }
        }
    }
    if (rdev->irq.installed)
        r600_irq_set(rdev);
}

void r600_hpd_fini(struct radeon_device *rdev)
{
    struct drm_device *dev = rdev->ddev;
    struct drm_connector *connector;

    if (ASIC_IS_DCE3(rdev)) {
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
            struct radeon_connector *radeon_connector = to_radeon_connector(connector);
            switch (radeon_connector->hpd.hpd) {
            case RADEON_HPD_1:
                WREG32(DC_HPD1_CONTROL, 0);
                rdev->irq.hpd[0] = false;
                break;
            case RADEON_HPD_2:
                WREG32(DC_HPD2_CONTROL, 0);
                rdev->irq.hpd[1] = false;
                break;
            case RADEON_HPD_3:
                WREG32(DC_HPD3_CONTROL, 0);
                rdev->irq.hpd[2] = false;
                break;
            case RADEON_HPD_4:
                WREG32(DC_HPD4_CONTROL, 0);
                rdev->irq.hpd[3] = false;
                break;
                /* DCE 3.2 */
            case RADEON_HPD_5:
                WREG32(DC_HPD5_CONTROL, 0);
                rdev->irq.hpd[4] = false;
                break;
            case RADEON_HPD_6:
                WREG32(DC_HPD6_CONTROL, 0);
                rdev->irq.hpd[5] = false;
                break;
            default:
                break;
            }
        }
    } else {
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
            struct radeon_connector *radeon_connector = to_radeon_connector(connector);
            switch (radeon_connector->hpd.hpd) {
            case RADEON_HPD_1:
                WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
                rdev->irq.hpd[0] = false;
                break;
            case RADEON_HPD_2:
                WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
                rdev->irq.hpd[1] = false;
                break;
            case RADEON_HPD_3:
                WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
                rdev->irq.hpd[2] = false;
                break;
            default:
                break;
            }
        }
    }
}

/*
 * R600 PCIE GART
 */
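/* Invalidate the VM context 0 TLB covering the GTT range, flushing the
 * HDP cache first so page-table updates have reached vram.
 */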
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
    unsigned i;
    u32 tmp;

    /* flush hdp cache so updates hit vram */
    if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
        !(rdev->flags & RADEON_IS_AGP)) {
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
        u32 tmp;

        /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
         * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
         * This seems to cause problems on some AGP cards. Just use the old
         * method for them.
         */
        WREG32(HDP_DEBUG1, 0);
        tmp = readl((void __iomem *)ptr);
    } else
        WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

    WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
    WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
    WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
        tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
        if (tmp == 2) {
            printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
            return;
        }
        if (tmp) {
            return;
        }
        udelay(1);
    }
}

int r600_pcie_gart_init(struct radeon_device *rdev)
{
    int r;

    if (rdev->gart.table.vram.robj) {
        WARN(1, "R600 PCIE GART already initialized\n");
        return 0;
    }
    /* Initialize common gart structure */
    r = radeon_gart_init(rdev);
    if (r)
        return r;
    rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
    return radeon_gart_table_vram_alloc(rdev);
}

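/* Program the VM/L2/TLB registers so context 0 translates the GTT
 * aperture through the page table in vram, then flush the TLB.
 */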
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
    u32 tmp;
    int r, i;

    if (rdev->gart.table.vram.robj == NULL) {
        dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
        return -EINVAL;
    }
    r = radeon_gart_table_vram_pin(rdev);
    if (r)
        return r;
    radeon_gart_restore(rdev);

    /* Setup L2 cache */
    WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
           ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
           EFFECTIVE_L2_QUEUE_SIZE(7));
    WREG32(VM_L2_CNTL2, 0);
    WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
    /* Setup TLB control */
    tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
          SYSTEM_ACCESS_MODE_NOT_IN_SYS |
          EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
          ENABLE_WAIT_L2_QUERY;
    WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
    WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
    WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
    WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
    WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
    WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
    WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
           RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
    WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
           (u32)(rdev->dummy_page.addr >> 12));
    for (i = 1; i < 7; i++)
        WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

    r600_pcie_gart_tlb_flush(rdev);
    rdev->gart.ready = true;
    return 0;
}

void r600_pcie_gart_disable(struct radeon_device *rdev)
{
    u32 tmp;
    int i, r;

    /* Disable all tables */
    for (i = 0; i < 7; i++)
        WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

    /* Disable L2 cache */
    WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
           EFFECTIVE_L2_QUEUE_SIZE(7));
    WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
    /* Setup L1 TLB control */
    tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
          ENABLE_WAIT_L2_QUERY;
    WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
    if (rdev->gart.table.vram.robj) {
        r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
        if (likely(r == 0)) {
            radeon_bo_kunmap(rdev->gart.table.vram.robj);
            radeon_bo_unpin(rdev->gart.table.vram.robj);
            radeon_bo_unreserve(rdev->gart.table.vram.robj);
        }
    }
}

void r600_pcie_gart_fini(struct radeon_device *rdev)
{
    radeon_gart_fini(rdev);
    r600_pcie_gart_disable(rdev);
    radeon_gart_table_vram_free(rdev);
}

void r600_agp_enable(struct radeon_device *rdev)
{
    u32 tmp;
    int i;

    /* Setup L2 cache */
    WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
           ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
           EFFECTIVE_L2_QUEUE_SIZE(7));
    WREG32(VM_L2_CNTL2, 0);
    WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
    /* Setup TLB control */
    tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
          SYSTEM_ACCESS_MODE_NOT_IN_SYS |
          EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
          ENABLE_WAIT_L2_QUERY;
    WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
    WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
    WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
    WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
    for (i = 0; i < 7; i++)
        WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
    unsigned i;
    u32 tmp;

    for (i = 0; i < rdev->usec_timeout; i++) {
        /* read MC_STATUS */
        tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
        if (!tmp)
            return 0;
        udelay(1);
    }
    return -1;
}

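/* Program the memory controller's VRAM/AGP apertures and the HDP
 * non-surface range while the MC clients are stopped, then resume them.
 */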
static void r600_mc_program(struct radeon_device *rdev)
{
    struct rv515_mc_save save;
    u32 tmp;
    int i, j;

    /* Initialize HDP */
    for (i = 0, j = 0; i < 32; i++, j += 0x18) {
        WREG32((0x2c14 + j), 0x00000000);
        WREG32((0x2c18 + j), 0x00000000);
        WREG32((0x2c1c + j), 0x00000000);
        WREG32((0x2c20 + j), 0x00000000);
        WREG32((0x2c24 + j), 0x00000000);
    }
    WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

    rv515_mc_stop(rdev, &save);
    if (r600_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
    }
    /* Lockout access through VGA aperture (doesn't exist before R600) */
    WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
    /* Update configuration */
    if (rdev->flags & RADEON_IS_AGP) {
        if (rdev->mc.vram_start < rdev->mc.gtt_start) {
            /* VRAM before AGP */
            WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                   rdev->mc.vram_start >> 12);
            WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                   rdev->mc.gtt_end >> 12);
        } else {
            /* VRAM after AGP */
            WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
                   rdev->mc.gtt_start >> 12);
            WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
                   rdev->mc.vram_end >> 12);
        }
    } else {
        WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
        WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
    }
    WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
    tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
    tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
    WREG32(MC_VM_FB_LOCATION, tmp);
    WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
    WREG32(HDP_NONSURFACE_INFO, (2 << 7));
    WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
    if (rdev->flags & RADEON_IS_AGP) {
        WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
        WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
        WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
    } else {
        WREG32(MC_VM_AGP_BASE, 0);
        WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
    }
    if (r600_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
    }
    rv515_mc_resume(rdev, &save);
    /* we need to own VRAM, so turn off the VGA renderer here
     * to stop it overwriting our objects */
    rv515_vga_render_disable(rdev);
}

/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place VRAM at the same offset in the GPU address space as it
 * occupies in the CPU (PCI) address space, as some GPUs seem to have
 * issues when it is reprogrammed to a different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture then we limit the VRAM size to the aperture.
 *
 * If we are using AGP then place VRAM adjacent to the AGP aperture, as we
 * need them to be contiguous from the GPU's point of view so that we can
 * program the GPU to catch accesses outside them (weird GPU policy, see ??).
 *
 * This function never fails; in the worst case we limit VRAM or GTT.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 */
static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
    u64 size_bf, size_af;

    if (mc->mc_vram_size > 0xE0000000) {
        /* leave room for at least 512M GTT */
        dev_warn(rdev->dev, "limiting VRAM\n");
        mc->real_vram_size = 0xE0000000;
        mc->mc_vram_size = 0xE0000000;
    }
    if (rdev->flags & RADEON_IS_AGP) {
        size_bf = mc->gtt_start;
        size_af = 0xFFFFFFFF - mc->gtt_end + 1;
        if (size_bf > size_af) {
            if (mc->mc_vram_size > size_bf) {
                dev_warn(rdev->dev, "limiting VRAM\n");
                mc->real_vram_size = size_bf;
                mc->mc_vram_size = size_bf;
            }
            mc->vram_start = mc->gtt_start - mc->mc_vram_size;
        } else {
            if (mc->mc_vram_size > size_af) {
                dev_warn(rdev->dev, "limiting VRAM\n");
                mc->real_vram_size = size_af;
                mc->mc_vram_size = size_af;
            }
            mc->vram_start = mc->gtt_end;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
             mc->mc_vram_size >> 20, mc->vram_start,
             mc->vram_end, mc->real_vram_size >> 20);
    } else {
        u64 base = 0;
        if (rdev->flags & RADEON_IS_IGP) {
            base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
            base <<= 24;
        }
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = 0;
        radeon_gtt_location(rdev, mc);
    }
}

int r600_mc_init(struct radeon_device *rdev)
{
    u32 tmp;
    int chansize, numchan;

    /* Get VRAM information */
    rdev->mc.vram_is_ddr = true;
    tmp = RREG32(RAMCFG);
    if (tmp & CHANSIZE_OVERRIDE) {
        chansize = 16;
    } else if (tmp & CHANSIZE_MASK) {
        chansize = 64;
    } else {
        chansize = 32;
    }
    tmp = RREG32(CHMAP);
    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
    case 0:
    default:
        numchan = 1;
        break;
    case 1:
        numchan = 2;
        break;
    case 2:
        numchan = 4;
        break;
    case 3:
        numchan = 8;
        break;
    }
    rdev->mc.vram_width = numchan * chansize;
    /* Could aper size report 0? */
    rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
    rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
    /* Setup GPU memory space */
    rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
    rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
    rdev->mc.visible_vram_size = rdev->mc.aper_size;
    r600_vram_gtt_location(rdev, &rdev->mc);

    if (rdev->flags & RADEON_IS_IGP) {
        rs690_pm_info(rdev);
        rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
    }
    radeon_update_bandwidth_info(rdev);
    return 0;
}

/* We don't check that the GPU really needs a reset; we simply do the
 * reset, it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
    struct rv515_mc_save save;
    u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
                S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
                S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
                S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
                S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
                S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
                S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
                S_008010_GUI_ACTIVE(1);
    u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
                S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
                S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
                S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
                S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
                S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
                S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
                S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
    u32 tmp;

    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
        return 0;

    dev_info(rdev->dev, "GPU softreset\n");
    dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
         RREG32(R_008010_GRBM_STATUS));
    dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
         RREG32(R_008014_GRBM_STATUS2));
    dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
         RREG32(R_000E50_SRBM_STATUS));
    rv515_mc_stop(rdev, &save);
    if (r600_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
    }
    /* Disable CP parsing/prefetching */
    WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
    /* Check if any of the rendering block is busy and reset it */
    if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
        (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
        tmp = S_008020_SOFT_RESET_CR(1) |
            S_008020_SOFT_RESET_DB(1) |
            S_008020_SOFT_RESET_CB(1) |
            S_008020_SOFT_RESET_PA(1) |
            S_008020_SOFT_RESET_SC(1) |
            S_008020_SOFT_RESET_SMX(1) |
            S_008020_SOFT_RESET_SPI(1) |
            S_008020_SOFT_RESET_SX(1) |
            S_008020_SOFT_RESET_SH(1) |
            S_008020_SOFT_RESET_TC(1) |
            S_008020_SOFT_RESET_TA(1) |
            S_008020_SOFT_RESET_VC(1) |
            S_008020_SOFT_RESET_VGT(1);
        dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(R_008020_GRBM_SOFT_RESET, tmp);
        RREG32(R_008020_GRBM_SOFT_RESET);
        mdelay(15);
        WREG32(R_008020_GRBM_SOFT_RESET, 0);
    }
    /* Reset CP (we always reset CP) */
    tmp = S_008020_SOFT_RESET_CP(1);
    dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
    WREG32(R_008020_GRBM_SOFT_RESET, tmp);
    RREG32(R_008020_GRBM_SOFT_RESET);
    mdelay(15);
    WREG32(R_008020_GRBM_SOFT_RESET, 0);
    /* Wait a little for things to settle down */
    mdelay(1);
    dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
         RREG32(R_008010_GRBM_STATUS));
    dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n",
         RREG32(R_008014_GRBM_STATUS2));
    dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
         RREG32(R_000E50_SRBM_STATUS));
    rv515_mc_resume(rdev, &save);
    return 0;
}

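/* Check whether the CP is making progress: if the GUI is busy, submit
 * two NOPs and let the r100 lockup helpers compare ring pointers over
 * time to decide whether the GPU is locked up.
 */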
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
    u32 srbm_status;
    u32 grbm_status;
    u32 grbm_status2;
    struct r100_gpu_lockup *lockup;
    int r;

    if (rdev->family >= CHIP_RV770)
        lockup = &rdev->config.rv770.lockup;
    else
        lockup = &rdev->config.r600.lockup;

    srbm_status = RREG32(R_000E50_SRBM_STATUS);
    grbm_status = RREG32(R_008010_GRBM_STATUS);
    grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
    if (!G_008010_GUI_ACTIVE(grbm_status)) {
        r100_gpu_lockup_update(lockup, &rdev->cp);
        return false;
    }
    /* force CP activities */
    r = radeon_ring_lock(rdev, 2);
    if (!r) {
        /* PACKET2 NOP */
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_write(rdev, 0x80000000);
        radeon_ring_unlock_commit(rdev);
    }
    rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
    return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}

int r600_asic_reset(struct radeon_device *rdev)
{
    return r600_gpu_soft_reset(rdev);
}

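/* Build the pipe-to-backend map: distribute the enabled render backends
 * across the tile pipes in swizzled order, two bits per pipe. For
 * example, 2 pipes and 2 enabled backends yield a map of 0x4
 * (pipe 0 -> RB 0, pipe 1 -> RB 1).
 */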
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
                         u32 num_backends,
                         u32 backend_disable_mask)
{
    u32 backend_map = 0;
    u32 enabled_backends_mask;
    u32 enabled_backends_count;
    u32 cur_pipe;
    u32 swizzle_pipe[R6XX_MAX_PIPES];
    u32 cur_backend;
    u32 i;

    if (num_tile_pipes > R6XX_MAX_PIPES)
        num_tile_pipes = R6XX_MAX_PIPES;
    if (num_tile_pipes < 1)
        num_tile_pipes = 1;
    if (num_backends > R6XX_MAX_BACKENDS)
        num_backends = R6XX_MAX_BACKENDS;
    if (num_backends < 1)
        num_backends = 1;

    enabled_backends_mask = 0;
    enabled_backends_count = 0;
    for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
        if (((backend_disable_mask >> i) & 1) == 0) {
            enabled_backends_mask |= (1 << i);
            ++enabled_backends_count;
        }
        if (enabled_backends_count == num_backends)
            break;
    }

    if (enabled_backends_count == 0) {
        enabled_backends_mask = 1;
        enabled_backends_count = 1;
    }

    if (enabled_backends_count != num_backends)
        num_backends = enabled_backends_count;

    memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
    switch (num_tile_pipes) {
    case 1:
        swizzle_pipe[0] = 0;
        break;
    case 2:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 1;
        break;
    case 3:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 1;
        swizzle_pipe[2] = 2;
        break;
    case 4:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 1;
        swizzle_pipe[2] = 2;
        swizzle_pipe[3] = 3;
        break;
    case 5:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 1;
        swizzle_pipe[2] = 2;
        swizzle_pipe[3] = 3;
        swizzle_pipe[4] = 4;
        break;
    case 6:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 2;
        swizzle_pipe[2] = 4;
        swizzle_pipe[3] = 5;
        swizzle_pipe[4] = 1;
        swizzle_pipe[5] = 3;
        break;
    case 7:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 2;
        swizzle_pipe[2] = 4;
        swizzle_pipe[3] = 6;
        swizzle_pipe[4] = 1;
        swizzle_pipe[5] = 3;
        swizzle_pipe[6] = 5;
        break;
    case 8:
        swizzle_pipe[0] = 0;
        swizzle_pipe[1] = 2;
        swizzle_pipe[2] = 4;
        swizzle_pipe[3] = 6;
        swizzle_pipe[4] = 1;
        swizzle_pipe[5] = 3;
        swizzle_pipe[6] = 5;
        swizzle_pipe[7] = 7;
        break;
    }

    cur_backend = 0;
    for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
        while (((1 << cur_backend) & enabled_backends_mask) == 0)
            cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

        backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

        cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
    }

    return backend_map;
}

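/* Population count of the low 32 bits, used to count enabled pipes
 * (equivalent to hweight32(val)).
 */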
int r600_count_pipe_bits(uint32_t val)
{
    int i, ret = 0;

    for (i = 0; i < 32; i++) {
        ret += val & 1;
        val >>= 1;
    }
    return ret;
}

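/* Per-family gfx setup: derive the pipe/backend/SIMD and shader resource
 * limits for this asic, then program HDP, tiling, and related registers.
 */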
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
	else
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	if (tmp > 256) {
		tmp = 256;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(SX_MISC, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}


/*
 * Indirect registers accessor
 */
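/* The PCIE port registers are reached through an index/data pair: write
 * the register index to PCIE_PORT_INDEX, read it back to post the write,
 * then access PCIE_PORT_DATA.  The (void)RREG32() posting reads below
 * keep the index and data accesses from being reordered on the bus.
 */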
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}

/*
 * CP & Ring
 */
void r600_cp_stop(struct radeon_device *rdev)
{
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	WREG32(SCRATCH_UMSK, 0);
}

int r600_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600:
		chip_name = "R600";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV610:
		chip_name = "RV610";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV630:
		chip_name = "RV630";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV620:
		chip_name = "RV620";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV635:
		chip_name = "RV635";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV670:
		chip_name = "RV670";
		rlc_chip_name = "R600";
		break;
	case CHIP_RS780:
	case CHIP_RS880:
		chip_name = "RS780";
		rlc_chip_name = "R600";
		break;
	case CHIP_RV770:
		chip_name = "RV770";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		chip_name = "RV730";
		rlc_chip_name = "R700";
		break;
	case CHIP_RV710:
		chip_name = "RV710";
		rlc_chip_name = "R700";
		break;
	case CHIP_CEDAR:
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		break;
	case CHIP_REDWOOD:
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		break;
	case CHIP_JUNIPER:
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";
		break;
	case CHIP_PALM:
		chip_name = "PALM";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO:
		chip_name = "SUMO";
		rlc_chip_name = "SUMO";
		break;
	case CHIP_SUMO2:
		chip_name = "SUMO2";
		rlc_chip_name = "SUMO";
		break;
	default: BUG();
	}

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;
	}

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->rlc_fw->size != rlc_req_size) {
		printk(KERN_ERR
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);
		err = -EINVAL;
	}

out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
	}
	return err;
}

static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL,
#ifdef __BIG_ENDIAN
	       BUF_SWAP_32BIT |
#endif
	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}

int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}

int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB_RPTR_ADDR,
#ifdef __BIG_ENDIAN
	       RB_RPTR_SWAP(2) |
#endif
	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}

void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}

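/* Worked example for r600_ring_init() above: the 1MB CP ring requested
 * from r600_init() gives rb_bufsz = drm_order(1048576 / 8) = 17, so
 * ring_size = (1 << 18) * 4 = 1MB again; in general the requested size
 * is rounded up to the next supported power of two.
 */
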
void r600_cp_fini(struct radeon_device *rdev)
{
	r600_cp_stop(rdev);
	radeon_ring_fini(rdev);
}


/*
 * GPU scratch registers helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	rdev->scratch.reg_base = SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}

void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	if (rdev->wb.use_event) {
		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
		/* EVENT_WRITE_EOP - flush caches, send int */
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
		radeon_ring_write(rdev, addr & 0xffffffff);
		radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
		radeon_ring_write(rdev, fence->seq);
		radeon_ring_write(rdev, 0);
	} else {
		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
		/* wait for 3D idle clean */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
		radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
		/* Emit fence sequence & fire IRQ */
		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
		radeon_ring_write(rdev, fence->seq);
		/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
		radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
		radeon_ring_write(rdev, RB_INT_STAT);
	}
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	int r;

	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
		return r;
	}
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

int r600_startup(struct radeon_device *rdev)
{
	int r;

	/* enable pcie gen2 link */
	r600_pcie_gen2_enable(rdev);

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	/* Enable IRQ */
	r = r600_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	r600_irq_set(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;

	return 0;
}

void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	} else {
		temp &= ~(1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}

int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting will perform the tasks needed to bring the GPU back into
	 * good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		DRM_ERROR("radeon: audio resume failed\n");
		return r;
	}

	return r;
}

int r600_suspend(struct radeon_device *rdev)
{
	int r;

	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (!r) {
			radeon_bo_unpin(rdev->r600_blit.shader_obj);
			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
		}
	}
	return 0;
}

/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling asic-specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			radeon_agp_disable(rdev);
	}
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_cp_fini(rdev);
		r600_irq_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		} else {
			r = r600_ib_test(rdev);
			if (r) {
				dev_err(rdev->dev, "IB test failed (%d).\n", r);
				rdev->accel_working = false;
			}
		}
	}

	r = r600_audio_init(rdev);
	if (r)
		return r; /* TODO error handling */
	return 0;
}

void r600_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_cp_fini(rdev);
	r600_irq_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}


/*
 * CS stuff
 */
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}

/*
 * Interrupts
 *
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty
 * much the same as the CP ring buffer, but in reverse.  Rather than the
 * CPU writing to the ring and the GPU consuming, the GPU writes to the
 * ring and the host consumes.  As the host irq handler processes
 * interrupts, it increments the rptr.  When the rptr catches up with
 * the wptr, all the current interrupts have been processed.
 */

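/* A minimal sketch of the consumer side, assuming the 16-byte ring
 * entries described further below (this is exactly what
 * r600_irq_process() implements):
 *
 *	while (rptr != wptr) {
 *		handle_entry(&ih.ring[rptr / 4]);
 *		rptr = (rptr + 16) & ih.ptr_mask;
 *	}
 */
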
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
	rdev->ih.rptr = 0;
}

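/* Worked example for r600_ih_ring_init() above: the 64KB ring requested
 * from r600_init() gives rb_bufsz = drm_order(65536 / 4) = 14, so
 * ring_size = (1 << 14) * 4 = 64KB and ptr_mask = 0xffff.
 */
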
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, rdev->ih.ring_size,
				     PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}

static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;
	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}

void r600_rlc_stop(struct radeon_device *rdev)
{

	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		udelay(15000);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	if (!rdev->rlc_fw)
		return -EINVAL;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	if (rdev->family <= CHIP_CAICOS) {
		WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
		WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	}
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CAYMAN) {
		for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}

static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
	rdev->ih.wptr = 0;
	rdev->ih.rptr = 0;
}

static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}

int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}

void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}

int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;
	u32 d1grph = 0, d2grph = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    rdev->irq.pflip[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    rdev->irq.pflip[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}

static inline void r600_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
	}
	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);

	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}

void r600_irq_disable(struct radeon_device *rdev)
{
	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev);
	r600_disable_interrupt_state(rdev);
}

static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last vector that was not overwritten (wptr + 16).
		 * Hopefully this allows us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}

/* r600 IV Ring
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [31:8]   - reserved
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *      1         0  D1 Vblank
 *      1         1  D1 Vline
 *      5         0  D2 Vblank
 *      5         1  D2 Vline
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *     21         4  HDMI block A
 *     21         5  HDMI block B
 *    176         -  CP_INT RB
 *    177         -  CP_INT IB1
 *    178         -  CP_INT IB2
 *    181         -  EOP Interrupt
 *    233         -  GUI Idle
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */

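/* Given that layout, decoding an entry comes down to two masked dword
 * reads (this mirrors the decode in r600_irq_process() below):
 *
 *	src_id   = le32_to_cpu(ih.ring[ring_index])     & 0xff;
 *	src_data = le32_to_cpu(ih.ring[ring_index + 1]) & 0xfffffff;
 */
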
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data;
	u32 ring_index;
	unsigned long flags;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = r600_get_ih_wptr(rdev);
	rptr = rdev->ih.rptr;
	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[0])
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (rdev->irq.pflip[1])
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			radeon_fence_process(rdev);
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

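/*
 * cp.ring_size is in bytes, so ring_size / 4 is the ring size in dwords;
 * subtracting the free dwords gives the number of dwords pending in the
 * ring, which are then dumped starting at the read pointer.
 */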
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}

/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * @rdev: radeon device structure
 * @bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX ASICs don't seem to take into account an HDP flush
 * performed through the ring buffer, which leads to rendering corruption
 * (see http://bugzilla.kernel.org/show_bug.cgi?id=15186). To avoid this,
 * perform the HDP flush directly by writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
	 * This seems to cause problems on some AGP cards. Just use the old
	 * method for them.
	 */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
		u32 tmp;

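		/* write HDP_DEBUG1, then read a dword back through the
		 * framebuffer scratch page to force the flush; tmp is
		 * intentionally unused beyond triggering the read.
		 */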
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}

void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	u32 link_width_cntl, mask, target_reg;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     R600_PCIE_LC_RENEGOTIATE_EN |
			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
	link_width_cntl |= mask;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);

	/* some northbridges can renegotiate the link rather than requiring
	 * a complete re-config.
	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
	 */
	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
	else
		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;

	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						       RADEON_PCIE_LC_RECONFIG_NOW));

	if (rdev->family >= CHIP_RV770)
		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
	else
		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;

	/* wait for lane set to complete */
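	/* the profile index register reads back as all ones while the
	 * lane change is still in flight.
	 */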
	link_width_cntl = RREG32(target_reg);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32(target_reg);

}

int r600_get_pcie_lanes(struct radeon_device *rdev)
{
	u32 link_width_cntl;

	if (rdev->flags & RADEON_IS_IGP)
		return 0;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return 0;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return 0;

	/* FIXME wait for idle */

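	/* report the currently negotiated width (the RD field), not the
	 * requested one.
	 */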
	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
	case RADEON_PCIE_LC_LINK_WIDTH_X0:
		return 0;
	case RADEON_PCIE_LC_LINK_WIDTH_X1:
		return 1;
	case RADEON_PCIE_LC_LINK_WIDTH_X2:
		return 2;
	case RADEON_PCIE_LC_LINK_WIDTH_X4:
		return 4;
	case RADEON_PCIE_LC_LINK_WIDTH_X8:
		return 8;
	case RADEON_PCIE_LC_LINK_WIDTH_X16:
	default:
		return 16;
	}
}

static void r600_pcie_gen2_enable(struct radeon_device *rdev)
{
	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
	u16 link_cntl2;

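	/* honour the radeon.pcie_gen2 module parameter; 0 disables the
	 * gen2 transition entirely.
	 */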
	if (radeon_pcie_gen2 == 0)
		return;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* x2 cards have a special sequence */
	if (ASIC_IS_X2(rdev))
		return;

	/* only RV6xx+ chips are supported */
	if (rdev->family <= CHIP_R600)
		return;

	/* 55 nm r6xx asics */
	if ((rdev->family == CHIP_RV670) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RV635)) {
		/* advertise upconfig capability */
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
					     LC_RECONFIG_ARC_MISSING_ESCAPE);
			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		} else {
			link_width_cntl |= LC_UPCONFIGURE_DIS;
			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
		}
	}

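	/* only try the speed change if the other side has ever advertised
	 * and still supports gen2; 0x4088 appears to be the PCIe LINK_CNTL2
	 * config register mirrored into register space (hence the
	 * MM_CFGREGS_CNTL dance), and a target link speed of 0x2 requests
	 * 5.0 GT/s.
	 */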
	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {

		/* 55 nm r6xx asics */
		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			WREG32(MM_CFGREGS_CNTL, 0x8);
			link_cntl2 = RREG32(0x4088);
			WREG32(MM_CFGREGS_CNTL, 0);
			/* not supported yet */
			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
				return;
		}

		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

		tmp = RREG32(0x541c);
		WREG32(0x541c, tmp | 0x8);
		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
		link_cntl2 = RREG16(0x4088);
		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
		link_cntl2 |= 0x2;
		WREG16(0x4088, link_cntl2);
		WREG32(MM_CFGREGS_CNTL, 0);

		if ((rdev->family == CHIP_RV670) ||
		    (rdev->family == CHIP_RV620) ||
		    (rdev->family == CHIP_RV635)) {
			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
			training_cntl &= ~LC_POINT_7_PLUS_EN;
			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
		} else {
			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
		}

		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
		speed_cntl |= LC_GEN2_EN_STRAP;
		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);

	} else {
		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
		if (1)
			link_width_cntl |= LC_UPCONFIGURE_DIS;
		else
			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	}
}
