/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
#include "avivod.h"
#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
/* get temperature in millidegrees */
u32 rv6xx_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;

	return temp * 1000;
}
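/*
 * Worked example (illustrative, not from the original source): if the ASIC_T
 * field of CG_THERMAL_STATUS decodes to 45 (degrees C), the function above
 * reports 45 * 1000 = 45000 millidegrees.
 */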
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
	int i;

	rdev->pm.dynpm_can_upclock = true;
	rdev->pm.dynpm_can_downclock = true;

	/* power state array is low to high, default is first */
	if ((rdev->flags & RADEON_IS_IGP) || (rdev->family == CHIP_R600)) {
		int min_power_state_index = 0;

		if (rdev->pm.num_power_states > 2)
			min_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_power_state_index = min_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.current_power_state_index == min_power_state_index) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_downclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = 0; i < rdev->pm.num_power_states; i++) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i >= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else {
					if (rdev->pm.current_power_state_index == 0)
						rdev->pm.requested_power_state_index =
							rdev->pm.num_power_states - 1;
					else
						rdev->pm.requested_power_state_index =
							rdev->pm.current_power_state_index - 1;
				}
			}
			rdev->pm.requested_clock_mode_index = 0;
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_power_state_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
				rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
				rdev->pm.dynpm_can_upclock = false;
			} else {
				if (rdev->pm.active_crtc_count > 1) {
					for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
						if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
							continue;
						else if (i <= rdev->pm.current_power_state_index) {
							rdev->pm.requested_power_state_index =
								rdev->pm.current_power_state_index;
							break;
						} else {
							rdev->pm.requested_power_state_index = i;
							break;
						}
					}
				} else
					rdev->pm.requested_power_state_index =
						rdev->pm.current_power_state_index + 1;
			}
			rdev->pm.requested_clock_mode_index = 0;
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	} else {
		/* XXX select a power state based on AC/DC, single/dualhead, etc. */
		/* for now just select the first power state and switch between clock modes */
		/* power state array is low to high, default is first (0) */
		if (rdev->pm.active_crtc_count > 1) {
			rdev->pm.requested_power_state_index = -1;
			/* start at 1 as we don't want the default mode */
			for (i = 1; i < rdev->pm.num_power_states; i++) {
				if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
					continue;
				else if ((rdev->pm.power_state[i].type == POWER_STATE_TYPE_PERFORMANCE) ||
					 (rdev->pm.power_state[i].type == POWER_STATE_TYPE_BATTERY)) {
					rdev->pm.requested_power_state_index = i;
					break;
				}
			}
			/* if nothing selected, grab the default state. */
			if (rdev->pm.requested_power_state_index == -1)
				rdev->pm.requested_power_state_index = 0;
		} else
			rdev->pm.requested_power_state_index = 1;

		switch (rdev->pm.dynpm_planned_action) {
		case DYNPM_ACTION_MINIMUM:
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_downclock = false;
			break;
		case DYNPM_ACTION_DOWNCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index == 0) {
					rdev->pm.requested_clock_mode_index = 0;
					rdev->pm.dynpm_can_downclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index - 1;
			} else {
				rdev->pm.requested_clock_mode_index = 0;
				rdev->pm.dynpm_can_downclock = false;
			}
			/* don't use the power state if crtcs are active and no display flag is set */
			if ((rdev->pm.active_crtc_count > 0) &&
			    (rdev->pm.power_state[rdev->pm.requested_power_state_index].
			     clock_info[rdev->pm.requested_clock_mode_index].flags &
			     RADEON_PM_MODE_NO_DISPLAY)) {
				rdev->pm.requested_clock_mode_index++;
			}
			break;
		case DYNPM_ACTION_UPCLOCK:
			if (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index) {
				if (rdev->pm.current_clock_mode_index ==
				    (rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1)) {
					rdev->pm.requested_clock_mode_index = rdev->pm.current_clock_mode_index;
					rdev->pm.dynpm_can_upclock = false;
				} else
					rdev->pm.requested_clock_mode_index =
						rdev->pm.current_clock_mode_index + 1;
			} else {
				rdev->pm.requested_clock_mode_index =
					rdev->pm.power_state[rdev->pm.requested_power_state_index].num_clock_modes - 1;
				rdev->pm.dynpm_can_upclock = false;
			}
			break;
		case DYNPM_ACTION_DEFAULT:
			rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
			rdev->pm.requested_clock_mode_index = 0;
			rdev->pm.dynpm_can_upclock = false;
			break;
		case DYNPM_ACTION_NONE:
		default:
			DRM_ERROR("Requested mode for undefined action\n");
			return;
		}
	}

	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].sclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 clock_info[rdev->pm.requested_clock_mode_index].mclk,
			 rdev->pm.power_state[rdev->pm.requested_power_state_index].
			 pcie_lanes);
}
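/*
 * Worked example (illustrative): on an IGP with three power states,
 * min_power_state_index is 1. A DYNPM_ACTION_UPCLOCK request while running
 * state 1 with a single active crtc selects requested_power_state_index = 2;
 * a further upclock from state 2 (the last state) is refused and
 * dynpm_can_upclock is cleared.
 */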
static int r600_pm_get_type_index(struct radeon_device *rdev,
				  enum radeon_pm_state_type ps_type,
				  int instance)
{
	int i;
	int found_instance = -1;

	for (i = 0; i < rdev->pm.num_power_states; i++) {
		if (rdev->pm.power_state[i].type == ps_type) {
			found_instance++;
			if (found_instance == instance)
				return i;
		}
	}
	/* return default if no match */
	return rdev->pm.default_power_state_index;
}
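/*
 * Usage (illustrative): r600_pm_get_type_index(rdev,
 * POWER_STATE_TYPE_BATTERY, 0) returns the index of the first battery-type
 * state, falling back to the default state index when none exists.
 */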
void rs780_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->pm.num_power_states == 2) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else if (rdev->pm.num_power_states == 3) {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 1;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 3;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	}
}
void r600_pm_init_profile(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_R600) {
		/* XXX */
		/* default */
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
		/* low sh */
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
		/* mid sh */
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
		/* high sh */
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
		/* low mh */
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
		/* mid mh */
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
		/* high mh */
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
		rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
	} else {
		if (rdev->pm.num_power_states < 4) {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			/* mid sh */
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 1;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			/* mid mh */
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 2;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		} else {
			/* default */
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
			/* low sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid sh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
			}
			/* high sh */
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
			/* low mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			} else {
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
			}
			/* mid mh */
			if (rdev->flags & RADEON_IS_MOBILITY) {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			} else {
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
					r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
				rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
			}
			/* high mh */
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx =
				r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
			rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
		}
	}
}
void r600_pm_misc(struct radeon_device *rdev)
{
	int req_ps_idx = rdev->pm.requested_power_state_index;
	int req_cm_idx = rdev->pm.requested_clock_mode_index;
	struct radeon_power_state *ps = &rdev->pm.power_state[req_ps_idx];
	struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;

	if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
		if (voltage->voltage != rdev->pm.current_vddc) {
			radeon_atom_set_voltage(rdev, voltage->voltage);
			rdev->pm.current_vddc = voltage->voltage;
			DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
		}
	}
}
bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_4:
			if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_6:
			if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_2:
			if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		case RADEON_HPD_3:
			if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
				connected = true;
			break;
		default:
			break;
		}
	}
	return connected;
}
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
{
	u32 tmp;
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_4:
			tmp = RREG32(DC_HPD4_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD4_INT_CONTROL, tmp);
			break;
			/* DCE 3.2 */
		case RADEON_HPD_5:
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_6:
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HPDx_INT_POLARITY;
			else
				tmp |= DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	} else {
		switch (hpd) {
		case RADEON_HPD_1:
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_2:
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
			break;
		case RADEON_HPD_3:
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			if (connected)
				tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
			else
				tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
			break;
		default:
			break;
		}
	}
}
void r600_hpd_init(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
			tmp |= DC_HPDx_EN;

		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
				break;
			default:
				break;
			}
		}
	}
	if (rdev->irq.installed)
		r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			case RADEON_HPD_4:
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				break;
				/* DCE 3.2 */
			case RADEON_HPD_5:
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				break;
			case RADEON_HPD_6:
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
				break;
			default:
				break;
			}
		}
	} else {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
			case RADEON_HPD_1:
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				break;
			case RADEON_HPD_2:
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				break;
			case RADEON_HPD_3:
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				break;
			default:
				break;
			}
		}
	}
}
/*
 * R600 PCIE GART
 */
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	/* flush hdp cache so updates hit vram */
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
	    !(rdev->flags & RADEON_IS_AGP)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
		u32 tmp;

		/* r7xx hw bug: write to HDP_DEBUG1 followed by an fb read
		 * rather than a write to HDP_REG_COHERENCY_FLUSH_CNTL.
		 * The latter seems to cause problems on some AGP cards,
		 * so just use the old method for them.
		 */
		WREG32(HDP_DEBUG1, 0);
		tmp = readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp)
			return;
		udelay(1);
	}
}
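/*
 * Note on the poll above (derived from the code): RESPONSE_TYPE reads back 2
 * when the invalidation request failed and any other non-zero value when it
 * completed; the loop gives up after rdev->usec_timeout iterations.
 */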
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);
		}
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
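	/*
	 * Worked example (illustrative): VRAM at 0x00000000 - 0x3FFFFFFF (1GB)
	 * packs as tmp = (0x3F << 16) | 0x00 = 0x003F0000, i.e. the FB top and
	 * base addresses in 16MB units.
	 */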
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same address in the GPU address
 * space as it occupies in the CPU (PCI) address space, as some GPUs seem to
 * have issues when it is reprogrammed to a different address.
 *
 * If there is not enough space to fit the non-visible VRAM after the
 * aperture, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP, then VRAM is placed adjacent to the AGP aperture, as
 * the GPU needs the two ranges to be contiguous from its point of view so
 * that we can program it to catch accesses outside of them.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on an AGP platform.
 */
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	}
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			}
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
		} else {
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			}
			mc->vram_start = mc->gtt_end;
		}
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
	} else {
		u64 base = 0;
		if (rdev->flags & RADEON_IS_IGP) {
			base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
			base <<= 24;
		}
		radeon_vram_location(rdev, &rdev->mc, base);
		rdev->mc.gtt_base_align = 0;
		radeon_gtt_location(rdev, mc);
	}
}
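/*
 * Worked example (illustrative): with an AGP aperture at gtt_start =
 * 0x80000000 and gtt_end = 0x9FFFFFFF, size_bf = 2GB and size_af = 1.5GB, so
 * VRAM is placed before the aperture; a 1GB card ends up at vram_start =
 * 0x40000000, vram_end = 0x7FFFFFFF.
 */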
int r600_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aperture size report 0? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP) {
		rs690_pm_info(rdev);
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	}
	radeon_update_bandwidth_info(rdev);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply perform
 * it. It's up to the caller to determine if the GPU needs one. We might
 * add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		mdelay(15);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	mdelay(1);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
	return 0;
}
bool r600_gpu_is_lockup(struct radeon_device *rdev)
{
	u32 srbm_status;
	u32 grbm_status;
	u32 grbm_status2;
	int r;

	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
		return false;
	}
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		/* PACKET2 NOP */
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_write(rdev, 0x80000000);
		radeon_ring_unlock_commit(rdev);
	}
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	case 1:
		swizzle_pipe[0] = 0;
		break;
	case 2:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		break;
	case 3:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		break;
	case 4:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		break;
	case 5:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 1;
		swizzle_pipe[2] = 2;
		swizzle_pipe[3] = 3;
		swizzle_pipe[4] = 4;
		break;
	case 6:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 5;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		break;
	case 7:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		break;
	case 8:
		swizzle_pipe[0] = 0;
		swizzle_pipe[1] = 2;
		swizzle_pipe[2] = 4;
		swizzle_pipe[3] = 6;
		swizzle_pipe[4] = 1;
		swizzle_pipe[5] = 3;
		swizzle_pipe[6] = 5;
		swizzle_pipe[7] = 7;
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}
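/*
 * Worked example (illustrative): with 2 tile pipes and 2 enabled backends
 * (mask 0x3), pipe 0 gets backend 0 and pipe 1 gets backend 1, so
 * backend_map = (0 << 0) | (1 << 2) = 0x4.
 */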
int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
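/*
 * Note: this is a plain 32-bit population count; the generic hweight32()
 * helper computes the same value.
 */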
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 backend_map;
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}
	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
	if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
		rdev->config.r600.tiling_group_size = 512;
	else
		rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));
	rdev->config.r600.tile_config = tiling_config;
	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	/* Setup pipes */
	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}
	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);
	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers. This just sets default values.
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);
1793 if (((rdev->family) == CHIP_RV610) ||
1794 ((rdev->family) == CHIP_RV620) ||
1795 ((rdev->family) == CHIP_RS780) ||
1796 ((rdev->family) == CHIP_RS880)) {
1797 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
1799 WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
1802 /* More default values. 2D/3D driver should adjust as needed */
1803 WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
1804 S1_X(0x4) | S1_Y(0xc)));
1805 WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
1806 S1_X(0x2) | S1_Y(0x2) |
1807 S2_X(0xa) | S2_Y(0x6) |
1808 S3_X(0x6) | S3_Y(0xa)));
1809 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
1810 S1_X(0x4) | S1_Y(0xc) |
1811 S2_X(0x1) | S2_Y(0x6) |
1812 S3_X(0xa) | S3_Y(0xe)));
1813 WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
1814 S5_X(0x0) | S5_Y(0x0) |
1815 S6_X(0xb) | S6_Y(0x4) |
1816 S7_X(0x7) | S7_Y(0x8)));
1818 WREG32(VGT_STRMOUT_EN, 0);
1819 tmp = rdev->config.r600.max_pipes * 16;
1820 switch (rdev->family) {
1836 WREG32(VGT_ES_PER_GS, 128);
1837 WREG32(VGT_GS_PER_ES, tmp);
1838 WREG32(VGT_GS_PER_VS, 2);
1839 WREG32(VGT_GS_VERTEX_REUSE, 16);
1841 /* more default values. 2D/3D driver should adjust as needed */
1842 WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
1843 WREG32(VGT_STRMOUT_EN, 0);
1845 WREG32(PA_SC_MODE_CNTL, 0);
1846 WREG32(PA_SC_AA_CONFIG, 0);
1847 WREG32(PA_SC_LINE_STIPPLE, 0);
1848 WREG32(SPI_INPUT_Z, 0);
1849 WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
1850 WREG32(CB_COLOR7_FRAG, 0);
1852 /* Clear render buffer base addresses */
1853 WREG32(CB_COLOR0_BASE, 0);
1854 WREG32(CB_COLOR1_BASE, 0);
1855 WREG32(CB_COLOR2_BASE, 0);
1856 WREG32(CB_COLOR3_BASE, 0);
1857 WREG32(CB_COLOR4_BASE, 0);
1858 WREG32(CB_COLOR5_BASE, 0);
1859 WREG32(CB_COLOR6_BASE, 0);
1860 WREG32(CB_COLOR7_BASE, 0);
1861 WREG32(CB_COLOR7_FRAG, 0);
1863 switch (rdev->family) {
1868 tmp = TC_L2_SIZE(8);
1872 tmp = TC_L2_SIZE(4);
1875 tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
1878 tmp = TC_L2_SIZE(0);
1881 WREG32(TC_CNTL, tmp);
1883 tmp = RREG32(HDP_HOST_PATH_CNTL);
1884 WREG32(HDP_HOST_PATH_CNTL, tmp);
1886 tmp = RREG32(ARB_POP);
1887 tmp |= ENABLE_TC128;
1888 WREG32(ARB_POP, tmp);
1890 WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
1891 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1893 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1898 * Indirect registers accessor
1900 u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
1904 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1905 (void)RREG32(PCIE_PORT_INDEX);
1906 r = RREG32(PCIE_PORT_DATA);
1910 void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1912 WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
1913 (void)RREG32(PCIE_PORT_INDEX);
1914 WREG32(PCIE_PORT_DATA, (v));
1915 (void)RREG32(PCIE_PORT_DATA);
1921 void r600_cp_stop(struct radeon_device *rdev)
1923 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1924 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1927 int r600_init_microcode(struct radeon_device *rdev)
1929 struct platform_device *pdev;
1930 const char *chip_name;
1931 const char *rlc_chip_name;
1932 size_t pfp_req_size, me_req_size, rlc_req_size;
1938 pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
1941 printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
1945 switch (rdev->family) {
1948 rlc_chip_name = "R600";
1951 chip_name = "RV610";
1952 rlc_chip_name = "R600";
1955 chip_name = "RV630";
1956 rlc_chip_name = "R600";
1959 chip_name = "RV620";
1960 rlc_chip_name = "R600";
1963 chip_name = "RV635";
1964 rlc_chip_name = "R600";
1967 chip_name = "RV670";
1968 rlc_chip_name = "R600";
1972 chip_name = "RS780";
1973 rlc_chip_name = "R600";
1976 chip_name = "RV770";
1977 rlc_chip_name = "R700";
1981 chip_name = "RV730";
1982 rlc_chip_name = "R700";
1985 chip_name = "RV710";
1986 rlc_chip_name = "R700";
1989 chip_name = "CEDAR";
1990 rlc_chip_name = "CEDAR";
1993 chip_name = "REDWOOD";
1994 rlc_chip_name = "REDWOOD";
1997 chip_name = "JUNIPER";
1998 rlc_chip_name = "JUNIPER";
2002 chip_name = "CYPRESS";
2003 rlc_chip_name = "CYPRESS";
2008 if (rdev->family >= CHIP_CEDAR) {
2009 pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
2010 me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
2011 rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
2012 } else if (rdev->family >= CHIP_RV770) {
2013 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
2014 me_req_size = R700_PM4_UCODE_SIZE * 4;
2015 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
2017 pfp_req_size = PFP_UCODE_SIZE * 4;
2018 me_req_size = PM4_UCODE_SIZE * 12;
2019 rlc_req_size = RLC_UCODE_SIZE * 4;
2022 DRM_INFO("Loading %s Microcode\n", chip_name);
2024 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
2025 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
2028 if (rdev->pfp_fw->size != pfp_req_size) {
2030 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2031 rdev->pfp_fw->size, fw_name);
2036 snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
2037 err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
2040 if (rdev->me_fw->size != me_req_size) {
2042 "r600_cp: Bogus length %zu in firmware \"%s\"\n",
2043 rdev->me_fw->size, fw_name);
2047 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
2048 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
2051 if (rdev->rlc_fw->size != rlc_req_size) {
2053 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
2054 rdev->rlc_fw->size, fw_name);
2059 platform_device_unregister(pdev);
2064 "r600_cp: Failed to load firmware \"%s\"\n",
2066 release_firmware(rdev->pfp_fw);
2067 rdev->pfp_fw = NULL;
2068 release_firmware(rdev->me_fw);
2070 release_firmware(rdev->rlc_fw);
2071 rdev->rlc_fw = NULL;
2076 static int r600_cp_load_microcode(struct radeon_device *rdev)
2078 const __be32 *fw_data;
2081 if (!rdev->me_fw || !rdev->pfp_fw)
2086 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2089 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2090 RREG32(GRBM_SOFT_RESET);
2092 WREG32(GRBM_SOFT_RESET, 0);
2094 WREG32(CP_ME_RAM_WADDR, 0);
2096 fw_data = (const __be32 *)rdev->me_fw->data;
2097 WREG32(CP_ME_RAM_WADDR, 0);
2098 for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
2099 WREG32(CP_ME_RAM_DATA,
2100 be32_to_cpup(fw_data++));
2102 fw_data = (const __be32 *)rdev->pfp_fw->data;
2103 WREG32(CP_PFP_UCODE_ADDR, 0);
2104 for (i = 0; i < PFP_UCODE_SIZE; i++)
2105 WREG32(CP_PFP_UCODE_DATA,
2106 be32_to_cpup(fw_data++));
2108 WREG32(CP_PFP_UCODE_ADDR, 0);
2109 WREG32(CP_ME_RAM_WADDR, 0);
2110 WREG32(CP_ME_RAM_RADDR, 0);
2114 int r600_cp_start(struct radeon_device *rdev)
2119 r = radeon_ring_lock(rdev, 7);
2121 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2124 radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
2125 radeon_ring_write(rdev, 0x1);
2126 if (rdev->family >= CHIP_RV770) {
2127 radeon_ring_write(rdev, 0x0);
2128 radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
2130 radeon_ring_write(rdev, 0x3);
2131 radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
2133 radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
2134 radeon_ring_write(rdev, 0);
2135 radeon_ring_write(rdev, 0);
2136 radeon_ring_unlock_commit(rdev);
2139 WREG32(R_0086D8_CP_ME_CNTL, cp_me);
2143 int r600_cp_resume(struct radeon_device *rdev)
2150 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
2151 RREG32(GRBM_SOFT_RESET);
2153 WREG32(GRBM_SOFT_RESET, 0);
2155 /* Set ring buffer size */
2156 rb_bufsz = drm_order(rdev->cp.ring_size / 8);
2157 tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
2159 tmp |= BUF_SWAP_32BIT;
2161 WREG32(CP_RB_CNTL, tmp);
2162 WREG32(CP_SEM_WAIT_TIMER, 0x4);
2164 /* Set the write pointer delay */
2165 WREG32(CP_RB_WPTR_DELAY, 0);
2167 /* Initialize the ring buffer's read and write pointers */
2168 WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
2169 WREG32(CP_RB_RPTR_WR, 0);
2170 WREG32(CP_RB_WPTR, 0);
2171 WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
2172 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
2174 WREG32(CP_RB_CNTL, tmp);
2176 WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
2177 WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
2179 rdev->cp.rptr = RREG32(CP_RB_RPTR);
2180 rdev->cp.wptr = RREG32(CP_RB_WPTR);
2182 r600_cp_start(rdev);
2183 rdev->cp.ready = true;
2184 r = radeon_ring_test(rdev);
2186 rdev->cp.ready = false;
2192 void r600_cp_commit(struct radeon_device *rdev)
2194 WREG32(CP_RB_WPTR, rdev->cp.wptr);
2195 (void)RREG32(CP_RB_WPTR);
2198 void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
2202 /* Align ring size */
2203 rb_bufsz = drm_order(ring_size / 8);
2204 ring_size = (1 << (rb_bufsz + 1)) * 4;
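/* Worked example (illustrative, not part of the original source):
 * drm_order() returns the smallest n with (1 << n) >= its argument, so
 * for ring_size = 1024 * 1024 we get rb_bufsz = drm_order(131072) = 17
 * and the aligned size is (1 << 18) * 4 = 1 MB again; a non-power-of-two
 * request is rounded up to the next power of two.
 */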
2205 rdev->cp.ring_size = ring_size;
2206 rdev->cp.align_mask = 16 - 1;
2209 void r600_cp_fini(struct radeon_device *rdev)
2212 radeon_ring_fini(rdev);
2217 * GPU scratch registers helpers function.
2219 void r600_scratch_init(struct radeon_device *rdev)
2223 rdev->scratch.num_reg = 7;
2224 for (i = 0; i < rdev->scratch.num_reg; i++) {
2225 rdev->scratch.free[i] = true;
2226 rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
2230 int r600_ring_test(struct radeon_device *rdev)
2237 r = radeon_scratch_get(rdev, &scratch);
2239 DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
2242 WREG32(scratch, 0xCAFEDEAD);
2243 r = radeon_ring_lock(rdev, 3);
2245 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
2246 radeon_scratch_free(rdev, scratch);
2249 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2250 radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2251 radeon_ring_write(rdev, 0xDEADBEEF);
2252 radeon_ring_unlock_commit(rdev);
2253 for (i = 0; i < rdev->usec_timeout; i++) {
2254 tmp = RREG32(scratch);
2255 if (tmp == 0xDEADBEEF)
2259 if (i < rdev->usec_timeout) {
2260 DRM_INFO("ring test succeeded in %d usecs\n", i);
2262 DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
2266 radeon_scratch_free(rdev, scratch);
2270 void r600_wb_disable(struct radeon_device *rdev)
2274 WREG32(SCRATCH_UMSK, 0);
2275 if (rdev->wb.wb_obj) {
2276 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2277 if (unlikely(r != 0))
2279 radeon_bo_kunmap(rdev->wb.wb_obj);
2280 radeon_bo_unpin(rdev->wb.wb_obj);
2281 radeon_bo_unreserve(rdev->wb.wb_obj);
2285 void r600_wb_fini(struct radeon_device *rdev)
2287 r600_wb_disable(rdev);
2288 if (rdev->wb.wb_obj) {
2289 radeon_bo_unref(&rdev->wb.wb_obj);
2291 rdev->wb.wb_obj = NULL;
2295 int r600_wb_enable(struct radeon_device *rdev)
2299 if (rdev->wb.wb_obj == NULL) {
2300 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
2301 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
2303 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
2306 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
2307 if (unlikely(r != 0)) {
2311 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
2312 &rdev->wb.gpu_addr);
2314 radeon_bo_unreserve(rdev->wb.wb_obj);
2315 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
2319 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
2320 radeon_bo_unreserve(rdev->wb.wb_obj);
2322 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
2327 WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
2328 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
2329 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
2330 WREG32(SCRATCH_UMSK, 0xff);
2334 void r600_fence_ring_emit(struct radeon_device *rdev,
2335 struct radeon_fence *fence)
2337 /* Also consider EVENT_WRITE_EOP. It handles the interrupts + timestamps + events */
2339 radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
2340 radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
2341 /* wait for 3D idle clean */
2342 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2343 radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2344 radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
2345 /* Emit fence sequence & fire IRQ */
2346 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2347 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
2348 radeon_ring_write(rdev, fence->seq);
2349 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
2350 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
2351 radeon_ring_write(rdev, RB_INT_STAT);
2354 int r600_copy_blit(struct radeon_device *rdev,
2355 uint64_t src_offset, uint64_t dst_offset,
2356 unsigned num_pages, struct radeon_fence *fence)
2360 mutex_lock(&rdev->r600_blit.mutex);
2361 rdev->r600_blit.vb_ib = NULL;
2362 r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
2364 if (rdev->r600_blit.vb_ib)
2365 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
2366 mutex_unlock(&rdev->r600_blit.mutex);
2369 r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
2370 r600_blit_done_copy(rdev, fence);
2371 mutex_unlock(&rdev->r600_blit.mutex);
2375 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2376 uint32_t tiling_flags, uint32_t pitch,
2377 uint32_t offset, uint32_t obj_size)
2379 /* FIXME: implement */
2383 void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2385 /* FIXME: implement */
2389 bool r600_card_posted(struct radeon_device *rdev)
2393 /* first check CRTCs */
2394 reg = RREG32(D1CRTC_CONTROL) |
2395 RREG32(D2CRTC_CONTROL);
2399 /* then check MEM_SIZE, in case the crtcs are off */
2400 if (RREG32(CONFIG_MEMSIZE))
2406 int r600_startup(struct radeon_device *rdev)
2410 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
2411 r = r600_init_microcode(rdev);
2413 DRM_ERROR("Failed to load firmware!\n");
2418 r600_mc_program(rdev);
2419 if (rdev->flags & RADEON_IS_AGP) {
2420 r600_agp_enable(rdev);
2422 r = r600_pcie_gart_enable(rdev);
2426 r600_gpu_init(rdev);
2427 r = r600_blit_init(rdev);
2429 r600_blit_fini(rdev);
2430 rdev->asic->copy = NULL;
2431 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
2433 /* pin copy shader into vram */
2434 if (rdev->r600_blit.shader_obj) {
2435 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2436 if (unlikely(r != 0))
2438 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
2439 &rdev->r600_blit.shader_gpu_addr);
2440 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2442 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
2447 r = r600_irq_init(rdev);
2449 DRM_ERROR("radeon: IH init failed (%d).\n", r);
2450 radeon_irq_kms_fini(rdev);
2455 r = radeon_ring_init(rdev, rdev->cp.ring_size);
2458 r = r600_cp_load_microcode(rdev);
2461 r = r600_cp_resume(rdev);
2464 /* write back buffers are not vital, so don't worry about failure */
2465 r600_wb_enable(rdev);
2469 void r600_vga_set_state(struct radeon_device *rdev, bool state)
2473 temp = RREG32(CONFIG_CNTL);
2474 if (!state) {
2480 WREG32(CONFIG_CNTL, temp);
2483 int r600_resume(struct radeon_device *rdev)
2487 /* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
2488 * posting will perform the necessary tasks to bring the GPU back into good
2492 atom_asic_init(rdev->mode_info.atom_context);
2494 r = r600_startup(rdev);
2496 DRM_ERROR("r600 startup failed on resume\n");
2500 r = r600_ib_test(rdev);
2502 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
2506 r = r600_audio_init(rdev);
2508 DRM_ERROR("radeon: audio resume failed\n");
2515 int r600_suspend(struct radeon_device *rdev)
2519 r600_audio_fini(rdev);
2520 /* FIXME: we should wait for ring to be empty */
2522 rdev->cp.ready = false;
2523 r600_irq_suspend(rdev);
2524 r600_wb_disable(rdev);
2525 r600_pcie_gart_disable(rdev);
2526 /* unpin shaders bo */
2527 if (rdev->r600_blit.shader_obj) {
2528 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
2530 radeon_bo_unpin(rdev->r600_blit.shader_obj);
2531 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
2537 /* The plan is to move initialization into this function and use
2538 * helper functions so that radeon_device_init does pretty much
2539 * nothing more than call asic-specific functions. This
2540 * should also allow us to remove a bunch of callback functions
2543 int r600_init(struct radeon_device *rdev)
2547 r = radeon_dummy_page_init(rdev);
2550 if (r600_debugfs_mc_info_init(rdev)) {
2551 DRM_ERROR("Failed to register debugfs file for mc !\n");
2553 /* This doesn't do much */
2554 r = radeon_gem_init(rdev);
2558 if (!radeon_get_bios(rdev)) {
2559 if (ASIC_IS_AVIVO(rdev))
2562 /* Must be an ATOMBIOS */
2563 if (!rdev->is_atom_bios) {
2564 dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
2567 r = radeon_atombios_init(rdev);
2570 /* Post card if necessary */
2571 if (!r600_card_posted(rdev)) {
2573 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
2576 DRM_INFO("GPU not posted. posting now...\n");
2577 atom_asic_init(rdev->mode_info.atom_context);
2579 /* Initialize scratch registers */
2580 r600_scratch_init(rdev);
2581 /* Initialize surface registers */
2582 radeon_surface_init(rdev);
2583 /* Initialize clocks */
2584 radeon_get_clock_info(rdev->ddev);
2586 r = radeon_fence_driver_init(rdev);
2589 if (rdev->flags & RADEON_IS_AGP) {
2590 r = radeon_agp_init(rdev);
2592 radeon_agp_disable(rdev);
2594 r = r600_mc_init(rdev);
2597 /* Memory manager */
2598 r = radeon_bo_init(rdev);
2602 r = radeon_irq_kms_init(rdev);
2606 rdev->cp.ring_obj = NULL;
2607 r600_ring_init(rdev, 1024 * 1024);
2609 rdev->ih.ring_obj = NULL;
2610 r600_ih_ring_init(rdev, 64 * 1024);
2612 r = r600_pcie_gart_init(rdev);
2616 rdev->accel_working = true;
2617 r = r600_startup(rdev);
2619 dev_err(rdev->dev, "disabling GPU acceleration\n");
2622 r600_irq_fini(rdev);
2623 radeon_irq_kms_fini(rdev);
2624 r600_pcie_gart_fini(rdev);
2625 rdev->accel_working = false;
2627 if (rdev->accel_working) {
2628 r = radeon_ib_pool_init(rdev);
2630 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
2631 rdev->accel_working = false;
2633 r = r600_ib_test(rdev);
2635 dev_err(rdev->dev, "IB test failed (%d).\n", r);
2636 rdev->accel_working = false;
2641 r = r600_audio_init(rdev);
2643 return r; /* TODO error handling */
2647 void r600_fini(struct radeon_device *rdev)
2649 r600_audio_fini(rdev);
2650 r600_blit_fini(rdev);
2653 r600_irq_fini(rdev);
2654 radeon_irq_kms_fini(rdev);
2655 r600_pcie_gart_fini(rdev);
2656 radeon_agp_fini(rdev);
2657 radeon_gem_fini(rdev);
2658 radeon_fence_driver_fini(rdev);
2659 radeon_bo_fini(rdev);
2660 radeon_atombios_fini(rdev);
2663 radeon_dummy_page_fini(rdev);
2670 void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2672 /* FIXME: implement */
2673 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2674 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
2675 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2676 radeon_ring_write(rdev, ib->length_dw);
2679 int r600_ib_test(struct radeon_device *rdev)
2681 struct radeon_ib *ib;
2687 r = radeon_scratch_get(rdev, &scratch);
2689 DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
2692 WREG32(scratch, 0xCAFEDEAD);
2693 r = radeon_ib_get(rdev, &ib);
2695 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
2698 ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
2699 ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
2700 ib->ptr[2] = 0xDEADBEEF;
2701 ib->ptr[3] = PACKET2(0);
2702 ib->ptr[4] = PACKET2(0);
2703 ib->ptr[5] = PACKET2(0);
2704 ib->ptr[6] = PACKET2(0);
2705 ib->ptr[7] = PACKET2(0);
2706 ib->ptr[8] = PACKET2(0);
2707 ib->ptr[9] = PACKET2(0);
2708 ib->ptr[10] = PACKET2(0);
2709 ib->ptr[11] = PACKET2(0);
2710 ib->ptr[12] = PACKET2(0);
2711 ib->ptr[13] = PACKET2(0);
2712 ib->ptr[14] = PACKET2(0);
2713 ib->ptr[15] = PACKET2(0);
2715 r = radeon_ib_schedule(rdev, ib);
2717 radeon_scratch_free(rdev, scratch);
2718 radeon_ib_free(rdev, &ib);
2719 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
2722 r = radeon_fence_wait(ib->fence, false);
2724 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
2727 for (i = 0; i < rdev->usec_timeout; i++) {
2728 tmp = RREG32(scratch);
2729 if (tmp == 0xDEADBEEF)
2733 if (i < rdev->usec_timeout) {
2734 DRM_INFO("ib test succeeded in %u usecs\n", i);
2736 DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
2740 radeon_scratch_free(rdev, scratch);
2741 radeon_ib_free(rdev, &ib);
2748 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
2749 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2750 * writing to the ring and the GPU consuming, the GPU writes to the ring
2751 * and the host consumes. As the host irq handler processes interrupts, it
2752 * increments the rptr. When the rptr catches up with the wptr, all the
2753 * current interrupts have been processed.
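/* A minimal sketch of the consumer model described above (illustrative
 * only, not part of the driver; ih, read_hw_wptr() and process_vector()
 * are hypothetical names):
 *
 *	u32 rptr = ih->rptr;
 *	u32 wptr = read_hw_wptr();          // GPU-owned write pointer
 *	while (rptr != wptr) {
 *		process_vector(&ih->ring[rptr / 4]);  // 16-byte vectors
 *		rptr = (rptr + 16) & ih->ptr_mask;    // power-of-two ring
 *	}
 *	ih->rptr = rptr;                    // tell the GPU we caught up
 */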
2756 void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2760 /* Align ring size */
2761 rb_bufsz = drm_order(ring_size / 4);
2762 ring_size = (1 << rb_bufsz) * 4;
2763 rdev->ih.ring_size = ring_size;
2764 rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
2768 static int r600_ih_ring_alloc(struct radeon_device *rdev)
2772 /* Allocate ring buffer */
2773 if (rdev->ih.ring_obj == NULL) {
2774 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2776 RADEON_GEM_DOMAIN_GTT,
2777 &rdev->ih.ring_obj);
2779 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2782 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2783 if (unlikely(r != 0))
2785 r = radeon_bo_pin(rdev->ih.ring_obj,
2786 RADEON_GEM_DOMAIN_GTT,
2787 &rdev->ih.gpu_addr);
2789 radeon_bo_unreserve(rdev->ih.ring_obj);
2790 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2793 r = radeon_bo_kmap(rdev->ih.ring_obj,
2794 (void **)&rdev->ih.ring);
2795 radeon_bo_unreserve(rdev->ih.ring_obj);
2797 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2804 static void r600_ih_ring_fini(struct radeon_device *rdev)
2807 if (rdev->ih.ring_obj) {
2808 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2809 if (likely(r == 0)) {
2810 radeon_bo_kunmap(rdev->ih.ring_obj);
2811 radeon_bo_unpin(rdev->ih.ring_obj);
2812 radeon_bo_unreserve(rdev->ih.ring_obj);
2814 radeon_bo_unref(&rdev->ih.ring_obj);
2815 rdev->ih.ring = NULL;
2816 rdev->ih.ring_obj = NULL;
2820 void r600_rlc_stop(struct radeon_device *rdev)
2823 if ((rdev->family >= CHIP_RV770) &&
2824 (rdev->family <= CHIP_RV740)) {
2825 /* r7xx asics need to soft reset RLC before halting */
2826 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2827 RREG32(SRBM_SOFT_RESET);
2829 WREG32(SRBM_SOFT_RESET, 0);
2830 RREG32(SRBM_SOFT_RESET);
2833 WREG32(RLC_CNTL, 0);
2836 static void r600_rlc_start(struct radeon_device *rdev)
2838 WREG32(RLC_CNTL, RLC_ENABLE);
2841 static int r600_rlc_init(struct radeon_device *rdev)
2844 const __be32 *fw_data;
2849 r600_rlc_stop(rdev);
2851 WREG32(RLC_HB_BASE, 0);
2852 WREG32(RLC_HB_CNTL, 0);
2853 WREG32(RLC_HB_RPTR, 0);
2854 WREG32(RLC_HB_WPTR, 0);
2855 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2856 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2857 WREG32(RLC_MC_CNTL, 0);
2858 WREG32(RLC_UCODE_CNTL, 0);
2860 fw_data = (const __be32 *)rdev->rlc_fw->data;
2861 if (rdev->family >= CHIP_CEDAR) {
2862 for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
2863 WREG32(RLC_UCODE_ADDR, i);
2864 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2866 } else if (rdev->family >= CHIP_RV770) {
2867 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2868 WREG32(RLC_UCODE_ADDR, i);
2869 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2872 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2873 WREG32(RLC_UCODE_ADDR, i);
2874 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2877 WREG32(RLC_UCODE_ADDR, 0);
2879 r600_rlc_start(rdev);
2884 static void r600_enable_interrupts(struct radeon_device *rdev)
2886 u32 ih_cntl = RREG32(IH_CNTL);
2887 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2889 ih_cntl |= ENABLE_INTR;
2890 ih_rb_cntl |= IH_RB_ENABLE;
2891 WREG32(IH_CNTL, ih_cntl);
2892 WREG32(IH_RB_CNTL, ih_rb_cntl);
2893 rdev->ih.enabled = true;
2896 void r600_disable_interrupts(struct radeon_device *rdev)
2898 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2899 u32 ih_cntl = RREG32(IH_CNTL);
2901 ih_rb_cntl &= ~IH_RB_ENABLE;
2902 ih_cntl &= ~ENABLE_INTR;
2903 WREG32(IH_RB_CNTL, ih_rb_cntl);
2904 WREG32(IH_CNTL, ih_cntl);
2905 /* set rptr, wptr to 0 */
2906 WREG32(IH_RB_RPTR, 0);
2907 WREG32(IH_RB_WPTR, 0);
2908 rdev->ih.enabled = false;
2913 static void r600_disable_interrupt_state(struct radeon_device *rdev)
2917 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2918 WREG32(GRBM_INT_CNTL, 0);
2919 WREG32(DxMODE_INT_MASK, 0);
2920 if (ASIC_IS_DCE3(rdev)) {
2921 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2922 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2923 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2924 WREG32(DC_HPD1_INT_CONTROL, tmp);
2925 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2926 WREG32(DC_HPD2_INT_CONTROL, tmp);
2927 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2928 WREG32(DC_HPD3_INT_CONTROL, tmp);
2929 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2930 WREG32(DC_HPD4_INT_CONTROL, tmp);
2931 if (ASIC_IS_DCE32(rdev)) {
2932 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2933 WREG32(DC_HPD5_INT_CONTROL, tmp);
2934 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2935 WREG32(DC_HPD6_INT_CONTROL, tmp);
2938 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2939 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2940 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2941 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2942 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2943 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2944 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2945 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2949 int r600_irq_init(struct radeon_device *rdev)
2953 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2956 ret = r600_ih_ring_alloc(rdev);
2961 r600_disable_interrupts(rdev);
2964 ret = r600_rlc_init(rdev);
2966 r600_ih_ring_fini(rdev);
2970 /* setup interrupt control */
2971 /* set dummy read address to ring address */
2972 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2973 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2974 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2975 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
 */
2977 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2978 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2979 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2980 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2982 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2983 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2985 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2986 IH_WPTR_OVERFLOW_CLEAR |
2988 /* WPTR writeback, not yet */
2989 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2990 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2991 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2993 WREG32(IH_RB_CNTL, ih_rb_cntl);
2995 /* set rptr, wptr to 0 */
2996 WREG32(IH_RB_RPTR, 0);
2997 WREG32(IH_RB_WPTR, 0);
2999 /* Default settings for IH_CNTL (disabled at first) */
3000 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
3001 /* RPTR_REARM only works if msi's are enabled */
3002 if (rdev->msi_enabled)
3003 ih_cntl |= RPTR_REARM;
3006 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
3008 WREG32(IH_CNTL, ih_cntl);
3010 /* force the active interrupt state to all disabled */
3011 if (rdev->family >= CHIP_CEDAR)
3012 evergreen_disable_interrupt_state(rdev);
3014 r600_disable_interrupt_state(rdev);
3017 r600_enable_interrupts(rdev);
3022 void r600_irq_suspend(struct radeon_device *rdev)
3024 r600_irq_disable(rdev);
3025 r600_rlc_stop(rdev);
3028 void r600_irq_fini(struct radeon_device *rdev)
3030 r600_irq_suspend(rdev);
3031 r600_ih_ring_fini(rdev);
3034 int r600_irq_set(struct radeon_device *rdev)
3036 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
3038 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
3039 u32 grbm_int_cntl = 0;
3042 if (!rdev->irq.installed) {
3043 WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
3046 /* don't enable anything if the ih is disabled */
3047 if (!rdev->ih.enabled) {
3048 r600_disable_interrupts(rdev);
3049 /* force the active interrupt state to all disabled */
3050 r600_disable_interrupt_state(rdev);
3054 hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3055 if (ASIC_IS_DCE3(rdev)) {
3056 hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3057 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3058 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3059 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3060 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
3061 if (ASIC_IS_DCE32(rdev)) {
3062 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3063 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3066 hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
3067 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
3068 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
3069 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
3072 if (rdev->irq.sw_int) {
3073 DRM_DEBUG("r600_irq_set: sw int\n");
3074 cp_int_cntl |= RB_INT_ENABLE;
3076 if (rdev->irq.crtc_vblank_int[0]) {
3077 DRM_DEBUG("r600_irq_set: vblank 0\n");
3078 mode_int |= D1MODE_VBLANK_INT_MASK;
3080 if (rdev->irq.crtc_vblank_int[1]) {
3081 DRM_DEBUG("r600_irq_set: vblank 1\n");
3082 mode_int |= D2MODE_VBLANK_INT_MASK;
3084 if (rdev->irq.hpd[0]) {
3085 DRM_DEBUG("r600_irq_set: hpd 1\n");
3086 hpd1 |= DC_HPDx_INT_EN;
3088 if (rdev->irq.hpd[1]) {
3089 DRM_DEBUG("r600_irq_set: hpd 2\n");
3090 hpd2 |= DC_HPDx_INT_EN;
3092 if (rdev->irq.hpd[2]) {
3093 DRM_DEBUG("r600_irq_set: hpd 3\n");
3094 hpd3 |= DC_HPDx_INT_EN;
3096 if (rdev->irq.hpd[3]) {
3097 DRM_DEBUG("r600_irq_set: hpd 4\n");
3098 hpd4 |= DC_HPDx_INT_EN;
3100 if (rdev->irq.hpd[4]) {
3101 DRM_DEBUG("r600_irq_set: hpd 5\n");
3102 hpd5 |= DC_HPDx_INT_EN;
3104 if (rdev->irq.hpd[5]) {
3105 DRM_DEBUG("r600_irq_set: hpd 6\n");
3106 hpd6 |= DC_HPDx_INT_EN;
3108 if (rdev->irq.hdmi[0]) {
3109 DRM_DEBUG("r600_irq_set: hdmi 1\n");
3110 hdmi1 |= R600_HDMI_INT_EN;
3112 if (rdev->irq.hdmi[1]) {
3113 DRM_DEBUG("r600_irq_set: hdmi 2\n");
3114 hdmi2 |= R600_HDMI_INT_EN;
3116 if (rdev->irq.gui_idle) {
3117 DRM_DEBUG("gui idle\n");
3118 grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
3121 WREG32(CP_INT_CNTL, cp_int_cntl);
3122 WREG32(DxMODE_INT_MASK, mode_int);
3123 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3124 WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
3125 if (ASIC_IS_DCE3(rdev)) {
3126 WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
3127 WREG32(DC_HPD1_INT_CONTROL, hpd1);
3128 WREG32(DC_HPD2_INT_CONTROL, hpd2);
3129 WREG32(DC_HPD3_INT_CONTROL, hpd3);
3130 WREG32(DC_HPD4_INT_CONTROL, hpd4);
3131 if (ASIC_IS_DCE32(rdev)) {
3132 WREG32(DC_HPD5_INT_CONTROL, hpd5);
3133 WREG32(DC_HPD6_INT_CONTROL, hpd6);
3136 WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
3137 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
3138 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
3139 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
3145 static inline void r600_irq_ack(struct radeon_device *rdev,
3148 u32 *disp_int_cont2)
3152 if (ASIC_IS_DCE3(rdev)) {
3153 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
3154 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
3155 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
3157 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
3158 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
3159 *disp_int_cont2 = 0;
3162 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
3163 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3164 if (*disp_int & LB_D1_VLINE_INTERRUPT)
3165 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3166 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
3167 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
3168 if (*disp_int & LB_D2_VLINE_INTERRUPT)
3169 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
3170 if (*disp_int & DC_HPD1_INTERRUPT) {
3171 if (ASIC_IS_DCE3(rdev)) {
3172 tmp = RREG32(DC_HPD1_INT_CONTROL);
3173 tmp |= DC_HPDx_INT_ACK;
3174 WREG32(DC_HPD1_INT_CONTROL, tmp);
3176 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
3177 tmp |= DC_HPDx_INT_ACK;
3178 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
3181 if (*disp_int & DC_HPD2_INTERRUPT) {
3182 if (ASIC_IS_DCE3(rdev)) {
3183 tmp = RREG32(DC_HPD2_INT_CONTROL);
3184 tmp |= DC_HPDx_INT_ACK;
3185 WREG32(DC_HPD2_INT_CONTROL, tmp);
3187 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
3188 tmp |= DC_HPDx_INT_ACK;
3189 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
3192 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
3193 if (ASIC_IS_DCE3(rdev)) {
3194 tmp = RREG32(DC_HPD3_INT_CONTROL);
3195 tmp |= DC_HPDx_INT_ACK;
3196 WREG32(DC_HPD3_INT_CONTROL, tmp);
3198 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
3199 tmp |= DC_HPDx_INT_ACK;
3200 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
3203 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
3204 tmp = RREG32(DC_HPD4_INT_CONTROL);
3205 tmp |= DC_HPDx_INT_ACK;
3206 WREG32(DC_HPD4_INT_CONTROL, tmp);
3208 if (ASIC_IS_DCE32(rdev)) {
3209 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
3210 tmp = RREG32(DC_HPD5_INT_CONTROL);
3211 tmp |= DC_HPDx_INT_ACK;
3212 WREG32(DC_HPD5_INT_CONTROL, tmp);
3214 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
3215 tmp = RREG32(DC_HPD6_INT_CONTROL);
3216 tmp |= DC_HPDx_INT_ACK;
3217 WREG32(DC_HPD6_INT_CONTROL, tmp);
3220 if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3221 WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3223 if (ASIC_IS_DCE3(rdev)) {
3224 if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3225 WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3228 if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
3229 WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
3234 void r600_irq_disable(struct radeon_device *rdev)
3236 u32 disp_int, disp_int_cont, disp_int_cont2;
3238 r600_disable_interrupts(rdev);
3239 /* Wait and acknowledge irq */
3241 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3242 r600_disable_interrupt_state(rdev);
3245 static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3249 /* XXX use writeback */
3250 wptr = RREG32(IH_RB_WPTR);
3252 if (wptr & RB_OVERFLOW) {
3253 /* When a ring buffer overflow happens, start parsing interrupts
3254 * from the last vector that was not overwritten (wptr + 16).
3255 * Hopefully this should allow us to catch up.
 */
3257 dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
3258 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
3259 rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
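/* Illustrative arithmetic (not in the original): with the default 64 KB
 * IH ring, ptr_mask = 0xFFFF; each IV entry is 16 bytes, so jumping to
 * (wptr + 16) & ptr_mask skips exactly the one entry the GPU may have
 * been overwriting when the overflow bit was raised.
 */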
3260 tmp = RREG32(IH_RB_CNTL);
3261 tmp |= IH_WPTR_OVERFLOW_CLEAR;
3262 WREG32(IH_RB_CNTL, tmp);
3264 return (wptr & rdev->ih.ptr_mask);
3268 * Each IV ring entry is 128 bits:
3269 * [7:0] - interrupt source id
3271 * [59:32] - interrupt source data
3272 * [127:60] - reserved
3274 * The basic interrupt vector entries
3275 * are decoded as follows:
3276 * src_id src_data description
3281 * 19 0 FP Hot plug detection A
3282 * 19 1 FP Hot plug detection B
3283 * 19 2 DAC A auto-detection
3284 * 19 3 DAC B auto-detection
3290 * 181 - EOP Interrupt
3293 * Note, these are based on r600 and may need to be
3294 * adjusted or added to on newer asics
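/* A sketch of how one 128-bit entry maps onto the u32 ring words
 * (illustrative; the field layout is taken from the comment above):
 *
 *	u32 dw0 = ring[ring_index];        // bits [31:0]
 *	u32 dw1 = ring[ring_index + 1];    // bits [63:32]
 *	u32 src_id   = dw0 & 0xff;         // [7:0]   interrupt source id
 *	u32 src_data = dw1 & 0x0fffffff;   // [59:32] interrupt source data
 */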
3297 int r600_irq_process(struct radeon_device *rdev)
3299 u32 wptr = r600_get_ih_wptr(rdev);
3300 u32 rptr = rdev->ih.rptr;
3301 u32 src_id, src_data;
3302 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
3303 unsigned long flags;
3304 bool queue_hotplug = false;
3306 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3307 if (!rdev->ih.enabled)
3310 spin_lock_irqsave(&rdev->ih.lock, flags);
3313 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3316 if (rdev->shutdown) {
3317 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3322 /* display interrupts */
3323 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
3325 rdev->ih.wptr = wptr;
3326 while (rptr != wptr) {
3327 /* wptr/rptr are in bytes! */
3328 ring_index = rptr / 4;
3329 src_id = rdev->ih.ring[ring_index] & 0xff;
3330 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
3333 case 1: /* D1 vblank/vline */
3335 case 0: /* D1 vblank */
3336 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
3337 drm_handle_vblank(rdev->ddev, 0);
3338 rdev->pm.vblank_sync = true;
3339 wake_up(&rdev->irq.vblank_queue);
3340 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
3341 DRM_DEBUG("IH: D1 vblank\n");
3344 case 1: /* D1 vline */
3345 if (disp_int & LB_D1_VLINE_INTERRUPT) {
3346 disp_int &= ~LB_D1_VLINE_INTERRUPT;
3347 DRM_DEBUG("IH: D1 vline\n");
3351 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3355 case 5: /* D2 vblank/vline */
3357 case 0: /* D2 vblank */
3358 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
3359 drm_handle_vblank(rdev->ddev, 1);
3360 rdev->pm.vblank_sync = true;
3361 wake_up(&rdev->irq.vblank_queue);
3362 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
3363 DRM_DEBUG("IH: D2 vblank\n");
3366 case 1: /* D2 vline */
3367 if (disp_int & LB_D2_VLINE_INTERRUPT) {
3368 disp_int &= ~LB_D2_VLINE_INTERRUPT;
3369 DRM_DEBUG("IH: D2 vline\n");
3373 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3377 case 19: /* HPD/DAC hotplug */
3380 if (disp_int & DC_HPD1_INTERRUPT) {
3381 disp_int &= ~DC_HPD1_INTERRUPT;
3382 queue_hotplug = true;
3383 DRM_DEBUG("IH: HPD1\n");
3387 if (disp_int & DC_HPD2_INTERRUPT) {
3388 disp_int &= ~DC_HPD2_INTERRUPT;
3389 queue_hotplug = true;
3390 DRM_DEBUG("IH: HPD2\n");
3394 if (disp_int_cont & DC_HPD3_INTERRUPT) {
3395 disp_int_cont &= ~DC_HPD3_INTERRUPT;
3396 queue_hotplug = true;
3397 DRM_DEBUG("IH: HPD3\n");
3401 if (disp_int_cont & DC_HPD4_INTERRUPT) {
3402 disp_int_cont &= ~DC_HPD4_INTERRUPT;
3403 queue_hotplug = true;
3404 DRM_DEBUG("IH: HPD4\n");
3408 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
3409 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
3410 queue_hotplug = true;
3411 DRM_DEBUG("IH: HPD5\n");
3415 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
3416 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
3417 queue_hotplug = true;
3418 DRM_DEBUG("IH: HPD6\n");
3422 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3427 DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
3428 r600_audio_schedule_polling(rdev);
3430 case 176: /* CP_INT in ring buffer */
3431 case 177: /* CP_INT in IB1 */
3432 case 178: /* CP_INT in IB2 */
3433 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
3434 radeon_fence_process(rdev);
3436 case 181: /* CP EOP event */
3437 DRM_DEBUG("IH: CP EOP\n");
3439 case 233: /* GUI IDLE */
3440 DRM_DEBUG("IH: GUI idle\n");
3441 rdev->pm.gui_idle = true;
3442 wake_up(&rdev->irq.idle_queue);
3445 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3449 /* wptr/rptr are in bytes! */
3451 rptr &= rdev->ih.ptr_mask;
3453 /* make sure wptr hasn't changed while processing */
3454 wptr = r600_get_ih_wptr(rdev);
3455 if (wptr != rdev->ih.wptr)
3458 queue_work(rdev->wq, &rdev->hotplug_work);
3459 rdev->ih.rptr = rptr;
3460 WREG32(IH_RB_RPTR, rdev->ih.rptr);
3461 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3468 #if defined(CONFIG_DEBUG_FS)
3470 static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
3472 struct drm_info_node *node = (struct drm_info_node *) m->private;
3473 struct drm_device *dev = node->minor->dev;
3474 struct radeon_device *rdev = dev->dev_private;
3475 unsigned count, i, j;
3477 radeon_ring_free_size(rdev);
3478 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
3479 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
3480 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
3481 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
3482 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
3483 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
3484 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
3485 seq_printf(m, "%u dwords in ring\n", count);
3487 for (j = 0; j <= count; j++) {
3488 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
3489 i = (i + 1) & rdev->cp.ptr_mask;
3494 static int r600_debugfs_mc_info(struct seq_file *m, void *data)
3496 struct drm_info_node *node = (struct drm_info_node *) m->private;
3497 struct drm_device *dev = node->minor->dev;
3498 struct radeon_device *rdev = dev->dev_private;
3500 DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
3501 DREG32_SYS(m, rdev, VM_L2_STATUS);
3505 static struct drm_info_list r600_mc_info_list[] = {
3506 {"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
3507 {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
3511 int r600_debugfs_mc_info_init(struct radeon_device *rdev)
3513 #if defined(CONFIG_DEBUG_FS)
3514 return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
3521 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
3522 * @rdev: radeon device structure
3523 * @bo: buffer object struct which userspace is waiting for idle
3525 * Some R6XX/R7XX chips don't seem to take into account the HDP flush
3526 * performed through the ring buffer, which leads to corruption in
3527 * rendering; see http://bugzilla.kernel.org/show_bug.cgi?id=15186.
3528 * To avoid this we perform the HDP flush directly by writing the register through MMIO.
3530 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
3532 /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
3533 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.
3534 * This seems to cause problems on some AGP cards. Just use the old
 * method for them.
 */
3537 if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
3538 rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) {
3539 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3542 WREG32(HDP_DEBUG1, 0);
3543 tmp = readl((void __iomem *)ptr);
3545 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);