/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
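
/* Emit the CB_COLOR0_* context registers that describe the blit
 * destination surface.  RV6xx parts also need a SURFACE_BASE_UPDATE
 * packet after the color buffer base address changes.
 */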
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;
	RING_LOCALS;

	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
		BEGIN_RING(21 + 2);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		OUT_RING(2 << 0);
	} else {
		BEGIN_RING(21);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
	}
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((pitch << 0) | (slice << 10));
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(cb_color_info);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	ADVANCE_RING();
}
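
/* Emit a SURFACE_SYNC packet that flushes/invalidates the caches
 * selected by sync_type for the given GPU address range; size is
 * given in bytes and converted to 256-byte units, with 0xffffffff
 * meaning the full address space.
 */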
static void
cp_set_surface_sync(drm_radeon_private_t *dev_priv,
		    u32 sync_type, u32 size, u64 mc_addr)
{
	u32 cp_coher_size;
	RING_LOCALS;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	BEGIN_RING(5);
	OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
	OUT_RING(sync_type);
	OUT_RING(cp_coher_size);
	OUT_RING((mc_addr >> 8));
	OUT_RING(10); /* poll interval */
	ADVANCE_RING();
}
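
/* Copy the r6xx blit vertex and pixel shaders into the first 512 bytes
 * of the blit vertex buffer and point SQ_PGM_START_VS/PS at them, then
 * flush the shader cache over that range.
 */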
static void
set_shaders(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u64 gpu_addr;
	int i;
	u32 *vs, *ps;
	uint32_t sq_pgm_resources;
	RING_LOCALS;

	/* load shaders into the blit vertex buffer */
	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);

	for (i = 0; i < r6xx_vs_size; i++)
		vs[i] = cpu_to_le32(r6xx_vs[i]);
	for (i = 0; i < r6xx_ps_size; i++)
		ps[i] = cpu_to_le32(r6xx_ps[i]);

	dev_priv->blit_vb->used = 512;

	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	BEGIN_RING(21);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(gpu_addr >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((gpu_addr + 256) >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources | (1 << 28));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(2);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	ADVANCE_RING();

	cp_set_surface_sync(dev_priv,
			    R600_SH_ACTION_ENA, 512, gpu_addr);
}
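
/* Describe the 48-byte vertex buffer (three vertices of four floats:
 * x, y, u, v) to the vertex fetch hardware for the rectlist draw.
 */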
static void
set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
{
	uint32_t sq_vtx_constant_word2;
	RING_LOCALS;

	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
#ifdef __BIG_ENDIAN
	sq_vtx_constant_word2 |= (2 << 30);
#endif

	BEGIN_RING(9);
	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
	OUT_RING(0x460);
	OUT_RING(gpu_addr & 0xffffffff);
	OUT_RING(48 - 1);
	OUT_RING(sq_vtx_constant_word2);
	OUT_RING(1 << 0);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
	ADVANCE_RING();

	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
		cp_set_surface_sync(dev_priv,
				    R600_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(dev_priv,
				    R600_VC_ACTION_ENA, 48, gpu_addr);
}
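
/* Describe the blit source surface as a texture resource so the pixel
 * shader can sample from it.
 */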
static void
set_tex_resource(drm_radeon_private_t *dev_priv,
		 int format, int w, int h, int pitch, u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
	RING_LOCALS;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	BEGIN_RING(9);
	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
	OUT_RING(0);
	OUT_RING(sq_tex_resource_word0);
	OUT_RING(sq_tex_resource_word1);
	OUT_RING(gpu_addr >> 8);
	OUT_RING(gpu_addr >> 8);
	OUT_RING(sq_tex_resource_word4);
	OUT_RING(0);
	OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
	ADVANCE_RING();
}
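
/* Clamp rendering to the destination rectangle by programming the
 * screen, generic and window scissors to the same region.
 */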
static void
set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
{
	RING_LOCALS;

	BEGIN_RING(12);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16));
	OUT_RING((x2 << 0) | (y2 << 16));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
	OUT_RING((x2 << 0) | (y2 << 16));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
	OUT_RING((x2 << 0) | (y2 << 16));
	ADVANCE_RING();
}
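
/* Kick off the blit: draw one rectlist primitive (3 vertices) with
 * auto-generated indices.
 */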
static void
draw_auto(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(DI_PT_RECTLIST);

	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
#ifdef __BIG_ENDIAN
	OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
#else
	OUT_RING(DI_INDEX_SIZE_16_BIT);
#endif

	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
	OUT_RING(1);

	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
	OUT_RING(3);
	OUT_RING(DI_SRC_SEL_AUTO_INDEX);
	ADVANCE_RING();
	COMMIT_RING();
}
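
/* Emit the r6xx/r7xx default register state and program the SQ
 * config, GPR, thread and stack partitioning for the detected ASIC
 * family before any blit is drawn.
 */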
static void
set_default_state(drm_radeon_private_t *dev_priv)
{
	int i;
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	RING_LOCALS;

	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
	case CHIP_R600:
		num_ps_threads = 136;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_threads = 144;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_threads = 136;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_threads = 136;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_threads = 188;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_threads = 188;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_threads = 144;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = R600_VC_ENABLE;

	sq_config |= (R600_DX9_CONSTS |
		      R600_ALU_INST_PREFER_VECTOR |
		      R600_PS_PRIO(0) |
		      R600_VS_PRIO(1) |
		      R600_GS_PRIO(2) |
		      R600_ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
				  R600_NUM_VS_GPRS(num_vs_gprs) |
				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
				  R600_NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
				   R600_NUM_VS_THREADS(num_vs_threads) |
				   R600_NUM_GS_THREADS(num_gs_threads) |
				   R600_NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
		BEGIN_RING(r7xx_default_size + 10);
		for (i = 0; i < r7xx_default_size; i++)
			OUT_RING(r7xx_default_state[i]);
	} else {
		BEGIN_RING(r6xx_default_size + 10);
		for (i = 0; i < r6xx_default_size; i++)
			OUT_RING(r6xx_default_state[i]);
	}
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(sq_config);
	OUT_RING(sq_gpr_resource_mgmt_1);
	OUT_RING(sq_gpr_resource_mgmt_2);
	OUT_RING(sq_thread_resource_mgmt);
	OUT_RING(sq_stack_resource_mgmt_1);
	OUT_RING(sq_stack_resource_mgmt_2);
	ADVANCE_RING();
}
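
/* i2f() returns the IEEE-754 single-precision bit pattern of a small
 * unsigned integer (only the low 14 bits are used).  For example,
 * i2f(4): fraction = 4 << 10 needs 11 more left shifts to set bit 23,
 * so exponent = 140 - 11 = 129 and the result is (129 << 23) | 0 =
 * 0x40800000, which is the encoding of 4.0f.
 */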
static uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127 */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^15 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;
			else {
				fraction = fraction << 1; /* keep
							     shifting left until top bit = 1 */
				exponent = exponent - 1;
			}
		}
		result = exponent << 23 | (fraction & 0x7fffff); /* mask
								    off top bit; assumed 1 */
	}
	return result;
}
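
/* Vertex buffer management for the legacy (non-memory-managed) path:
 * the blit scratch space is an ordinary DMA buffer taken from the
 * freelist and recycled through the usual discard mechanism.
 */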
static int r600_nomm_get_vb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->blit_vb = radeon_freelist_get(dev);
	if (!dev_priv->blit_vb) {
		DRM_ERROR("Unable to allocate vertex buffer for blit\n");
		return -EAGAIN;
	}
	return 0;
}

static void r600_nomm_put_vb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->blit_vb->used = 0;
	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}

static void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	return (((char *)dev->agp_buffer_map->handle +
		 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
}
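
/* Called once before a series of blits: grab a vertex buffer and emit
 * the state shared by every blit draw (default state and shaders).
 */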
int
r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;

	ret = r600_nomm_get_vb(dev);
	if (ret)
		return ret;

	dev_priv->blit_vb->file_priv = file_priv;
	set_default_state(dev_priv);
	set_shaders(dev);
	return 0;
}
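
/* Called after the last blit: flush the caches, wait for the 3D engine
 * to go idle and clean, and hand the vertex buffer back.
 */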
void
r600_done_blit_copy(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_RING(5);
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	COMMIT_RING();

	r600_nomm_put_vb(dev);
}
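
/* Copy size_bytes from src_gpu_addr to dst_gpu_addr by drawing textured
 * rectangles.  Dword-aligned copies go through an 8888 surface (four
 * bytes per texel); anything else falls back to an 8-bit path.  The
 * copy is split into passes of at most max_bytes per row.
 */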
void
r600_blit_copy(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int size_bytes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int max_bytes;
	u64 vb_addr;
	u32 *vb;

	vb = r600_nomm_get_vb_ptr(dev);

	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}

			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;
			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);
			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);
			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}

			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;
			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);
			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);
			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size) / 4, h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}
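
/* Blit a w x h rectangle from (sx, sy) in the source surface to
 * (dx, dy) in the destination; cpp (bytes per pixel) selects the
 * matching texture and render target formats.  Used by the legacy
 * swap/flip path.
 */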
void
r600_blit_swap(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int sx, int sy, int dx, int dy,
	       int w, int h, int src_pitch, int dst_pitch, int cpp)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int cb_format, tex_format;
	int sx2, sy2, dx2, dy2;
	u64 vb_addr;
	u32 *vb;

	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
		r600_nomm_put_vb(dev);
		r600_nomm_get_vb(dev);
		if (!dev_priv->blit_vb)
			return;
		set_shaders(dev);
	}
	vb = r600_nomm_get_vb_ptr(dev);

	sx2 = sx + w;
	sy2 = sy + h;
	dx2 = dx + w;
	dy2 = dy + h;

	vb[0] = i2f(dx);
	vb[1] = i2f(dy);
	vb[2] = i2f(sx);
	vb[3] = i2f(sy);
	vb[4] = i2f(dx);
	vb[5] = i2f(dy2);
	vb[6] = i2f(sx);
	vb[7] = i2f(sy2);
	vb[8] = i2f(dx2);
	vb[9] = i2f(dy2);
	vb[10] = i2f(sx2);
	vb[11] = i2f(sy2);

	switch (cpp) {
	case 4:
		cb_format = COLOR_8_8_8_8;
		tex_format = FMT_8_8_8_8;
		break;
	case 2:
		cb_format = COLOR_5_6_5;
		tex_format = FMT_5_6_5;
		break;
	}

	/* src */
	set_tex_resource(dev_priv, tex_format,
			 sx2, sy2, src_pitch / cpp,
			 src_gpu_addr);
	cp_set_surface_sync(dev_priv,
			    R600_TC_ACTION_ENA, src_pitch * sy2, src_gpu_addr);

	/* dst */
	set_render_target(dev_priv, cb_format,
			  dst_pitch / cpp, dy2,
			  dst_gpu_addr);

	/* scissors */
	set_scissors(dev_priv, dx, dy, dx2, dy2);

	/* Vertex buffer setup */
	vb_addr = dev_priv->gart_buffers_offset +
		dev_priv->blit_vb->offset +
		dev_priv->blit_vb->used;
	set_vtx_resource(dev_priv, vb_addr);

	/* draw */
	draw_auto(dev_priv);

	cp_set_surface_sync(dev_priv,
			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
			    dst_pitch * dy2, dst_gpu_addr);

	dev_priv->blit_vb->used += 12 * 4;
}