/*
 * Copyright (C) 2008-2009  Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Richard Li <RichardZ.Li@amd.com>, <richardradeon@gmail.com>
 *   CooperYuan <cooper.yuan@amd.com>, <cooperyuan@gmail.com>
 */
#include "main/glheader.h"
#include "main/state.h"
#include "main/imports.h"
#include "main/enums.h"
#include "main/macros.h"
#include "main/context.h"
#include "main/simple_list.h"
#include "main/api_arrayelt.h"
#include "swrast/swrast.h"
#include "swrast_setup/swrast_setup.h"
#include "vbo/vbo.h"

#include "tnl/tnl.h"
#include "tnl/t_vp_build.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
#include "vbo/vbo_context.h"

#include "r600_context.h"
#include "r600_cmdbuf.h"

#include "r700_vertprog.h"
#include "r700_fragprog.h"
#include "r700_state.h"

#include "radeon_buffer_objects.h"
#include "radeon_common_context.h"
void r700WaitForIdle(context_t *context);
void r700WaitForIdleClean(context_t *context);
static int r700PrimitiveType(int prim);
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type);
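
/**
 * Stall the command processor until the 3D engine is idle: sets the
 * WAIT_3D_IDLE bit in the WAIT_UNTIL config register.
 */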
void r700WaitForIdle(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit);
    END_BATCH();

    COMMIT_BATCH();
}
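
/**
 * Flush and invalidate the 3D caches (CACHE_FLUSH_AND_INV event), then
 * wait until the 3D engine is idle and clean.
 */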
void r700WaitForIdleClean(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    BEGIN_BATCH_NO_AUTOSTATE(5);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
    R600_OUT_BATCH(CACHE_FLUSH_AND_INV_EVENT);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
    R600_OUT_BATCH(mmWAIT_UNTIL - ASIC_CONFIG_BASE_INDEX);
    R600_OUT_BATCH(WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
    END_BATCH();

    COMMIT_BATCH();
}
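
/**
 * Begin 3D rendering in a fresh command buffer: emit START_3D_CMDBUF on
 * pre-RV770 chips, then a CONTEXT_CONTROL packet to initialize register
 * loading.
 */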
void r700Start3D(context_t *context)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);

    if (context->radeon.radeonScreen->chip_family < CHIP_FAMILY_RV770)
    {
        BEGIN_BATCH_NO_AUTOSTATE(2);
        R600_OUT_BATCH(CP_PACKET3(R600_IT_START_3D_CMDBUF, 0));
        R600_OUT_BATCH(0);
        END_BATCH();
    }

    BEGIN_BATCH_NO_AUTOSTATE(3);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_CONTEXT_CONTROL, 1));
    R600_OUT_BATCH(0x80000000);
    R600_OUT_BATCH(0x80000000);
    END_BATCH();

    COMMIT_BATCH();
}
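
/**
 * Emit a SURFACE_SYNC packet to make the given buffer object coherent
 * with respect to the blocks selected by sync_type (CB/DB/shader action
 * bits); the coherency size is expressed in units of 256 bytes.
 */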
GLboolean r700SyncSurf(context_t *context,
                       struct radeon_bo *pbo,
                       uint32_t read_domain,
                       uint32_t write_domain,
                       uint32_t sync_type)
{
    BATCH_LOCALS(&context->radeon);
    radeon_print(RADEON_RENDER | RADEON_STATE, RADEON_TRACE, "%s\n", __func__);
    uint32_t cp_coher_size;

    if (!pbo)
        return GL_FALSE;

    if (pbo->size == 0xffffffff)
        cp_coher_size = 0xffffffff;
    else
        cp_coher_size = ((pbo->size + 255) >> 8);

    BEGIN_BATCH_NO_AUTOSTATE(5 + 2);
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
    R600_OUT_BATCH(sync_type);
    R600_OUT_BATCH(cp_coher_size);
    R600_OUT_BATCH(0);  /* CP_COHER_BASE */
    R600_OUT_BATCH(10); /* poll interval */
    R600_OUT_BATCH_RELOC(0,
                         pbo,
                         0,
                         read_domain, write_domain, 0);
    END_BATCH();
    COMMIT_BATCH();

    return GL_TRUE;
}
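
/* Map a GL primitive mode to the hardware DI_PT_* primitive type;
 * returns -1 for anything the VGT cannot draw directly. */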
static int r700PrimitiveType(int prim)
{
    switch (prim & PRIM_MODE_MASK)
    {
    case GL_POINTS:
        return DI_PT_POINTLIST;
    case GL_LINES:
        return DI_PT_LINELIST;
    case GL_LINE_STRIP:
        return DI_PT_LINESTRIP;
    case GL_LINE_LOOP:
        return DI_PT_LINELOOP;
    case GL_TRIANGLES:
        return DI_PT_TRILIST;
    case GL_TRIANGLE_STRIP:
        return DI_PT_TRISTRIP;
    case GL_TRIANGLE_FAN:
        return DI_PT_TRIFAN;
    case GL_QUADS:
        return DI_PT_QUADLIST;
    case GL_QUAD_STRIP:
        return DI_PT_QUADSTRIP;
    case GL_POLYGON:
        return DI_PT_POLYGON;
    default:
        assert(0);
        return -1;
    }
}
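
/* Trim a vertex count down to the largest number of whole primitives
 * the given mode can consume, e.g. a multiple of 3 for GL_TRIANGLES;
 * any incomplete primitive at the tail is dropped. */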
static int r700NumVerts(int num_verts, int prim)
{
    int verts_off = 0;

    switch (prim & PRIM_MODE_MASK) {
    case GL_POINTS:
        verts_off = 0;
        break;
    case GL_LINES:
        verts_off = num_verts % 2;
        break;
    case GL_LINE_STRIP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_LINE_LOOP:
        if (num_verts < 2)
            verts_off = num_verts;
        break;
    case GL_TRIANGLES:
        verts_off = num_verts % 3;
        break;
    case GL_TRIANGLE_STRIP:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_TRIANGLE_FAN:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    case GL_QUADS:
        verts_off = num_verts % 4;
        break;
    case GL_QUAD_STRIP:
        if (num_verts < 4)
            verts_off = num_verts;
        else
            verts_off = num_verts % 2;
        break;
    case GL_POLYGON:
        if (num_verts < 3)
            verts_off = num_verts;
        break;
    default:
        assert(0);
        return -1;
    }

    return num_verts - verts_off;
}
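
/**
 * Emit a single indexed draw: primitive type, index type, number of
 * instances and base vertex, followed by a DRAW_INDEX packet that
 * points the VGT at the uploaded index buffer.
 */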
static void r700RunRenderPrimitive(struct gl_context * ctx, int start, int end,
                                   int prim, GLint basevertex)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type, total_emit;
    int num_indices;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);

    if (GL_TRUE != context->ind_buf.is_32bit)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             SQ_ENDIAN_8IN32,
#else
             SQ_ENDIAN_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_SRC_SEL_DMA, SOURCE_SELECT_shift, SOURCE_SELECT_mask);
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);

    total_emit = 3   /* VGT_PRIMITIVE_TYPE */
        + 2          /* VGT_INDEX_TYPE */
        + 2          /* NUM_INSTANCES */
        + 4          /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
        + 5 + 2;     /* DRAW_INDEX */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(basevertex); /* VTX_BASE_VTX_LOC */
    R600_OUT_BATCH(0);          /* VTX_START_INST_LOC */
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX, 3));
    R600_OUT_BATCH(context->ind_buf.bo_offset);
    R600_OUT_BATCH(0);          /* high 32 bits of the index buffer address */
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    R600_OUT_BATCH_RELOC(context->ind_buf.bo_offset,
                         context->ind_buf.bo,
                         context->ind_buf.bo_offset,
                         RADEON_GEM_DOMAIN_GTT, 0, 0);
    END_BATCH();
    COMMIT_BATCH();
}
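
/**
 * Emit a single non-indexed draw. DRAW_INDEX_AUTO makes the VGT
 * generate indices itself, so only the vertex count and the draw
 * initiator need to be sent.
 */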
static void r700RunRenderPrimitiveImmediate(struct gl_context * ctx, int start, int end, int prim)
{
    context_t *context = R700_CONTEXT(ctx);
    BATCH_LOCALS(&context->radeon);
    int type;
    uint32_t num_indices, total_emit = 0;
    uint32_t vgt_draw_initiator = 0;
    uint32_t vgt_index_type     = 0;
    uint32_t vgt_primitive_type = 0;
    uint32_t vgt_num_indices    = 0;

    type = r700PrimitiveType(prim);
    num_indices = r700NumVerts(end - start, prim);

    radeon_print(RADEON_RENDER, RADEON_TRACE,
                 "%s type %x num_indices %d\n",
                 __func__, type, num_indices);

    if (type < 0 || num_indices <= 0)
        return;

    SETfield(vgt_primitive_type, type,
             VGT_PRIMITIVE_TYPE__PRIM_TYPE_shift, VGT_PRIMITIVE_TYPE__PRIM_TYPE_mask);

    if (num_indices > 0xffff)
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_32_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }
    else
    {
        SETfield(vgt_index_type, DI_INDEX_SIZE_16_BIT, INDEX_TYPE_shift, INDEX_TYPE_mask);
    }

    /* 16-bit indexes are packed in a 32-bit value */
    SETfield(vgt_index_type,
#if MESA_BIG_ENDIAN
             SQ_ENDIAN_8IN32,
#else
             SQ_ENDIAN_NONE,
#endif
             SWAP_MODE_shift, SWAP_MODE_mask);

    vgt_num_indices = num_indices;
    SETfield(vgt_draw_initiator, DI_MAJOR_MODE_0, MAJOR_MODE_shift, MAJOR_MODE_mask);
    SETfield(vgt_draw_initiator, DI_SRC_SEL_AUTO_INDEX, SOURCE_SELECT_shift, SOURCE_SELECT_mask);

    total_emit += 3  /* VGT_PRIMITIVE_TYPE */
        + 2          /* VGT_INDEX_TYPE */
        + 2          /* NUM_INSTANCES */
        + 4          /* VTX_BASE_VTX_LOC + VTX_START_INST_LOC */
        + 3;         /* DRAW_INDEX_AUTO */

    BEGIN_BATCH_NO_AUTOSTATE(total_emit);
    /* prim */
    R600_OUT_BATCH_REGSEQ(VGT_PRIMITIVE_TYPE, 1);
    R600_OUT_BATCH(vgt_primitive_type);
    /* index type */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
    R600_OUT_BATCH(vgt_index_type);
    /* num instances */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
    R600_OUT_BATCH(1);
    /* offset */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_SET_CTL_CONST, 2));
    R600_OUT_BATCH(mmSQ_VTX_BASE_VTX_LOC - ASIC_CTL_CONST_BASE_INDEX);
    R600_OUT_BATCH(start); /* VTX_BASE_VTX_LOC */
    R600_OUT_BATCH(0);     /* VTX_START_INST_LOC */
    /* draw packet */
    R600_OUT_BATCH(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
    R600_OUT_BATCH(vgt_num_indices);
    R600_OUT_BATCH(vgt_draw_initiator);
    END_BATCH();
    COMMIT_BATCH();
}
/* start 3d, idle, cb/db flush */
#define PRE_EMIT_STATE_BUFSZ (5 + 5 + 14)
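
/**
 * Conservatively predict the number of command-buffer dwords this draw
 * call will emit, and reserve space for them (plus the state atoms),
 * flushing the cs first if it cannot hold them.
 */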
static GLuint r700PredictRenderSize(struct gl_context* ctx,
                                    const struct _mesa_prim *prim,
                                    const struct _mesa_index_buffer *ib,
                                    GLuint nr_prims)
{
    context_t *context = R700_CONTEXT(ctx);
    GLboolean flushed;
    GLuint dwords, i;
    GLuint state_size;

    dwords = PRE_EMIT_STATE_BUFSZ;
    if (ib)
        dwords += nr_prims * 18;
    else {
        for (i = 0; i < nr_prims; ++i)
        {
            dwords += 14;
        }
    }

    state_size = radeonCountStateEmitSize(&context->radeon);
    flushed = rcommonEnsureCmdBufSpace(&context->radeon,
                                       dwords + state_size,
                                       __FUNCTION__);
    if (flushed)
        dwords += radeonCountStateEmitSize(&context->radeon);
    else
        dwords += state_size;

    radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);

    return dwords;
}
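
/* Helper for r700ConvertAttrib: copies one vertex attribute array into
 * the float destination, applying the conversion MACRO per component
 * when the array is normalized. */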
#define CONVERT( TYPE, MACRO ) do {             \
    GLuint i, j, sz;                            \
    sz = input->Size;                           \
    if (input->Normalized) {                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = MACRO(*in);        \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    } else {                                    \
        for (i = 0; i < count; i++) {           \
            const TYPE *in = (TYPE *)src_ptr;   \
            for (j = 0; j < sz; j++) {          \
                *dst_ptr++ = (GLfloat)(*in);    \
                in++;                           \
            }                                   \
            src_ptr += stride;                  \
        }                                       \
    }                                           \
} while (0)
/**
 * Convert attribute data type to float.
 * If the attribute uses a named buffer object, replace the bo with a newly
 * allocated bo.
 */
static void r700ConvertAttrib(struct gl_context *ctx, int count,
                              const struct gl_client_array *input,
                              struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const GLvoid *src_ptr;
    GLboolean mapped_named_bo = GL_FALSE;
    GLfloat *dst_ptr;
    GLuint stride;

    stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;

    /* Convert value for first element only */
    if (input->StrideB == 0)
    {
        count = 1;
    }

    if (input->BufferObj->Name)
    {
        if (!input->BufferObj->Pointer)
        {
            ctx->Driver.MapBufferRange(ctx, 0, input->BufferObj->Size,
                                       GL_MAP_READ_BIT, input->BufferObj);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
    }
    else
    {
        src_ptr = input->Ptr;
    }

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset,
                         sizeof(GLfloat) * input->Size * count, 32);

    radeon_bo_map(attr->bo, 1);

    dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);

    assert(src_ptr != NULL);

    switch (input->Type)
    {
    case GL_DOUBLE:
        CONVERT(GLdouble, (GLfloat));
        break;
    case GL_UNSIGNED_INT:
        CONVERT(GLuint, UINT_TO_FLOAT);
        break;
    case GL_INT:
        CONVERT(GLint, INT_TO_FLOAT);
        break;
    case GL_UNSIGNED_SHORT:
        CONVERT(GLushort, USHORT_TO_FLOAT);
        break;
    case GL_SHORT:
        CONVERT(GLshort, SHORT_TO_FLOAT);
        break;
    case GL_UNSIGNED_BYTE:
        assert(input->Format != GL_BGRA);
        CONVERT(GLubyte, UBYTE_TO_FLOAT);
        break;
    case GL_BYTE:
        CONVERT(GLbyte, BYTE_TO_FLOAT);
        break;
    default:
        assert(0);
        break;
    }

    radeon_bo_unmap(attr->bo);

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, input->BufferObj);
    }
}
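
/**
 * Copy an attribute array whose stride is not dword aligned into a
 * freshly allocated DMA region, padding each element out to the next
 * multiple of 4 bytes so the hardware vertex fetcher can step it.
 */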
static void r700AlignDataToDword(struct gl_context *ctx,
                                 const struct gl_client_array *input,
                                 int count,
                                 struct StreamDesc *attr)
{
    context_t *context = R700_CONTEXT(ctx);
    const int dst_stride = (input->StrideB + 3) & ~3;
    const int size = getTypeSize(input->Type) * input->Size * count;
    GLboolean mapped_named_bo = GL_FALSE;

    radeonAllocDmaRegion(&context->radeon, &attr->bo, &attr->bo_offset, size, 32);

    radeon_bo_map(attr->bo, 1);

    if (!input->BufferObj->Pointer)
    {
        ctx->Driver.MapBufferRange(ctx, 0, input->BufferObj->Size,
                                   GL_MAP_READ_BIT, input->BufferObj);
        mapped_named_bo = GL_TRUE;
    }

    {
        GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
        GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
        int i;

        for (i = 0; i < count; ++i)
        {
            memcpy(dst_ptr, src_ptr, input->StrideB);
            src_ptr += input->StrideB;
            dst_ptr += dst_stride;
        }
    }

    radeon_bo_unmap(attr->bo);
    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, input->BufferObj);
    }

    attr->stride = dst_stride;
}
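
/**
 * Upload (or reference in place) each active vertex attribute array and
 * fill in the corresponding radeon_aos slot. Arrays that need type
 * conversion or byte swapping are converted to floats, dword-aligned
 * named buffer objects are used directly, and everything else is copied
 * into a DMA region with radeonEmitVec*.
 */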
static void r700SetupStreams(struct gl_context *ctx, const struct gl_client_array *input[], int count)
{
    context_t *context = R700_CONTEXT(ctx);
    GLuint stride;
    int ret;
    int i, index;

    R600_STATECHANGE(context, vtx);

    for (index = 0; index < context->nNumActiveAos; index++)
    {
        struct radeon_aos *aos = &context->radeon.tcl.aos[index];
        i = context->stream_desc[index].element;

        stride = (input[i]->StrideB == 0) ? getTypeSize(input[i]->Type) * input[i]->Size : input[i]->StrideB;

        if (input[i]->Type == GL_DOUBLE || input[i]->Type == GL_UNSIGNED_INT || input[i]->Type == GL_INT
#if MESA_BIG_ENDIAN
            || getTypeSize(input[i]->Type) != 4
#endif
            )
        {
            r700ConvertAttrib(ctx, count, input[i], &context->stream_desc[index]);
        }
        else
        {
            if (input[i]->BufferObj->Name)
            {
                if (stride % 4 != 0)
                {
                    assert(((intptr_t) input[i]->Ptr) % input[i]->StrideB == 0);
                    r700AlignDataToDword(ctx, input[i], count, &context->stream_desc[index]);
                    context->stream_desc[index].is_named_bo = GL_FALSE;
                }
                else
                {
                    context->stream_desc[index].stride = input[i]->StrideB;
                    context->stream_desc[index].bo_offset = (intptr_t) input[i]->Ptr;
                    context->stream_desc[index].bo = get_radeon_buffer_object(input[i]->BufferObj)->bo;
                    context->stream_desc[index].is_named_bo = GL_TRUE;
                }
            }
            else
            {
                int size;
                int local_count = count;
                uint32_t *dst;

                if (input[i]->StrideB == 0)
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size;
                    local_count = 1;
                }
                else
                {
                    size = getTypeSize(input[i]->Type) * input[i]->Size * local_count;
                }

                radeonAllocDmaRegion(&context->radeon, &context->stream_desc[index].bo,
                                     &context->stream_desc[index].bo_offset, size, 32);

                radeon_bo_map(context->stream_desc[index].bo, 1);
                assert(context->stream_desc[index].bo->ptr != NULL);

                dst = (uint32_t *)ADD_POINTERS(context->stream_desc[index].bo->ptr,
                                               context->stream_desc[index].bo_offset);

                switch (context->stream_desc[index].dwords)
                {
                case 1:
                    radeonEmitVec4(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 2:
                    radeonEmitVec8(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 3:
                    radeonEmitVec12(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                case 4:
                    radeonEmitVec16(dst, input[i]->Ptr, input[i]->StrideB, local_count);
                    break;
                default:
                    assert(0);
                    break;
                }

                radeon_bo_unmap(context->stream_desc[index].bo);
            }
        }

        aos->count = context->stream_desc[index].stride == 0 ? 1 : count;
        aos->stride = context->stream_desc[index].stride / sizeof(float);
        aos->components = context->stream_desc[index].dwords;
        aos->bo = context->stream_desc[index].bo;
        aos->offset = context->stream_desc[index].bo_offset;

        if (context->stream_desc[index].is_named_bo)
        {
            radeon_cs_space_add_persistent_bo(context->radeon.cmdbuf.cs,
                                              context->stream_desc[index].bo,
                                              RADEON_GEM_DOMAIN_GTT, 0);
        }
    }

    ret = radeon_cs_space_check_with_bo(context->radeon.cmdbuf.cs,
                                        first_elem(&context->radeon.dma.reserved)->bo,
                                        RADEON_GEM_DOMAIN_GTT, 0);
}
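
/* Release per-draw buffers: the temporary attribute bos and the index
 * buffer bo allocated for this draw call. */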
697 static void r700FreeData(struct gl_context *ctx)
699 /* Need to zero tcl.aos[n].bo and tcl.elt_dma_bo
700 * to prevent double unref in radeonReleaseArrays
701 * called during context destroy
703 context_t *context = R700_CONTEXT(ctx);
707 for (i = 0; i < context->nNumActiveAos; i++)
709 if (!context->stream_desc[i].is_named_bo)
711 radeon_bo_unref(context->stream_desc[i].bo);
713 context->radeon.tcl.aos[i].bo = NULL;
716 if (context->ind_buf.bo != NULL)
718 radeon_bo_unref(context->ind_buf.bo);
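
/**
 * Expand unsigned byte indices (and, on big-endian, repack unsigned
 * short indices) into 16-bit values stored two per dword, since the
 * hardware has no 8-bit index mode.
 */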
static void r700FixupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);
    GLvoid *src_ptr;
    GLuint *out;
    int i;
    GLboolean mapped_named_bo = GL_FALSE;

    if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
    {
        ctx->Driver.MapBufferRange(ctx, 0, mesa_ind_buf->obj->Size,
                                   GL_MAP_READ_BIT, mesa_ind_buf->obj);
        mapped_named_bo = GL_TRUE;
        assert(mesa_ind_buf->obj->Pointer != NULL);
    }
    src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

    if (mesa_ind_buf->type == GL_UNSIGNED_BYTE)
    {
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
        GLubyte *in = (GLubyte *)src_ptr;

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }

        radeon_bo_unmap(context->ind_buf.bo);
#if MESA_BIG_ENDIAN
    }
    else
    { /* if (mesa_ind_buf->type == GL_UNSIGNED_SHORT) */
        GLushort *in = (GLushort *)src_ptr;
        GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);

        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        out = (GLuint *)ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        for (i = 0; i + 1 < mesa_ind_buf->count; i += 2)
        {
            *out++ = in[i] | in[i + 1] << 16;
        }

        if (i < mesa_ind_buf->count)
        {
            *out++ = in[i];
        }
        radeon_bo_unmap(context->ind_buf.bo);
#endif
    }

    context->ind_buf.is_32bit = GL_FALSE;
    context->ind_buf.count = mesa_ind_buf->count;

    if (mapped_named_bo)
    {
        ctx->Driver.UnmapBuffer(ctx, mesa_ind_buf->obj);
    }
}
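
/**
 * Upload the application's index buffer into a DMA region the GPU can
 * read; 8-bit (and, on big-endian, 16-bit) indices are routed through
 * r700FixupIndexBuffer instead.
 */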
static void r700SetupIndexBuffer(struct gl_context *ctx, const struct _mesa_index_buffer *mesa_ind_buf)
{
    context_t *context = R700_CONTEXT(ctx);

    if (!mesa_ind_buf) {
        context->ind_buf.bo = NULL;
        return;
    }

#if MESA_BIG_ENDIAN
    if (mesa_ind_buf->type == GL_UNSIGNED_INT)
#else
    if (mesa_ind_buf->type != GL_UNSIGNED_BYTE)
#endif
    {
        const GLvoid *src_ptr;
        GLvoid *dst_ptr;
        GLboolean mapped_named_bo = GL_FALSE;

        if (mesa_ind_buf->obj->Name && !mesa_ind_buf->obj->Pointer)
        {
            ctx->Driver.MapBufferRange(ctx, 0, mesa_ind_buf->obj->Size,
                                       GL_MAP_READ_BIT, mesa_ind_buf->obj);
            assert(mesa_ind_buf->obj->Pointer != NULL);
            mapped_named_bo = GL_TRUE;
        }

        src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);

        const GLuint size = mesa_ind_buf->count * getTypeSize(mesa_ind_buf->type);

        radeonAllocDmaRegion(&context->radeon, &context->ind_buf.bo,
                             &context->ind_buf.bo_offset, size, 4);
        radeon_bo_map(context->ind_buf.bo, 1);
        assert(context->ind_buf.bo->ptr != NULL);
        dst_ptr = ADD_POINTERS(context->ind_buf.bo->ptr, context->ind_buf.bo_offset);

        memcpy(dst_ptr, src_ptr, size);

        radeon_bo_unmap(context->ind_buf.bo);
        context->ind_buf.is_32bit = (mesa_ind_buf->type == GL_UNSIGNED_INT);
        context->ind_buf.count = mesa_ind_buf->count;

        if (mapped_named_bo)
        {
            ctx->Driver.UnmapBuffer(ctx, mesa_ind_buf->obj);
        }
    }
    else
    {
        r700FixupIndexBuffer(ctx, mesa_ind_buf);
    }
}
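
/* Check for rendering modes that must fall back to the software TNL
 * pipeline. */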
static GLboolean check_fallbacks(struct gl_context *ctx)
{
    if (ctx->RenderMode != GL_RENDER)
        return GL_TRUE;

    return GL_FALSE;
}
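
/**
 * Validate state, emit all state atoms and render the primitive list;
 * returns GL_FALSE if a software fallback is required.
 */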
static GLboolean r700TryDrawPrims(struct gl_context *ctx,
                                  const struct gl_client_array *arrays[],
                                  const struct _mesa_prim *prim,
                                  GLuint nr_prims,
                                  const struct _mesa_index_buffer *ib,
                                  GLuint min_index,
                                  GLuint max_index)
{
    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    GLuint i, id = 0;
    struct radeon_renderbuffer *rrb;

    if (ctx->NewState)
        _mesa_update_state( ctx );

    if (check_fallbacks(ctx))
        return GL_FALSE;

    _tnl_UpdateFixedFunctionProgram(ctx);
    r700SetVertexFormat(ctx, arrays, max_index + 1);
    /* shaders need to be updated before buffers are validated */
    r700UpdateShaders(ctx);
    if (!r600ValidateBuffers(ctx))
        return GL_FALSE;

    /* always emit CB base to prevent
     * lock ups on some chips.
     */
    R600_STATECHANGE(context, cb_target);
    /* mark vtx as dirty since it changes per-draw */
    R600_STATECHANGE(context, vtx);

    r700SetScissor(context);
    r700SetupVertexProgram(ctx);
    r700SetupFragmentProgram(ctx);
    r700UpdateShaderStates(ctx);

    GLuint emit_end = r700PredictRenderSize(ctx, prim, ib, nr_prims)
        + context->radeon.cmdbuf.cs->cdw;

    r700SetupIndexBuffer(ctx, ib);
    r700SetupStreams(ctx, arrays, max_index + 1);

    radeonEmitState(radeon);

    radeon_debug_add_indent();
    for (i = 0; i < nr_prims; ++i)
    {
        if (context->ind_buf.bo)
            r700RunRenderPrimitive(ctx,
                                   prim[i].start,
                                   prim[i].start + prim[i].count,
                                   prim[i].mode,
                                   prim[i].basevertex);
        else
            r700RunRenderPrimitiveImmediate(ctx,
                                            prim[i].start,
                                            prim[i].start + prim[i].count,
                                            prim[i].mode);
    }
    radeon_debug_remove_indent();

    /* Flush render op cached for last several quads. */
    /* XXX drm should handle this in fence submit */
    r700WaitForIdleClean(context);

    rrb = radeon_get_colorbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     CB_ACTION_ENA_bit | (1 << (id + 6)));

    rrb = radeon_get_depthbuffer(&context->radeon);
    if (rrb && rrb->bo)
        r700SyncSurf(context, rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM,
                     DB_ACTION_ENA_bit | DB_DEST_BASE_ENA_bit);

    r700FreeData(ctx);

    if (emit_end < context->radeon.cmdbuf.cs->cdw)
    {
        WARN_ONCE("Rendering was %d commands larger than predicted size."
                  " We might overflow command buffer.\n",
                  context->radeon.cmdbuf.cs->cdw - emit_end);
    }

    return GL_TRUE;
}
static void r700DrawPrims(struct gl_context *ctx,
                          const struct gl_client_array *arrays[],
                          const struct _mesa_prim *prim,
                          GLuint nr_prims,
                          const struct _mesa_index_buffer *ib,
                          GLboolean index_bounds_valid,
                          GLuint min_index,
                          GLuint max_index)
{
    GLboolean retval = GL_FALSE;

    context_t *context = R700_CONTEXT(ctx);
    radeonContextPtr radeon = &context->radeon;
    radeon_prepare_render(radeon);

    /* This check should get folded into just the places that
     * min/max index are really needed.
     */
    if (!vbo_all_varyings_in_vbos(arrays)) {
        if (!index_bounds_valid)
            vbo_get_minmax_index(ctx, prim, ib, &min_index, &max_index);
        /* do we want to rebase, minimizes the
         * amount of data to upload? */
        if (min_index) {
            vbo_rebase_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index, r700DrawPrims);
            return;
        }
    }

    /* Make an attempt at drawing */
    retval = r700TryDrawPrims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);

    /* If failed run tnl pipeline - it should take care of fallbacks */
    if (!retval) {
        _swsetup_Wakeup(ctx);
        _tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
    }
}
void r700InitDraw(struct gl_context *ctx)
{
    struct vbo_context *vbo = vbo_context(ctx);

    vbo->draw_prims = r700DrawPrims;
}