/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Jerome Glisse
 */
#include "r600_hw_context_priv.h"
#include "r600d.h"
#include "util/u_memory.h"
#include <errno.h>

/* Get the mask of enabled render backends */
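/* Each render backend writes occlusion-query results into its own slot of
 * the query buffer, so the driver needs to know which backends are actually
 * enabled in order to read only valid slots. Newer kernels report this
 * directly via the backend_map query; on older kernels the mask is derived
 * empirically below by issuing a ZPASS_DONE event and checking which slots
 * were written. (Summary of the code below, not of any external spec.)
 */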
void r600_get_backend_mask(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_resource *buffer;
        uint32_t *results;
        unsigned num_backends = ctx->screen->info.r600_num_backends;
        unsigned i, mask = 0;
        uint64_t va;

        /* if backend_map query is supported by the kernel */
        if (ctx->screen->info.r600_backend_map_valid) {
                unsigned num_tile_pipes = ctx->screen->info.r600_num_tile_pipes;
                unsigned backend_map = ctx->screen->info.r600_backend_map;
                unsigned item_width, item_mask;

                if (ctx->chip_class >= EVERGREEN) {
                        item_width = 4;
                        item_mask = 0x7;
                } else {
                        item_width = 2;
                        item_mask = 0x3;
                }

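                /* The kernel packs one backend index per tile pipe into
                 * backend_map, item_width bits per entry (masked by
                 * item_mask); decode each entry below and set the matching
                 * bit in the mask. */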
                while (num_tile_pipes--) {
                        i = backend_map & item_mask;
                        mask |= (1 << i);
                        backend_map >>= item_width;
                }
                if (mask != 0) {
                        ctx->backend_mask = mask;
                        return;
                }
        }

        /* otherwise, use a fallback path for older kernels */

        /* create buffer for event data */
        buffer = (struct r600_resource*)
                pipe_buffer_create(&ctx->screen->screen, PIPE_BIND_CUSTOM,
                                   PIPE_USAGE_STAGING, ctx->max_db*16);
        if (!buffer)
                goto err;

        va = r600_resource_va(&ctx->screen->screen, (void*)buffer);

        /* initialize buffer with zeroes */
        results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_WRITE);
        if (results) {
                memset(results, 0, ctx->max_db * 4 * 4);
                ctx->ws->buffer_unmap(buffer->cs_buf);

                /* emit EVENT_WRITE for ZPASS_DONE */
                cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 2, 0);
                cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_ZPASS_DONE) | EVENT_INDEX(1);
                cs->buf[cs->cdw++] = va;
                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFF;

                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, buffer, RADEON_USAGE_WRITE);

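                /* Each backend presumably writes one 16-byte (4-dword) slot
                 * of ZPASS counters; mapping the buffer for read below is
                 * expected to flush the CS and wait for the GPU, and a
                 * backend that exists leaves a nonzero high dword in its
                 * slot. (Inference from this code, not from a hardware
                 * spec.) */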
                /* analyze results */
                results = ctx->ws->buffer_map(buffer->cs_buf, ctx->cs, PIPE_TRANSFER_READ);
                if (results) {
                        for (i = 0; i < ctx->max_db; i++) {
                                /* at least the highest bit will be set if the backend is used */
                                if (results[i*4 + 1])
                                        mask |= (1 << i);
                        }
                        ctx->ws->buffer_unmap(buffer->cs_buf);
                }
        }

        pipe_resource_reference((struct pipe_resource**)&buffer, NULL);

        if (mask != 0) {
                ctx->backend_mask = mask;
                return;
        }

err:
        /* fall back to the old method: set the num_backends lowest bits to 1 */
        ctx->backend_mask = (~((uint32_t)0)) >> (32 - num_backends);
        return;
}

void r600_context_ps_partial_flush(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;

        if (!(ctx->flags & R600_CONTEXT_DRAW_PENDING))
                return;

        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4);

        ctx->flags &= ~R600_CONTEXT_DRAW_PENDING;
}

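/* Build a register block's PM4 template: a SET_* packet header, the packed
 * register offset, then space for the n register values. Registers backed by
 * a buffer object get an extra NOP packet whose payload dword is later
 * patched with the relocation, and RV6xx surface-base registers additionally
 * get a SURFACE_BASE_UPDATE packet. (Description of the code below.)
 */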
static void r600_init_block(struct r600_context *ctx,
                            struct r600_block *block,
                            const struct r600_reg *reg, int index, int nreg,
                            unsigned opcode, unsigned offset_base)
{
        int i = index;
        int j, n = nreg;

        /* initialize block */
        if (opcode == PKT3_SET_RESOURCE) {
                block->flags = BLOCK_FLAG_RESOURCE;
                block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY; /* dirty all blocks at start */
        } else {
                block->flags = 0;
                block->status |= R600_BLOCK_STATUS_DIRTY; /* dirty all blocks at start */
        }
        block->start_offset = reg[i].offset;
        block->pm4[block->pm4_ndwords++] = PKT3(opcode, n, 0);
        block->pm4[block->pm4_ndwords++] = (block->start_offset - offset_base) >> 2;
        block->reg = &block->pm4[block->pm4_ndwords];
        block->pm4_ndwords += n;
        block->nreg = n;
        block->nreg_dirty = n;
        LIST_INITHEAD(&block->list);
        LIST_INITHEAD(&block->enable_list);

        for (j = 0; j < n; j++) {
                if (reg[i+j].flags & REG_FLAG_DIRTY_ALWAYS) {
                        block->flags |= REG_FLAG_DIRTY_ALWAYS;
                }
                if (reg[i+j].flags & REG_FLAG_ENABLE_ALWAYS) {
                        if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
                                block->status |= R600_BLOCK_STATUS_ENABLED;
                                LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
                                LIST_ADDTAIL(&block->list, &ctx->dirty);
                        }
                }
                if (reg[i+j].flags & REG_FLAG_FLUSH_CHANGE) {
                        block->flags |= REG_FLAG_FLUSH_CHANGE;
                }

                if (reg[i+j].flags & REG_FLAG_NEED_BO) {
                        block->nbo++;
                        assert(block->nbo < R600_BLOCK_MAX_BO);
                        block->pm4_bo_index[j] = block->nbo;
                        block->pm4[block->pm4_ndwords++] = PKT3(PKT3_NOP, 0, 0);
                        block->pm4[block->pm4_ndwords++] = 0x00000000;
                        block->reloc[block->nbo].bo_pm4_index = block->pm4_ndwords - 1;
                }
                if ((ctx->family > CHIP_R600) &&
                    (ctx->family < CHIP_RV770) && reg[i+j].flags & REG_FLAG_RV6XX_SBU) {
                        block->pm4[block->pm4_ndwords++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
                        block->pm4[block->pm4_ndwords++] = reg[i+j].sbu_flags;
                }
        }
        /* check that we stay within the limit */
        assert(block->pm4_ndwords < R600_BLOCK_MAX_REG);
}

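/* Register blocks are looked up through a two-level table: CTX_RANGE_ID()
 * hashes a register offset to a range and CTX_BLOCK_ID() indexes the block
 * table inside that range. r600_context_add_block() walks a register list,
 * groups consecutive offsets into blocks, and installs each block under all
 * of its offsets. (Summary of the code below.)
 */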
int r600_context_add_block(struct r600_context *ctx, const struct r600_reg *reg, unsigned nreg,
                           unsigned opcode, unsigned offset_base)
{
        struct r600_block *block;
        struct r600_range *range;
        int offset;

        for (unsigned i = 0, n = 0; i < nreg; i += n) {
                /* skip the new-block marker */
                if (reg[i].offset == GROUP_FORCE_NEW_BLOCK) {
                        n = 1;
                        continue;
                }

                /* skip registers that don't exist on R600 when running on R600 */
                if ((reg[i].flags & REG_FLAG_NOT_R600) && ctx->family == CHIP_R600) {
                        n = 1;
                        continue;
                }

                /* registers that need relocation are in their own group;
                 * find the number of consecutive registers */
                n = 0;
                offset = reg[i].offset;
                while (reg[i + n].offset == offset) {
                        n++;
                        offset += 4;
                        if ((n + i) >= nreg)
                                break;
                        if (n >= (R600_BLOCK_MAX_REG - 2))
                                break;
                }

                /* allocate new block */
                block = calloc(1, sizeof(struct r600_block));
                if (block == NULL) {
                        return -ENOMEM;
                }
                ctx->nblocks++;
                for (int j = 0; j < n; j++) {
                        range = &ctx->range[CTX_RANGE_ID(reg[i + j].offset)];
                        /* create the block table if it doesn't exist */
                        if (!range->blocks)
                                range->blocks = calloc(1 << HASH_SHIFT, sizeof(void *));
                        if (!range->blocks)
                                return -ENOMEM;

                        range->blocks[CTX_BLOCK_ID(reg[i + j].offset)] = block;
                }

                r600_init_block(ctx, block, reg, i, n, opcode, offset_base);
        }
        return 0;
}

/* R600/R700 configuration */
static const struct r600_reg r600_config_reg_list[] = {
        {R_008958_VGT_PRIMITIVE_TYPE, 0, 0},
        {R_008C04_SQ_GPR_RESOURCE_MGMT_1, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
        {R_009508_TA_CNTL_AUX, REG_FLAG_ENABLE_ALWAYS | REG_FLAG_FLUSH_CHANGE, 0},
};

static const struct r600_reg r600_ctl_const_list[] = {
        {R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0},
};

static const struct r600_reg r600_context_reg_list[] = {
        {R_028A4C_PA_SC_MODE_CNTL, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028040_CB_COLOR0_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(0)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280A0_CB_COLOR0_INFO, REG_FLAG_NEED_BO, 0},
        {R_028060_CB_COLOR0_SIZE, 0, 0},
        {R_028080_CB_COLOR0_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280E0_CB_COLOR0_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280C0_CB_COLOR0_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028044_CB_COLOR1_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(1)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280A4_CB_COLOR1_INFO, REG_FLAG_NEED_BO, 0},
        {R_028064_CB_COLOR1_SIZE, 0, 0},
        {R_028084_CB_COLOR1_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280E4_CB_COLOR1_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280C4_CB_COLOR1_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028048_CB_COLOR2_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(2)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280A8_CB_COLOR2_INFO, REG_FLAG_NEED_BO, 0},
        {R_028068_CB_COLOR2_SIZE, 0, 0},
        {R_028088_CB_COLOR2_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280E8_CB_COLOR2_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280C8_CB_COLOR2_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_02804C_CB_COLOR3_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(3)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280AC_CB_COLOR3_INFO, REG_FLAG_NEED_BO, 0},
        {R_02806C_CB_COLOR3_SIZE, 0, 0},
        {R_02808C_CB_COLOR3_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280EC_CB_COLOR3_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280CC_CB_COLOR3_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028050_CB_COLOR4_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(4)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280B0_CB_COLOR4_INFO, REG_FLAG_NEED_BO, 0},
        {R_028070_CB_COLOR4_SIZE, 0, 0},
        {R_028090_CB_COLOR4_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280F0_CB_COLOR4_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280D0_CB_COLOR4_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028054_CB_COLOR5_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(5)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280B4_CB_COLOR5_INFO, REG_FLAG_NEED_BO, 0},
        {R_028074_CB_COLOR5_SIZE, 0, 0},
        {R_028094_CB_COLOR5_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280F4_CB_COLOR5_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280D4_CB_COLOR5_TILE, REG_FLAG_NEED_BO, 0},
        {R_028058_CB_COLOR6_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(6)},
        {R_0280B8_CB_COLOR6_INFO, REG_FLAG_NEED_BO, 0},
        {R_028078_CB_COLOR6_SIZE, 0, 0},
        {R_028098_CB_COLOR6_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280F8_CB_COLOR6_FRAG, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280D8_CB_COLOR6_TILE, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_02805C_CB_COLOR7_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_COLOR(7)},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0280BC_CB_COLOR7_INFO, REG_FLAG_NEED_BO, 0},
        {R_02807C_CB_COLOR7_SIZE, 0, 0},
        {R_02809C_CB_COLOR7_VIEW, 0, 0},
        {R_0280FC_CB_COLOR7_FRAG, REG_FLAG_NEED_BO, 0},
        {R_0280DC_CB_COLOR7_TILE, REG_FLAG_NEED_BO, 0},
        {R_028120_CB_CLEAR_RED, 0, 0},
        {R_028124_CB_CLEAR_GREEN, 0, 0},
        {R_028128_CB_CLEAR_BLUE, 0, 0},
        {R_02812C_CB_CLEAR_ALPHA, 0, 0},
        {R_028410_SX_ALPHA_TEST_CONTROL, 0, 0},
        {R_028414_CB_BLEND_RED, 0, 0},
        {R_028418_CB_BLEND_GREEN, 0, 0},
        {R_02841C_CB_BLEND_BLUE, 0, 0},
        {R_028420_CB_BLEND_ALPHA, 0, 0},
        {R_028424_CB_FOG_RED, 0, 0},
        {R_028428_CB_FOG_GREEN, 0, 0},
        {R_02842C_CB_FOG_BLUE, 0, 0},
        {R_028430_DB_STENCILREFMASK, 0, 0},
        {R_028434_DB_STENCILREFMASK_BF, 0, 0},
        {R_028438_SX_ALPHA_REF, 0, 0},
        {R_028780_CB_BLEND0_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_028784_CB_BLEND1_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_028788_CB_BLEND2_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_02878C_CB_BLEND3_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_028790_CB_BLEND4_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_028794_CB_BLEND5_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_028798_CB_BLEND6_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_02879C_CB_BLEND7_CONTROL, REG_FLAG_NOT_R600, 0},
        {R_0287A0_CB_SHADER_CONTROL, 0, 0},
        {R_028800_DB_DEPTH_CONTROL, 0, 0},
        {R_028804_CB_BLEND_CONTROL, 0, 0},
        {R_02880C_DB_SHADER_CONTROL, 0, 0},
        {R_02800C_DB_DEPTH_BASE, REG_FLAG_NEED_BO|REG_FLAG_RV6XX_SBU, SURFACE_BASE_UPDATE_DEPTH},
        {R_028000_DB_DEPTH_SIZE, 0, 0},
        {R_028004_DB_DEPTH_VIEW, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028010_DB_DEPTH_INFO, REG_FLAG_NEED_BO, 0},
        {R_028A6C_VGT_GS_OUT_PRIM_TYPE, 0, 0},
        {R_028D24_DB_HTILE_SURFACE, 0, 0},
        {R_028D34_DB_PREFETCH_LIMIT, 0, 0},
        {R_028204_PA_SC_WINDOW_SCISSOR_TL, 0, 0},
        {R_028208_PA_SC_WINDOW_SCISSOR_BR, 0, 0},
        {R_028250_PA_SC_VPORT_SCISSOR_0_TL, 0, 0},
        {R_028254_PA_SC_VPORT_SCISSOR_0_BR, 0, 0},
        {R_02843C_PA_CL_VPORT_XSCALE_0, 0, 0},
        {R_028440_PA_CL_VPORT_XOFFSET_0, 0, 0},
        {R_028444_PA_CL_VPORT_YSCALE_0, 0, 0},
        {R_028448_PA_CL_VPORT_YOFFSET_0, 0, 0},
        {R_02844C_PA_CL_VPORT_ZSCALE_0, 0, 0},
        {R_028450_PA_CL_VPORT_ZOFFSET_0, 0, 0},
        {R_0286D4_SPI_INTERP_CONTROL_0, 0, 0},
        {R_028810_PA_CL_CLIP_CNTL, 0, 0},
        {R_028814_PA_SU_SC_MODE_CNTL, 0, 0},
        {R_02881C_PA_CL_VS_OUT_CNTL, 0, 0},
        {R_028A00_PA_SU_POINT_SIZE, 0, 0},
        {R_028A04_PA_SU_POINT_MINMAX, 0, 0},
        {R_028A08_PA_SU_LINE_CNTL, 0, 0},
        {R_028A0C_PA_SC_LINE_STIPPLE, 0, 0},
        {R_028C08_PA_SU_VTX_CNTL, 0, 0},
        {R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 0, 0},
        {R_028DFC_PA_SU_POLY_OFFSET_CLAMP, 0, 0},
        {R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 0, 0},
        {R_028E04_PA_SU_POLY_OFFSET_FRONT_OFFSET, 0, 0},
        {R_028E08_PA_SU_POLY_OFFSET_BACK_SCALE, 0, 0},
        {R_028E0C_PA_SU_POLY_OFFSET_BACK_OFFSET, 0, 0},
        {R_028E20_PA_CL_UCP0_X, 0, 0},
        {R_028E24_PA_CL_UCP0_Y, 0, 0},
        {R_028E28_PA_CL_UCP0_Z, 0, 0},
        {R_028E2C_PA_CL_UCP0_W, 0, 0},
        {R_028E30_PA_CL_UCP1_X, 0, 0},
        {R_028E34_PA_CL_UCP1_Y, 0, 0},
        {R_028E38_PA_CL_UCP1_Z, 0, 0},
        {R_028E3C_PA_CL_UCP1_W, 0, 0},
        {R_028E40_PA_CL_UCP2_X, 0, 0},
        {R_028E44_PA_CL_UCP2_Y, 0, 0},
        {R_028E48_PA_CL_UCP2_Z, 0, 0},
        {R_028E4C_PA_CL_UCP2_W, 0, 0},
        {R_028E50_PA_CL_UCP3_X, 0, 0},
        {R_028E54_PA_CL_UCP3_Y, 0, 0},
        {R_028E58_PA_CL_UCP3_Z, 0, 0},
        {R_028E5C_PA_CL_UCP3_W, 0, 0},
        {R_028E60_PA_CL_UCP4_X, 0, 0},
        {R_028E64_PA_CL_UCP4_Y, 0, 0},
        {R_028E68_PA_CL_UCP4_Z, 0, 0},
        {R_028E6C_PA_CL_UCP4_W, 0, 0},
        {R_028E70_PA_CL_UCP5_X, 0, 0},
        {R_028E74_PA_CL_UCP5_Y, 0, 0},
        {R_028E78_PA_CL_UCP5_Z, 0, 0},
        {R_028E7C_PA_CL_UCP5_W, 0, 0},
        {R_028350_SX_MISC, 0, 0},
        {R_028380_SQ_VTX_SEMANTIC_0, 0, 0},
        {R_028384_SQ_VTX_SEMANTIC_1, 0, 0},
        {R_028388_SQ_VTX_SEMANTIC_2, 0, 0},
        {R_02838C_SQ_VTX_SEMANTIC_3, 0, 0},
        {R_028390_SQ_VTX_SEMANTIC_4, 0, 0},
        {R_028394_SQ_VTX_SEMANTIC_5, 0, 0},
        {R_028398_SQ_VTX_SEMANTIC_6, 0, 0},
        {R_02839C_SQ_VTX_SEMANTIC_7, 0, 0},
        {R_0283A0_SQ_VTX_SEMANTIC_8, 0, 0},
        {R_0283A4_SQ_VTX_SEMANTIC_9, 0, 0},
        {R_0283A8_SQ_VTX_SEMANTIC_10, 0, 0},
        {R_0283AC_SQ_VTX_SEMANTIC_11, 0, 0},
        {R_0283B0_SQ_VTX_SEMANTIC_12, 0, 0},
        {R_0283B4_SQ_VTX_SEMANTIC_13, 0, 0},
        {R_0283B8_SQ_VTX_SEMANTIC_14, 0, 0},
        {R_0283BC_SQ_VTX_SEMANTIC_15, 0, 0},
        {R_0283C0_SQ_VTX_SEMANTIC_16, 0, 0},
        {R_0283C4_SQ_VTX_SEMANTIC_17, 0, 0},
        {R_0283C8_SQ_VTX_SEMANTIC_18, 0, 0},
        {R_0283CC_SQ_VTX_SEMANTIC_19, 0, 0},
        {R_0283D0_SQ_VTX_SEMANTIC_20, 0, 0},
        {R_0283D4_SQ_VTX_SEMANTIC_21, 0, 0},
        {R_0283D8_SQ_VTX_SEMANTIC_22, 0, 0},
        {R_0283DC_SQ_VTX_SEMANTIC_23, 0, 0},
        {R_0283E0_SQ_VTX_SEMANTIC_24, 0, 0},
        {R_0283E4_SQ_VTX_SEMANTIC_25, 0, 0},
        {R_0283E8_SQ_VTX_SEMANTIC_26, 0, 0},
        {R_0283EC_SQ_VTX_SEMANTIC_27, 0, 0},
        {R_0283F0_SQ_VTX_SEMANTIC_28, 0, 0},
        {R_0283F4_SQ_VTX_SEMANTIC_29, 0, 0},
        {R_0283F8_SQ_VTX_SEMANTIC_30, 0, 0},
        {R_0283FC_SQ_VTX_SEMANTIC_31, 0, 0},
        {R_028614_SPI_VS_OUT_ID_0, 0, 0},
        {R_028618_SPI_VS_OUT_ID_1, 0, 0},
        {R_02861C_SPI_VS_OUT_ID_2, 0, 0},
        {R_028620_SPI_VS_OUT_ID_3, 0, 0},
        {R_028624_SPI_VS_OUT_ID_4, 0, 0},
        {R_028628_SPI_VS_OUT_ID_5, 0, 0},
        {R_02862C_SPI_VS_OUT_ID_6, 0, 0},
        {R_028630_SPI_VS_OUT_ID_7, 0, 0},
        {R_028634_SPI_VS_OUT_ID_8, 0, 0},
        {R_028638_SPI_VS_OUT_ID_9, 0, 0},
        {R_0286C4_SPI_VS_OUT_CONFIG, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028858_SQ_PGM_START_VS, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028868_SQ_PGM_RESOURCES_VS, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028894_SQ_PGM_START_FS, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_0288A4_SQ_PGM_RESOURCES_FS, 0, 0},
        {R_0288DC_SQ_PGM_CF_OFFSET_FS, 0, 0},
        {R_028644_SPI_PS_INPUT_CNTL_0, 0, 0},
        {R_028648_SPI_PS_INPUT_CNTL_1, 0, 0},
        {R_02864C_SPI_PS_INPUT_CNTL_2, 0, 0},
        {R_028650_SPI_PS_INPUT_CNTL_3, 0, 0},
        {R_028654_SPI_PS_INPUT_CNTL_4, 0, 0},
        {R_028658_SPI_PS_INPUT_CNTL_5, 0, 0},
        {R_02865C_SPI_PS_INPUT_CNTL_6, 0, 0},
        {R_028660_SPI_PS_INPUT_CNTL_7, 0, 0},
        {R_028664_SPI_PS_INPUT_CNTL_8, 0, 0},
        {R_028668_SPI_PS_INPUT_CNTL_9, 0, 0},
        {R_02866C_SPI_PS_INPUT_CNTL_10, 0, 0},
        {R_028670_SPI_PS_INPUT_CNTL_11, 0, 0},
        {R_028674_SPI_PS_INPUT_CNTL_12, 0, 0},
        {R_028678_SPI_PS_INPUT_CNTL_13, 0, 0},
        {R_02867C_SPI_PS_INPUT_CNTL_14, 0, 0},
        {R_028680_SPI_PS_INPUT_CNTL_15, 0, 0},
        {R_028684_SPI_PS_INPUT_CNTL_16, 0, 0},
        {R_028688_SPI_PS_INPUT_CNTL_17, 0, 0},
        {R_02868C_SPI_PS_INPUT_CNTL_18, 0, 0},
        {R_028690_SPI_PS_INPUT_CNTL_19, 0, 0},
        {R_028694_SPI_PS_INPUT_CNTL_20, 0, 0},
        {R_028698_SPI_PS_INPUT_CNTL_21, 0, 0},
        {R_02869C_SPI_PS_INPUT_CNTL_22, 0, 0},
        {R_0286A0_SPI_PS_INPUT_CNTL_23, 0, 0},
        {R_0286A4_SPI_PS_INPUT_CNTL_24, 0, 0},
        {R_0286A8_SPI_PS_INPUT_CNTL_25, 0, 0},
        {R_0286AC_SPI_PS_INPUT_CNTL_26, 0, 0},
        {R_0286B0_SPI_PS_INPUT_CNTL_27, 0, 0},
        {R_0286B4_SPI_PS_INPUT_CNTL_28, 0, 0},
        {R_0286B8_SPI_PS_INPUT_CNTL_29, 0, 0},
        {R_0286BC_SPI_PS_INPUT_CNTL_30, 0, 0},
        {R_0286C0_SPI_PS_INPUT_CNTL_31, 0, 0},
        {R_0286CC_SPI_PS_IN_CONTROL_0, 0, 0},
        {R_0286D0_SPI_PS_IN_CONTROL_1, 0, 0},
        {R_0286D8_SPI_INPUT_Z, 0, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028840_SQ_PGM_START_PS, REG_FLAG_NEED_BO, 0},
        {GROUP_FORCE_NEW_BLOCK, 0, 0},
        {R_028850_SQ_PGM_RESOURCES_PS, 0, 0},
        {R_028854_SQ_PGM_EXPORTS_PS, 0, 0},
        {R_028408_VGT_INDX_OFFSET, 0, 0},
        {R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 0, 0},
        {R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 0, 0},
};

/* SHADER RESOURCE R600/R700 */
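/* Each shader resource descriptor (texture or vertex-buffer constant) is a
 * group of seven consecutive RESOURCEi_WORD* registers; blocks are laid out
 * at a fixed stride per resource ID (0x1c bytes = 7 dwords, matching the
 * register list in r600_resource_range_init() below).
 */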
int r600_resource_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride, struct r600_reg *reg, int nreg, unsigned offset_base)
{
        int i;
        struct r600_block *block;
        range->blocks = calloc(nblocks, sizeof(struct r600_block *));
        if (range->blocks == NULL)
                return -ENOMEM;

        reg[0].offset += offset;
        for (i = 0; i < nblocks; i++) {
                block = calloc(1, sizeof(struct r600_block));
                if (block == NULL) {
                        return -ENOMEM;
                }
                ctx->nblocks++;
                range->blocks[i] = block;
                r600_init_block(ctx, block, reg, 0, nreg, PKT3_SET_RESOURCE, offset_base);

                reg[0].offset += stride;
        }
        return 0;
}

static int r600_resource_range_init(struct r600_context *ctx, struct r600_range *range, unsigned offset, unsigned nblocks, unsigned stride)
{
        struct r600_reg r600_shader_resource[] = {
                {R_038000_RESOURCE0_WORD0, REG_FLAG_NEED_BO, 0},
                {R_038004_RESOURCE0_WORD1, REG_FLAG_NEED_BO, 0},
                {R_038008_RESOURCE0_WORD2, 0, 0},
                {R_03800C_RESOURCE0_WORD3, 0, 0},
                {R_038010_RESOURCE0_WORD4, 0, 0},
                {R_038014_RESOURCE0_WORD5, 0, 0},
                {R_038018_RESOURCE0_WORD6, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_resource);

        return r600_resource_init(ctx, range, offset, nblocks, stride, r600_shader_resource, nreg, R600_RESOURCE_OFFSET);
}

/* SHADER SAMPLER R600/R700/EG/CM */
int r600_state_sampler_init(struct r600_context *ctx, uint32_t offset)
{
        struct r600_reg r600_shader_sampler[] = {
                {R_03C000_SQ_TEX_SAMPLER_WORD0_0, 0, 0},
                {R_03C004_SQ_TEX_SAMPLER_WORD1_0, 0, 0},
                {R_03C008_SQ_TEX_SAMPLER_WORD2_0, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_sampler);

        for (int i = 0; i < nreg; i++) {
                r600_shader_sampler[i].offset += offset;
        }
        return r600_context_add_block(ctx, r600_shader_sampler, nreg, PKT3_SET_SAMPLER, R600_SAMPLER_OFFSET);
}

/* SHADER SAMPLER BORDER R600/R700 */
static int r600_state_sampler_border_init(struct r600_context *ctx, uint32_t offset)
{
        struct r600_reg r600_shader_sampler_border[] = {
                {R_00A400_TD_PS_SAMPLER0_BORDER_RED, 0, 0},
                {R_00A404_TD_PS_SAMPLER0_BORDER_GREEN, 0, 0},
                {R_00A408_TD_PS_SAMPLER0_BORDER_BLUE, 0, 0},
                {R_00A40C_TD_PS_SAMPLER0_BORDER_ALPHA, 0, 0},
        };
        unsigned nreg = Elements(r600_shader_sampler_border);

        for (int i = 0; i < nreg; i++) {
                r600_shader_sampler_border[i].offset += offset;
        }
        return r600_context_add_block(ctx, r600_shader_sampler_border, nreg, PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
}

static int r600_loop_const_init(struct r600_context *ctx, uint32_t offset)
{
        unsigned nreg = 32;
        struct r600_reg r600_loop_consts[32];
        int i;

        for (i = 0; i < nreg; i++) {
                r600_loop_consts[i].offset = R600_LOOP_CONST_OFFSET + ((offset + i) * 4);
                r600_loop_consts[i].flags = REG_FLAG_DIRTY_ALWAYS;
                r600_loop_consts[i].sbu_flags = 0;
        }
        return r600_context_add_block(ctx, r600_loop_consts, nreg, PKT3_SET_LOOP_CONST, R600_LOOP_CONST_OFFSET);
}

static void r600_free_resource_range(struct r600_context *ctx, struct r600_range *range, int nblocks)
{
        struct r600_block *block;
        int i;

        if (!range->blocks) {
                return; /* nothing to do */
        }

        for (i = 0; i < nblocks; i++) {
                block = range->blocks[i];
                if (block) {
                        for (int k = 1; k <= block->nbo; k++)
                                pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
                        free(block);
                }
        }
        free(range->blocks);
}

/* cleanup */
void r600_context_fini(struct r600_context *ctx)
{
        struct r600_block *block;
        struct r600_range *range;

        if (ctx->range) {
                for (int i = 0; i < NUM_RANGES; i++) {
                        if (!ctx->range[i].blocks)
                                continue;
                        for (int j = 0; j < (1 << HASH_SHIFT); j++) {
                                block = ctx->range[i].blocks[j];
                                if (block) {
                                        for (int k = 0, offset = block->start_offset; k < block->nreg; k++, offset += 4) {
                                                range = &ctx->range[CTX_RANGE_ID(offset)];
                                                range->blocks[CTX_BLOCK_ID(offset)] = NULL;
                                        }
                                        for (int k = 1; k <= block->nbo; k++) {
                                                pipe_resource_reference((struct pipe_resource**)&block->reloc[k].bo, NULL);
                                        }
                                        free(block);
                                }
                        }
                        free(ctx->range[i].blocks);
                }
        }
        r600_free_resource_range(ctx, &ctx->ps_resources, ctx->num_ps_resources);
        r600_free_resource_range(ctx, &ctx->vs_resources, ctx->num_vs_resources);
        free(ctx->blocks);
}

static void r600_add_resource_block(struct r600_context *ctx, struct r600_range *range, int num_blocks, int *index)
{
        int c = *index;
        for (int j = 0; j < num_blocks; j++) {
                if (!range->blocks[j])
                        continue;

                ctx->blocks[c++] = range->blocks[j];
        }
        *index = c;
}

int r600_setup_block_table(struct r600_context *ctx)
{
        /* setup block table */
        int c = 0;
        ctx->blocks = calloc(ctx->nblocks, sizeof(void*));
        if (!ctx->blocks)
                return -ENOMEM;
        for (int i = 0; i < NUM_RANGES; i++) {
                if (!ctx->range[i].blocks)
                        continue;
                for (int j = 0, add; j < (1 << HASH_SHIFT); j++) {
                        if (!ctx->range[i].blocks[j])
                                continue;

                        add = 1;
                        for (int k = 0; k < c; k++) {
                                if (ctx->blocks[k] == ctx->range[i].blocks[j]) {
                                        add = 0;
                                        break;
                                }
                        }
                        if (add) {
                                assert(c < ctx->nblocks);
                                ctx->blocks[c++] = ctx->range[i].blocks[j];
                                j += (ctx->range[i].blocks[j]->nreg) - 1;
                        }
                }
        }

        r600_add_resource_block(ctx, &ctx->ps_resources, ctx->num_ps_resources, &c);
        r600_add_resource_block(ctx, &ctx->vs_resources, ctx->num_vs_resources, &c);
        return 0;
}

int r600_context_init(struct r600_context *ctx)
{
        int r;

        /* add blocks */
        r = r600_context_add_block(ctx, r600_config_reg_list,
                                   Elements(r600_config_reg_list), PKT3_SET_CONFIG_REG, R600_CONFIG_REG_OFFSET);
        if (r)
                goto out_err;
        r = r600_context_add_block(ctx, r600_context_reg_list,
                                   Elements(r600_context_reg_list), PKT3_SET_CONTEXT_REG, R600_CONTEXT_REG_OFFSET);
        if (r)
                goto out_err;
        r = r600_context_add_block(ctx, r600_ctl_const_list,
                                   Elements(r600_ctl_const_list), PKT3_SET_CTL_CONST, R600_CTL_CONST_OFFSET);
        if (r)
                goto out_err;

        /* PS SAMPLER BORDER */
        for (int j = 0, offset = 0; j < 18; j++, offset += 0x10) {
                r = r600_state_sampler_border_init(ctx, offset);
                if (r)
                        goto out_err;
        }

        /* VS SAMPLER BORDER */
        for (int j = 0, offset = 0x200; j < 18; j++, offset += 0x10) {
                r = r600_state_sampler_border_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* PS SAMPLER */
        for (int j = 0, offset = 0; j < 18; j++, offset += 0xC) {
                r = r600_state_sampler_init(ctx, offset);
                if (r)
                        goto out_err;
        }
        /* VS SAMPLER */
        for (int j = 0, offset = 0xD8; j < 18; j++, offset += 0xC) {
                r = r600_state_sampler_init(ctx, offset);
                if (r)
                        goto out_err;
        }

        ctx->num_ps_resources = 160;
        ctx->num_vs_resources = 160;
        r = r600_resource_range_init(ctx, &ctx->ps_resources, 0, 160, 0x1c);
        if (r)
                goto out_err;
        r = r600_resource_range_init(ctx, &ctx->vs_resources, 0x1180, 160, 0x1c);
        if (r)
                goto out_err;

        /* PS loop const */
        r600_loop_const_init(ctx, 0);
        /* VS loop const */
        r600_loop_const_init(ctx, 32);

        r = r600_setup_block_table(ctx);
        if (r)
                goto out_err;

        ctx->max_db = 4;
        return 0;
out_err:
        r600_context_fini(ctx);
        return r;
}

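/* Conservative space check: sum the worst-case dword count of everything
 * that may still be emitted into this command stream (dirty state, one draw,
 * query/streamout suspends, trailing cache flushes, the fence) and flush
 * early if the fixed-size CS buffer could overflow. (Summary of the code
 * below.)
 */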
void r600_need_cs_space(struct r600_context *ctx, unsigned num_dw,
                        boolean count_draw_in)
{
        struct r600_atom *state;

        /* The number of dwords we already used in the CS so far. */
        num_dw += ctx->cs->cdw;

        if (count_draw_in) {
                /* The number of dwords all the dirty states would take. */
                LIST_FOR_EACH_ENTRY(state, &ctx->dirty_states, head) {
                        num_dw += state->num_dw;
                }

                num_dw += ctx->pm4_dirty_cdwords;

                /* The upper-bound of how much a draw command would take. */
                num_dw += R600_MAX_DRAW_CS_DWORDS;
        }

        /* Count in queries_suspend. */
        num_dw += ctx->num_cs_dw_nontimer_queries_suspend;
        num_dw += ctx->num_cs_dw_timer_queries_suspend;

        /* Count in streamout_end at the end of CS. */
        num_dw += ctx->num_cs_dw_streamout_end;

        /* Count in render_condition(NULL) at the end of CS. */
        if (ctx->predicate_drawing) {
                num_dw += 3;
        }

        /* Count in framebuffer cache flushes at the end of CS. */
        num_dw += 7; /* one SURFACE_SYNC and CACHE_FLUSH_AND_INV (r6xx-only) */

        /* Save 16 dwords for the fence mechanism. */
        num_dw += 16;

        /* Flush if there's not enough space. */
        if (num_dw > RADEON_MAX_CMDBUF_DWORDS) {
                r600_flush(&ctx->context, NULL, RADEON_FLUSH_ASYNC);
        }
}

void r600_context_dirty_block(struct r600_context *ctx,
                              struct r600_block *block,
                              int dirty, int index)
{
        if ((index + 1) > block->nreg_dirty)
                block->nreg_dirty = index + 1;

        if ((dirty != (block->status & R600_BLOCK_STATUS_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
                block->status |= R600_BLOCK_STATUS_DIRTY;
                ctx->pm4_dirty_cdwords += block->pm4_ndwords;
                if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
                        block->status |= R600_BLOCK_STATUS_ENABLED;
                        LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
                }
                LIST_ADDTAIL(&block->list, &ctx->dirty);

                if (block->flags & REG_FLAG_FLUSH_CHANGE) {
                        r600_context_ps_partial_flush(ctx);
                }
        }
}

/**
 * If reg needs a reloc, this function will add it to its block's reloc list.
 * @return true if reg needs a reloc, false otherwise
 */
static bool r600_reg_set_block_reloc(struct r600_pipe_reg *reg)
{
        unsigned reloc_id;

        if (!reg->block->pm4_bo_index[reg->id]) {
                return false;
        }
        /* find relocation */
        reloc_id = reg->block->pm4_bo_index[reg->id];
        pipe_resource_reference(
                (struct pipe_resource**)&reg->block->reloc[reloc_id].bo,
                &reg->bo->b.b);
        reg->block->reloc[reloc_id].bo_usage = reg->bo_usage;
        return true;
}

/**
 * This function will emit all the registers in state directly to the command
 * stream, allowing you to bypass the r600_context dirty list.
 *
 * This is used for dispatching compute shaders to avoid mixing compute and
 * 3D states in the context's dirty list.
 *
 * @param pkt_flags Should be either 0 or RADEON_CP_PACKET3_COMPUTE_MODE.  This
 * value will be passed on to r600_context_block_emit_dirty and or'd against
 * the PKT3 headers.
 */
void r600_context_pipe_state_emit(struct r600_context *ctx,
                          struct r600_pipe_state *state,
                          unsigned pkt_flags)
{
        unsigned i;

        /* Mark all blocks as dirty:
         * Since two registers can be in the same block, we need to make sure
         * we mark all the blocks dirty before we emit any of them.  If we were
         * to mark blocks dirty and emit them in the same loop, like this:
         *
         * foreach (reg in state->regs) {
         *     mark_dirty(reg->block)
         *     emit_block(reg->block)
         * }
         *
         * Then if we have two registers in this state that are in the same
         * block, we would end up emitting that block twice.
         */
        for (i = 0; i < state->nregs; i++) {
                struct r600_pipe_reg *reg = &state->regs[i];
                /* Mark all the registers in the block as dirty */
                reg->block->nreg_dirty = reg->block->nreg;
                reg->block->status |= R600_BLOCK_STATUS_DIRTY;
                /* Update the reloc for this register if necessary. */
                r600_reg_set_block_reloc(reg);
        }

        /* Emit the register writes */
        for (i = 0; i < state->nregs; i++) {
                struct r600_pipe_reg *reg = &state->regs[i];
                if (reg->block->status & R600_BLOCK_STATUS_DIRTY) {
                        r600_context_block_emit_dirty(ctx, reg->block, pkt_flags);
                }
        }
}

void r600_context_pipe_state_set(struct r600_context *ctx, struct r600_pipe_state *state)
{
        struct r600_block *block;
        int dirty;
        for (int i = 0; i < state->nregs; i++) {
                unsigned id;
                struct r600_pipe_reg *reg = &state->regs[i];

                block = reg->block;
                id = reg->id;

                dirty = block->status & R600_BLOCK_STATUS_DIRTY;

                if (reg->value != block->reg[id]) {
                        block->reg[id] = reg->value;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
                if (block->flags & REG_FLAG_DIRTY_ALWAYS)
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                if (r600_reg_set_block_reloc(reg)) {
                        /* always force dirty for relocs for now */
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }

                if (dirty)
                        r600_context_dirty_block(ctx, block, dirty, id);
        }
}

static void r600_context_dirty_resource_block(struct r600_context *ctx,
                                              struct r600_block *block,
                                              int dirty, int index)
{
        block->nreg_dirty = index + 1;

        if ((dirty != (block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) || !(block->status & R600_BLOCK_STATUS_ENABLED)) {
                block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
                ctx->pm4_dirty_cdwords += block->pm4_ndwords;
                if (!(block->status & R600_BLOCK_STATUS_ENABLED)) {
                        block->status |= R600_BLOCK_STATUS_ENABLED;
                        LIST_ADDTAIL(&block->enable_list, &ctx->enable_list);
                }
                LIST_ADDTAIL(&block->list, &ctx->resource_dirty);
        }
}

void r600_context_pipe_state_set_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, struct r600_block *block)
{
        int dirty;
        int num_regs = ctx->chip_class >= EVERGREEN ? 8 : 7;

        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_RESOURCE_DIRTY);
                pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, NULL);
                pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, NULL);
                LIST_DELINIT(&block->list);
                LIST_DELINIT(&block->enable_list);
                return;
        }

        dirty = block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY;

        if (memcmp(block->reg, state->val, num_regs*4)) {
                memcpy(block->reg, state->val, num_regs * 4);
                dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
        }

        /* if no BOs on block, force dirty */
        if (!block->reloc[1].bo || !block->reloc[2].bo)
                dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;

        if (!dirty) {
                if ((block->reloc[1].bo->buf != state->bo[0]->buf) ||
                    (block->reloc[2].bo->buf != state->bo[1]->buf))
                        dirty |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
        }

        if (dirty) {
                /* TEXTURE RESOURCE */
                pipe_resource_reference((struct pipe_resource**)&block->reloc[1].bo, &state->bo[0]->b.b);
                block->reloc[1].bo_usage = state->bo_usage[0];
                pipe_resource_reference((struct pipe_resource**)&block->reloc[2].bo, &state->bo[1]->b.b);
                block->reloc[2].bo_usage = state->bo_usage[1];

                r600_context_dirty_resource_block(ctx, block, dirty, num_regs - 1);
        }
}

void r600_context_pipe_state_set_ps_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
        struct r600_block *block = ctx->ps_resources.blocks[rid];

        r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_vs_resource(struct r600_context *ctx, struct r600_pipe_resource_state *state, unsigned rid)
{
        struct r600_block *block = ctx->vs_resources.blocks[rid];

        r600_context_pipe_state_set_resource(ctx, state, block);
}

void r600_context_pipe_state_set_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        int i;
        int dirty;

        range = &ctx->range[CTX_RANGE_ID(offset)];
        block = range->blocks[CTX_BLOCK_ID(offset)];
        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
                LIST_DELINIT(&block->list);
                LIST_DELINIT(&block->enable_list);
                return;
        }
        dirty = block->status & R600_BLOCK_STATUS_DIRTY;

        for (i = 0; i < 3; i++) {
                if (block->reg[i] != state->regs[i].value) {
                        block->reg[i] = state->regs[i].value;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
        }

        if (dirty)
                r600_context_dirty_block(ctx, block, dirty, 2);
}

static inline void r600_context_pipe_state_set_sampler_border(struct r600_context *ctx, struct r600_pipe_state *state, unsigned offset)
{
        struct r600_range *range;
        struct r600_block *block;
        int i;
        int dirty;

        range = &ctx->range[CTX_RANGE_ID(offset)];
        block = range->blocks[CTX_BLOCK_ID(offset)];
        if (state == NULL) {
                block->status &= ~(R600_BLOCK_STATUS_ENABLED | R600_BLOCK_STATUS_DIRTY);
                LIST_DELINIT(&block->list);
                LIST_DELINIT(&block->enable_list);
                return;
        }
        if (state->nregs <= 3) {
                return;
        }
        dirty = block->status & R600_BLOCK_STATUS_DIRTY;
        for (i = 0; i < 4; i++) {
                if (block->reg[i] != state->regs[i + 3].value) {
                        block->reg[i] = state->regs[i + 3].value;
                        dirty |= R600_BLOCK_STATUS_DIRTY;
                }
        }

        /* We have to flush the shaders before we change the border color
         * registers, or previous draw commands that haven't completed yet
         * will end up using the new border color. */
        if (dirty & R600_BLOCK_STATUS_DIRTY)
                r600_context_ps_partial_flush(ctx);
        if (dirty)
                r600_context_dirty_block(ctx, block, dirty, 3);
}

void r600_context_pipe_state_set_ps_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
        unsigned offset;

        offset = R_03C000_SQ_TEX_SAMPLER_WORD0_0 + 12*id;
        r600_context_pipe_state_set_sampler(ctx, state, offset);
        offset = R_00A400_TD_PS_SAMPLER0_BORDER_RED + 16*id;
        r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

void r600_context_pipe_state_set_vs_sampler(struct r600_context *ctx, struct r600_pipe_state *state, unsigned id)
{
        unsigned offset;

        offset = R_03C000_SQ_TEX_SAMPLER_WORD0_0 + 12*(id + 18);
        r600_context_pipe_state_set_sampler(ctx, state, offset);
        offset = R_00A600_TD_VS_SAMPLER0_BORDER_RED + 16*id;
        r600_context_pipe_state_set_sampler_border(ctx, state, offset);
}

/**
 * @param pkt_flags should be set to RADEON_CP_PACKET3_COMPUTE_MODE if this
 * block will be used for compute shaders.
 */
void r600_context_block_emit_dirty(struct r600_context *ctx, struct r600_block *block,
        unsigned pkt_flags)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        int optional = block->nbo == 0 && !(block->flags & REG_FLAG_DIRTY_ALWAYS);
        int cp_dwords = block->pm4_ndwords, start_dword = 0;
        int new_dwords = 0;
        int nbo = block->nbo;

        if (block->nreg_dirty == 0 && optional) {
                goto out;
        }

        if (nbo) {
                for (int j = 0; j < block->nreg; j++) {
                        if (block->pm4_bo_index[j]) {
                                /* find relocation */
                                struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
                                if (reloc->bo) {
                                        block->pm4[reloc->bo_pm4_index] =
                                                        r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
                                } else {
                                        block->pm4[reloc->bo_pm4_index] = 0;
                                }
                                nbo--;
                                if (nbo == 0)
                                        break;
                        }
                }
        }

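        /* If only a prefix of the block's registers is dirty (and the block
         * has no BOs or always-dirty registers), emit a shortened packet:
         * copy fewer dwords now and patch the PKT3 count field afterwards. */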
        optional &= (block->nreg_dirty != block->nreg);
        if (optional) {
                new_dwords = block->nreg_dirty;
                start_dword = cs->cdw;
                cp_dwords = new_dwords + 2;
        }
        memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);

        /* We are applying the pkt_flags after copying the register block to
         * the command stream, because it is possible this block will be
         * emitted with a different pkt_flags, and we don't want to store the
         * pkt_flags in the block.
         */
        cs->buf[cs->cdw] |= pkt_flags;
        cs->cdw += cp_dwords;

        if (optional) {
                uint32_t newword;

                newword = cs->buf[start_dword];
                newword &= PKT_COUNT_C;
                newword |= PKT_COUNT_S(new_dwords);
                cs->buf[start_dword] = newword;
        }
out:
        block->status ^= R600_BLOCK_STATUS_DIRTY;
        block->nreg_dirty = 0;
        LIST_DELINIT(&block->list);
}

void r600_context_block_resource_emit_dirty(struct r600_context *ctx, struct r600_block *block)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        int cp_dwords = block->pm4_ndwords;
        int nbo = block->nbo;

        for (int j = 0; j < nbo; j++) {
                if (block->pm4_bo_index[j]) {
                        /* find relocation */
                        struct r600_block_reloc *reloc = &block->reloc[block->pm4_bo_index[j]];
                        block->pm4[reloc->bo_pm4_index] =
                                r600_context_bo_reloc(ctx, reloc->bo, reloc->bo_usage);
                }
        }

        memcpy(&cs->buf[cs->cdw], block->pm4, cp_dwords * 4);
        cs->cdw += cp_dwords;

        block->status ^= R600_BLOCK_STATUS_RESOURCE_DIRTY;
        block->nreg_dirty = 0;
        LIST_DELINIT(&block->list);
}

void r600_inval_shader_cache(struct r600_context *ctx)
{
        ctx->surface_sync_cmd.flush_flags |= S_0085F0_SH_ACTION_ENA(1);
        r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_inval_texture_cache(struct r600_context *ctx)
{
        ctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
        r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_inval_vertex_cache(struct r600_context *ctx)
{
        if (ctx->has_vertex_cache) {
                ctx->surface_sync_cmd.flush_flags |= S_0085F0_VC_ACTION_ENA(1);
        } else {
                /* Some GPUs don't have the vertex cache and must use the texture cache instead. */
                ctx->surface_sync_cmd.flush_flags |= S_0085F0_TC_ACTION_ENA(1);
        }
        r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
}

void r600_flush_framebuffer(struct r600_context *ctx, bool flush_now)
{
        if (!(ctx->flags & R600_CONTEXT_DST_CACHES_DIRTY))
                return;

        ctx->surface_sync_cmd.flush_flags |=
                r600_get_cb_flush_flags(ctx) |
                (ctx->framebuffer.zsbuf ? S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1) : 0);

        if (flush_now) {
                r600_emit_atom(ctx, &ctx->surface_sync_cmd.atom);
        } else {
                r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);
        }

        /* Also add a complete cache flush to work around broken flushing on R6xx. */
        if (ctx->chip_class == R600) {
                if (flush_now) {
                        r600_emit_atom(ctx, &ctx->r6xx_flush_and_inv_cmd);
                } else {
                        r600_atom_dirty(ctx, &ctx->r6xx_flush_and_inv_cmd);
                }
        }

        ctx->flags &= ~R600_CONTEXT_DST_CACHES_DIRTY;
}

void r600_context_flush(struct r600_context *ctx, unsigned flags)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_block *enable_block = NULL;
        bool timer_queries_suspended = false;
        bool nontimer_queries_suspended = false;
        bool streamout_suspended = false;

        if (cs->cdw == ctx->start_cs_cmd.atom.num_dw)
                return;

        /* suspend queries */
        if (ctx->num_cs_dw_timer_queries_suspend) {
                r600_suspend_timer_queries(ctx);
                timer_queries_suspended = true;
        }
        if (ctx->num_cs_dw_nontimer_queries_suspend) {
                r600_suspend_nontimer_queries(ctx);
                nontimer_queries_suspended = true;
        }

        if (ctx->num_cs_dw_streamout_end) {
                r600_context_streamout_end(ctx);
                streamout_suspended = true;
        }

        r600_flush_framebuffer(ctx, true);

        /* partial flush is needed to avoid lockups on some chips with user fences */
        r600_context_ps_partial_flush(ctx);

        /* old kernels and userspace don't set SX_MISC, so we must reset it to 0 here */
        if (ctx->chip_class <= R700) {
                r600_write_context_reg(cs, R_028350_SX_MISC, 0);
        }

        /* force to keep tiling flags */
        flags |= RADEON_FLUSH_KEEP_TILING_FLAGS;

        /* Flush the CS. */
        ctx->ws->cs_flush(ctx->cs, flags);

        ctx->pm4_dirty_cdwords = 0;
        ctx->flags = 0;

        /* Begin a new CS. */
        r600_emit_atom(ctx, &ctx->start_cs_cmd.atom);

        /* Invalidate caches. */
        r600_inval_texture_cache(ctx);
        r600_flush_framebuffer(ctx, false);

        /* Re-emit states. */
        r600_atom_dirty(ctx, &ctx->cb_misc_state.atom);
        r600_atom_dirty(ctx, &ctx->db_misc_state.atom);

        ctx->vertex_buffer_state.dirty_mask = ctx->vertex_buffer_state.enabled_mask;
        r600_vertex_buffers_dirty(ctx);

        ctx->vs_constbuf_state.dirty_mask = ctx->vs_constbuf_state.enabled_mask;
        ctx->ps_constbuf_state.dirty_mask = ctx->ps_constbuf_state.enabled_mask;
        r600_constant_buffers_dirty(ctx, &ctx->vs_constbuf_state);
        r600_constant_buffers_dirty(ctx, &ctx->ps_constbuf_state);

        ctx->vs_samplers.views.dirty_mask = ctx->vs_samplers.views.enabled_mask;
        ctx->ps_samplers.views.dirty_mask = ctx->ps_samplers.views.enabled_mask;
        r600_sampler_views_dirty(ctx, &ctx->vs_samplers.views);
        r600_sampler_views_dirty(ctx, &ctx->ps_samplers.views);

        if (streamout_suspended) {
                ctx->streamout_start = TRUE;
                ctx->streamout_append_bitmask = ~0;
        }

        /* resume queries */
        if (timer_queries_suspended) {
                r600_resume_timer_queries(ctx);
        }
        if (nontimer_queries_suspended) {
                r600_resume_nontimer_queries(ctx);
        }

1310         /* set all valid group as dirty so they get reemited on
1311          * next draw command
1312          */
1313         LIST_FOR_EACH_ENTRY(enable_block, &ctx->enable_list, enable_list) {
1314                 if (!(enable_block->flags & BLOCK_FLAG_RESOURCE)) {
1315                         if(!(enable_block->status & R600_BLOCK_STATUS_DIRTY)) {
1316                                 LIST_ADDTAIL(&enable_block->list,&ctx->dirty);
1317                                 enable_block->status |= R600_BLOCK_STATUS_DIRTY;
1318                         }
1319                 } else {
1320                         if(!(enable_block->status & R600_BLOCK_STATUS_RESOURCE_DIRTY)) {
1321                                 LIST_ADDTAIL(&enable_block->list,&ctx->resource_dirty);
1322                                 enable_block->status |= R600_BLOCK_STATUS_RESOURCE_DIRTY;
1323                         }
1324                 }
1325                 ctx->pm4_dirty_cdwords += enable_block->pm4_ndwords;
1326                 enable_block->nreg_dirty = enable_block->nreg;
1327         }
1328 }
1329
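/* Emit a fence: once all prior rendering has completed, the CP writes
 * 'value' into 'fence_bo' at the given dword offset. */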
void r600_context_emit_fence(struct r600_context *ctx, struct r600_resource *fence_bo, unsigned offset, unsigned value)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        uint64_t va;

        r600_need_cs_space(ctx, 10, FALSE);

        va = r600_resource_va(&ctx->screen->screen, (void*)fence_bo);
        va = va + (offset << 2);

        r600_context_ps_partial_flush(ctx);
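        /* EVENT_WRITE_EOP: write DATA_LO to the fence address once the
         * cache-flush-and-invalidate timestamp event has completed. */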
        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE_EOP, 4, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5);
        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;       /* ADDRESS_LO */
        /* DATA_SEL | INT_EN | ADDRESS_HI */
        cs->buf[cs->cdw++] = (1 << 29) | (0 << 24) | ((va >> 32UL) & 0xFF);
        cs->buf[cs->cdw++] = value;                   /* DATA_LO */
        cs->buf[cs->cdw++] = 0;                       /* DATA_HI */
        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, fence_bo, RADEON_USAGE_WRITE);
}

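/* Flush the VGT streamout state and wait until the CP has finished
 * writing the buffer offsets back. */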
static void r600_flush_vgt_streamout(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;

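        /* Clear CP_STRMOUT_CNTL, including the OFFSET_UPDATE_DONE bit. */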
        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONFIG_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_008490_CP_STRMOUT_CNTL - R600_CONFIG_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = 0;

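        /* Trigger the streamout flush; OFFSET_UPDATE_DONE is set once the
         * buffer offsets have been updated. */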
        cs->buf[cs->cdw++] = PKT3(PKT3_EVENT_WRITE, 0, 0);
        cs->buf[cs->cdw++] = EVENT_TYPE(EVENT_TYPE_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0);

        cs->buf[cs->cdw++] = PKT3(PKT3_WAIT_REG_MEM, 5, 0);
        cs->buf[cs->cdw++] = WAIT_REG_MEM_EQUAL; /* wait until the register is equal to the reference value */
        cs->buf[cs->cdw++] = R_008490_CP_STRMOUT_CNTL >> 2;  /* register */
        cs->buf[cs->cdw++] = 0;
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* reference value */
        cs->buf[cs->cdw++] = S_008490_OFFSET_UPDATE_DONE(1); /* mask */
        cs->buf[cs->cdw++] = 4; /* poll interval */
}

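/* Enable streamout for the buffers in 'buffer_enable_bit', or disable
 * streamout entirely if the mask is 0. */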
static void r600_set_streamout_enable(struct r600_context *ctx, unsigned buffer_enable_bit)
{
        struct radeon_winsys_cs *cs = ctx->cs;

        if (buffer_enable_bit) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(1);

                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028B20_VGT_STRMOUT_BUFFER_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = buffer_enable_bit;
        } else {
                cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
                cs->buf[cs->cdw++] = (R_028AB0_VGT_STRMOUT_EN - R600_CONTEXT_REG_OFFSET) >> 2;
                cs->buf[cs->cdw++] = S_028AB0_STREAMOUT(0);
        }
}

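/* Begin streamout: enable the bound targets, program each target's buffer
 * size, stride and base address, and load the initial buffer offsets. */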
void r600_context_streamout_begin(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_so_target **t = ctx->so_targets;
        unsigned *stride_in_dw = ctx->vs_shader->so.stride;
        unsigned buffer_en, i, update_flags = 0;
        uint64_t va;

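        /* Bitmask of the targets that are actually bound. */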
        buffer_en = (ctx->num_so_targets >= 1 && t[0] ? 1 : 0) |
                    (ctx->num_so_targets >= 2 && t[1] ? 2 : 0) |
                    (ctx->num_so_targets >= 3 && t[2] ? 4 : 0) |
                    (ctx->num_so_targets >= 4 && t[3] ? 8 : 0);

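        /* Account for the streamout_end packets now, so that ending streamout
         * never has to flush the CS itself. */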
        ctx->num_cs_dw_streamout_end =
                12 + /* flush_vgt_streamout */
                util_bitcount(buffer_en) * 8 + /* STRMOUT_BUFFER_UPDATE */
                3 /* set_streamout_enable(0) */;

        r600_need_cs_space(ctx,
                           12 + /* flush_vgt_streamout */
                           6 + /* set_streamout_enable */
                           util_bitcount(buffer_en) * 7 + /* SET_CONTEXT_REG */
                           (ctx->chip_class == R700 ? util_bitcount(buffer_en) * 5 : 0) + /* STRMOUT_BASE_UPDATE */
                           util_bitcount(buffer_en & ctx->streamout_append_bitmask) * 8 + /* STRMOUT_BUFFER_UPDATE */
                           util_bitcount(buffer_en & ~ctx->streamout_append_bitmask) * 6 + /* STRMOUT_BUFFER_UPDATE */
                           (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770 ? 2 : 0) + /* SURFACE_BASE_UPDATE */
                           ctx->num_cs_dw_streamout_end, TRUE);

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
                evergreen_set_streamout_enable(ctx, buffer_en);
        } else {
                r600_flush_vgt_streamout(ctx);
                r600_set_streamout_enable(ctx, buffer_en);
        }

        for (i = 0; i < ctx->num_so_targets; i++) {
                if (t[i]) {
                        t[i]->stride_in_dw = stride_in_dw[i];
                        t[i]->so_index = i;
                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->b.buffer);

                        update_flags |= SURFACE_BASE_UPDATE_STRMOUT(i);

                        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 3, 0);
                        cs->buf[cs->cdw++] = (R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 +
                                                        16*i - R600_CONTEXT_REG_OFFSET) >> 2;
                        cs->buf[cs->cdw++] = (t[i]->b.buffer_offset +
                                                        t[i]->b.buffer_size) >> 2; /* BUFFER_SIZE (in DW) */
                        cs->buf[cs->cdw++] = stride_in_dw[i];              /* VTX_STRIDE (in DW) */
                        cs->buf[cs->cdw++] = va >> 8;                      /* BUFFER_BASE */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
                                                      RADEON_USAGE_WRITE);

                        /* R7xx requires this packet after updating BUFFER_BASE.
                         * Without this, R7xx locks up. */
                        if (ctx->chip_class == R700) {
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BASE_UPDATE, 1, 0);
                                cs->buf[cs->cdw++] = i;
                                cs->buf[cs->cdw++] = va >> 8;

                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] =
                                        r600_context_bo_reloc(ctx, r600_resource(t[i]->b.buffer),
                                                              RADEON_USAGE_WRITE);
                        }

                        if (ctx->streamout_append_bitmask & (1 << i)) {
                                va = r600_resource_va(&ctx->screen->screen,
                                                      (void*)t[i]->filled_size);
                                /* Append. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                               STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL; /* src address lo */
                                cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */

                                cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                                cs->buf[cs->cdw++] =
                                        r600_context_bo_reloc(ctx, t[i]->filled_size,
                                                              RADEON_USAGE_READ);
                        } else {
                                /* Start from the beginning. */
                                cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                                cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                               STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET); /* control */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = 0; /* unused */
                                cs->buf[cs->cdw++] = t[i]->b.buffer_offset >> 2; /* buffer offset in DW */
                                cs->buf[cs->cdw++] = 0; /* unused */
                        }
                }
        }

        if (ctx->family > CHIP_R600 && ctx->family < CHIP_RV770) {
                cs->buf[cs->cdw++] = PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0);
                cs->buf[cs->cdw++] = update_flags;
        }
}

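/* End streamout: flush the VGT, store each target's filled size to memory,
 * disable streamout and flush the streamout caches. */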
void r600_context_streamout_end(struct r600_context *ctx)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        struct r600_so_target **t = ctx->so_targets;
        unsigned i, flush_flags = 0;
        uint64_t va;

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_flush_vgt_streamout(ctx);
        } else {
                r600_flush_vgt_streamout(ctx);
        }

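        /* Store each target's FILLED_SIZE to memory; it is read back when
         * appending to the buffer or by r600_context_draw_opaque_count(). */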
        for (i = 0; i < ctx->num_so_targets; i++) {
                if (t[i]) {
                        va = r600_resource_va(&ctx->screen->screen,
                                              (void*)t[i]->filled_size);
                        cs->buf[cs->cdw++] = PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0);
                        cs->buf[cs->cdw++] = STRMOUT_SELECT_BUFFER(i) |
                                                       STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                                                       STRMOUT_STORE_BUFFER_FILLED_SIZE; /* control */
                        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* dst address lo */
                        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* dst address hi */
                        cs->buf[cs->cdw++] = 0; /* unused */
                        cs->buf[cs->cdw++] = 0; /* unused */

                        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
                        cs->buf[cs->cdw++] =
                                r600_context_bo_reloc(ctx, t[i]->filled_size,
                                                      RADEON_USAGE_WRITE);

                        flush_flags |= S_0085F0_SO0_DEST_BASE_ENA(1) << i;
                }
        }

        if (ctx->chip_class >= EVERGREEN) {
                evergreen_set_streamout_enable(ctx, 0);
        } else {
                r600_set_streamout_enable(ctx, 0);
        }

        /* This is needed to fix cache flushes on r600. */
        if (ctx->chip_class == R600) {
                if (ctx->family == CHIP_RV670 ||
                    ctx->family == CHIP_RS780 ||
                    ctx->family == CHIP_RS880) {
                        flush_flags |= S_0085F0_DEST_BASE_0_ENA(1);
                }

                r600_atom_dirty(ctx, &ctx->r6xx_flush_and_inv_cmd);
        }

        /* Flush streamout caches. */
        ctx->surface_sync_cmd.flush_flags |=
                S_0085F0_SMX_ACTION_ENA(1) | flush_flags;
        r600_atom_dirty(ctx, &ctx->surface_sync_cmd.atom);

        ctx->num_cs_dw_streamout_end = 0;

#if 0
        for (i = 0; i < ctx->num_so_targets; i++) {
                if (!t[i])
                        continue;

                uint32_t *ptr = ctx->ws->buffer_map(t[i]->filled_size->buf, ctx->cs, RADEON_USAGE_READ);
                printf("FILLED_SIZE%i: %u\n", i, *ptr);
                ctx->ws->buffer_unmap(t[i]->filled_size->buf);
        }
#endif
}

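/* Program the VGT_STRMOUT_DRAW_OPAQUE registers so that a subsequent draw
 * can take its vertex count from the streamout filled size of 't'. */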
void r600_context_draw_opaque_count(struct r600_context *ctx, struct r600_so_target *t)
{
        struct radeon_winsys_cs *cs = ctx->cs;
        uint64_t va = r600_resource_va(&ctx->screen->screen,
                                       (void*)t->filled_size);

        r600_need_cs_space(ctx, 14 + 21, TRUE);

        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET - R600_CONTEXT_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = 0;

        cs->buf[cs->cdw++] = PKT3(PKT3_SET_CONTEXT_REG, 1, 0);
        cs->buf[cs->cdw++] = (R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE - R600_CONTEXT_REG_OFFSET) >> 2;
        cs->buf[cs->cdw++] = t->stride_in_dw;

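        /* Copy the saved filled size from memory into the
         * DRAW_OPAQUE_BUFFER_FILLED_SIZE register. */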
        cs->buf[cs->cdw++] = PKT3(PKT3_COPY_DW, 4, 0);
        cs->buf[cs->cdw++] = COPY_DW_SRC_IS_MEM | COPY_DW_DST_IS_REG;
        cs->buf[cs->cdw++] = va & 0xFFFFFFFFUL;     /* src address lo */
        cs->buf[cs->cdw++] = (va >> 32UL) & 0xFFUL; /* src address hi */
        cs->buf[cs->cdw++] = R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2; /* dst register */
        cs->buf[cs->cdw++] = 0; /* unused */

        cs->buf[cs->cdw++] = PKT3(PKT3_NOP, 0, 0);
        cs->buf[cs->cdw++] = r600_context_bo_reloc(ctx, t->filled_size, RADEON_USAGE_READ);
}