lavapipe: add some safety asserts when beginning rendering
src/gallium/frontends/lavapipe/lvp_execute.c (platform/upstream/mesa.git)
/*
 * Copyright © 2019 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* use a gallium context to execute a command buffer */

#include "lvp_private.h"

#include "pipe/p_context.h"
#include "pipe/p_state.h"
#include "lvp_conv.h"

#include "pipe/p_shader_tokens.h"
#include "tgsi/tgsi_text.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_from_mesa.h"

#include "util/format/u_format.h"
#include "util/u_surface.h"
#include "util/u_sampler.h"
#include "util/u_box.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_prim_restart.h"
#include "util/format/u_format_zs.h"
#include "util/ptralloc.h"

#include "vk_cmd_enqueue_entrypoints.h"
#include "vk_util.h"

#define VK_PROTOTYPES
#include <vulkan/vulkan.h>

#define DOUBLE_EQ(a, b) (fabs((a) - (b)) < DBL_EPSILON)

enum gs_output {
  GS_OUTPUT_NONE,
  GS_OUTPUT_NOT_LINES,
  GS_OUTPUT_LINES,
};

struct lvp_render_attachment {
   struct lvp_image_view *imgv;
   VkResolveModeFlags resolve_mode;
   struct lvp_image_view *resolve_imgv;
   VkAttachmentLoadOp load_op;
   VkClearValue clear_value;
};

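/* CPU-side shadow copy of all gallium state for the command buffer being
 * executed.  Each piece of state has a matching *_dirty flag: command
 * handlers only update the shadow copy and set the flag, and emit_state() /
 * emit_compute_state() flush whatever is dirty to the pipe_context right
 * before a draw or dispatch.
 */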
struct rendering_state {
   struct pipe_context *pctx;
   struct u_upload_mgr *uploader;
   struct cso_context *cso;

   bool blend_dirty;
   bool rs_dirty;
   bool dsa_dirty;
   bool stencil_ref_dirty;
   bool clip_state_dirty;
   bool blend_color_dirty;
   bool ve_dirty;
   bool vb_dirty;
   bool constbuf_dirty[PIPE_SHADER_TYPES];
   bool pcbuf_dirty[PIPE_SHADER_TYPES];
   bool has_pcbuf[PIPE_SHADER_TYPES];
   bool inlines_dirty[PIPE_SHADER_TYPES];
   bool vp_dirty;
   bool scissor_dirty;
   bool ib_dirty;
   bool sample_mask_dirty;
   bool min_samples_dirty;
   struct pipe_draw_indirect_info indirect_info;
   struct pipe_draw_info info;

   struct pipe_grid_info dispatch_info;
   struct pipe_framebuffer_state framebuffer;

   struct pipe_blend_state blend_state;
   struct {
      float offset_units;
      float offset_scale;
      float offset_clamp;
      bool enabled;
   } depth_bias;
   struct pipe_rasterizer_state rs_state;
   struct pipe_depth_stencil_alpha_state dsa_state;

   struct pipe_blend_color blend_color;
   struct pipe_stencil_ref stencil_ref;
   struct pipe_clip_state clip_state;

   int num_scissors;
   struct pipe_scissor_state scissors[16];

   int num_viewports;
   struct pipe_viewport_state viewports[16];
   struct {
      float min, max;
   } depth[16];

   uint8_t patch_vertices;
   ubyte index_size;
   unsigned index_offset;
   struct pipe_resource *index_buffer;
   struct pipe_constant_buffer const_buffer[PIPE_SHADER_TYPES][16];
   int num_const_bufs[PIPE_SHADER_TYPES];
   int num_vb;
   unsigned start_vb;
   struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
   struct cso_velems_state velem;

   struct lvp_access_info access[MESA_SHADER_STAGES];
   struct pipe_sampler_view *sv[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_SAMPLER_VIEWS];
   int num_sampler_views[PIPE_SHADER_TYPES];
   struct pipe_sampler_state ss[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
   /* cso_context api is stupid */
   const struct pipe_sampler_state *cso_ss_ptr[PIPE_SHADER_TYPES][PIPE_MAX_SAMPLERS];
   int num_sampler_states[PIPE_SHADER_TYPES];
   bool sv_dirty[PIPE_SHADER_TYPES];
   bool ss_dirty[PIPE_SHADER_TYPES];

   struct pipe_image_view iv[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_IMAGES];
   int num_shader_images[PIPE_SHADER_TYPES];
   struct pipe_shader_buffer sb[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
   int num_shader_buffers[PIPE_SHADER_TYPES];
   bool iv_dirty[PIPE_SHADER_TYPES];
   bool sb_dirty[PIPE_SHADER_TYPES];
   bool disable_multisample;
   enum gs_output gs_output_lines : 2;

   uint32_t color_write_disables:8;
   uint32_t pad:13;

   void *velems_cso;

   uint8_t push_constants[128 * 4];
   uint16_t push_size[2]; //gfx, compute
   struct {
      void *block[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
      uint16_t size[MAX_PER_STAGE_DESCRIPTOR_UNIFORM_BLOCKS * MAX_SETS];
      uint16_t count;
   } uniform_blocks[PIPE_SHADER_TYPES];

   VkRect2D render_area;
   bool suspending;
   bool render_cond;
   uint32_t color_att_count;
   struct lvp_render_attachment *color_att;
   struct lvp_render_attachment depth_att;
   struct lvp_render_attachment stencil_att;
   struct lvp_image_view *ds_imgv;
   struct lvp_image_view *ds_resolve_imgv;
   uint32_t                                     forced_sample_count;
   VkResolveModeFlagBits                        forced_depth_resolve_mode;
   VkResolveModeFlagBits                        forced_stencil_resolve_mode;

   uint32_t sample_mask;
   unsigned min_samples;
   float min_sample_shading;
   bool force_min_sample;
   bool sample_shading;
   bool depth_clamp_sets_clip;

   uint32_t num_so_targets;
   struct pipe_stream_output_target *so_targets[PIPE_MAX_SO_BUFFERS];
   uint32_t so_offsets[PIPE_MAX_SO_BUFFERS];

   struct lvp_pipeline *pipeline[2];

   bool tess_ccw;
   void *tess_states[2];
};

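/* Debug-build sanity checks for image subresource regions: 3D images are
 * addressed through the z components of the offsets within a single layer,
 * while array textures are addressed through baseArrayLayer/layerCount with
 * the z extent fixed to [0, 1).  Compiled out under NDEBUG.
 */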
ALWAYS_INLINE static void
assert_subresource_layers(const struct pipe_resource *pres, const VkImageSubresourceLayers *layers, const VkOffset3D *offsets)
{
#ifndef NDEBUG
   if (pres->target == PIPE_TEXTURE_3D) {
      assert(layers->baseArrayLayer == 0);
      assert(layers->layerCount == 1);
      assert(offsets[0].z <= pres->depth0);
      assert(offsets[1].z <= pres->depth0);
   } else {
      assert(layers->baseArrayLayer < pres->array_size);
      assert(layers->baseArrayLayer + layers->layerCount <= pres->array_size);
      assert(offsets[0].z == 0);
      assert(offsets[1].z == 1);
   }
#endif
}

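/* Flush the context and block until the backend (llvmpipe) has finished all
 * submitted work; for handlers whose Vulkan semantics require prior work to
 * have completed before they return.
 */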
static void finish_fence(struct rendering_state *state)
{
   struct pipe_fence_handle *handle = NULL;

   state->pctx->flush(state->pctx, &handle, 0);

   state->pctx->screen->fence_finish(state->pctx->screen,
                                     NULL,
                                     handle, PIPE_TIMEOUT_INFINITE);
   state->pctx->screen->fence_reference(state->pctx->screen,
                                        &handle, NULL);
}

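/* Push constants and inline uniform blocks are packed together into constant
 * buffer slot 0 (UBO0), laid out as:
 *
 *    [ push constants | uniform block 0 | uniform block 1 | ... ]
 *
 * get_pcbuf_size() and calc_ubo0_size() compute the sizes, fill_ubo0()
 * copies the data, and update_pcbuf() uploads the result and binds it to
 * slot 0.
 */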
static unsigned
get_pcbuf_size(struct rendering_state *state, enum pipe_shader_type pstage)
{
   bool is_compute = pstage == PIPE_SHADER_COMPUTE;
   return state->has_pcbuf[pstage] ? state->push_size[is_compute] : 0;
}

static unsigned
calc_ubo0_size(struct rendering_state *state, enum pipe_shader_type pstage)
{
   unsigned size = get_pcbuf_size(state, pstage);
   for (unsigned i = 0; i < state->uniform_blocks[pstage].count; i++)
      size += state->uniform_blocks[pstage].size[i];
   return size;
}

static void
fill_ubo0(struct rendering_state *state, uint8_t *mem, enum pipe_shader_type pstage)
{
   unsigned push_size = get_pcbuf_size(state, pstage);
   if (push_size)
      memcpy(mem, state->push_constants, push_size);

   mem += push_size;
   for (unsigned i = 0; i < state->uniform_blocks[pstage].count; i++) {
      unsigned size = state->uniform_blocks[pstage].size[i];
      memcpy(mem, state->uniform_blocks[pstage].block[i], size);
      mem += size;
   }
}

static void
update_pcbuf(struct rendering_state *state, enum pipe_shader_type pstage)
{
   unsigned size = calc_ubo0_size(state, pstage);
   if (size) {
      uint8_t *mem;
      struct pipe_constant_buffer cbuf;
      cbuf.buffer_size = size;
      cbuf.buffer = NULL;
      cbuf.user_buffer = NULL;
      u_upload_alloc(state->uploader, 0, size, 64, &cbuf.buffer_offset, &cbuf.buffer, (void**)&mem);
      fill_ubo0(state, mem, pstage);
      state->pctx->set_constant_buffer(state->pctx, pstage, 0, true, &cbuf);
   }
   state->pcbuf_dirty[pstage] = false;
}

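/* Re-specialize a shader whose uniforms can be inlined as constants: read
 * back the current push-constant/UBO words, bake them into a clone of the
 * pipeline's NIR with lvp_inline_uniforms, then compile and bind the result
 * in place of the generic variant.  If the optimized shader barely shrinks
 * (less than half the SSA defs removed) and inlining isn't mandatory,
 * can_inline is cleared so future binds skip this path.
 */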
static void
update_inline_shader_state(struct rendering_state *state, enum pipe_shader_type sh, bool pcbuf_dirty, bool constbuf_dirty)
{
   bool is_compute = sh == PIPE_SHADER_COMPUTE;
   uint32_t inline_uniforms[MAX_INLINABLE_UNIFORMS];
   unsigned stage = tgsi_processor_to_shader_stage(sh);
   state->inlines_dirty[sh] = false;
   if (!state->pipeline[is_compute]->inlines[stage].can_inline)
      return;
   struct lvp_pipeline *pipeline = state->pipeline[is_compute];
   /* these buffers have already been flushed in llvmpipe, so they're safe to read */
   nir_shader *base_nir = pipeline->pipeline_nir[stage];
   if (stage == PIPE_SHADER_TESS_EVAL && state->tess_ccw)
      base_nir = pipeline->tess_ccw;
   nir_shader *nir = nir_shader_clone(pipeline->pipeline_nir[stage], base_nir);
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   unsigned ssa_alloc = impl->ssa_alloc;
   unsigned count = pipeline->inlines[stage].count[0];
   if (count && pcbuf_dirty) {
      unsigned push_size = get_pcbuf_size(state, sh);
      for (unsigned i = 0; i < count; i++) {
         unsigned offset = pipeline->inlines[stage].uniform_offsets[0][i];
         if (offset < push_size) {
            memcpy(&inline_uniforms[i], &state->push_constants[offset], sizeof(uint32_t));
         } else {
            for (unsigned i = 0; i < state->uniform_blocks[sh].count; i++) {
               if (offset < push_size + state->uniform_blocks[sh].size[i]) {
                  unsigned ubo_offset = offset - push_size;
                  uint8_t *block = state->uniform_blocks[sh].block[i];
                  memcpy(&inline_uniforms[i], &block[ubo_offset], sizeof(uint32_t));
                  break;
               }
               push_size += state->uniform_blocks[sh].size[i];
            }
         }
      }
      NIR_PASS_V(nir, lvp_inline_uniforms, pipeline, inline_uniforms, 0);
   }
   if (constbuf_dirty) {
      struct pipe_box box = {0};
      u_foreach_bit(slot, pipeline->inlines[stage].can_inline) {
         unsigned count = pipeline->inlines[stage].count[slot];
         struct pipe_constant_buffer *cbuf = &state->const_buffer[sh][slot - 1];
         struct pipe_resource *pres = cbuf->buffer;
         box.x = cbuf->buffer_offset;
         box.width = cbuf->buffer_size - cbuf->buffer_offset;
         struct pipe_transfer *xfer;
         uint8_t *map = state->pctx->buffer_map(state->pctx, pres, 0, PIPE_MAP_READ, &box, &xfer);
         for (unsigned i = 0; i < count; i++) {
            unsigned offset = pipeline->inlines[stage].uniform_offsets[slot][i];
            memcpy(&inline_uniforms[i], map + offset, sizeof(uint32_t));
         }
         state->pctx->buffer_unmap(state->pctx, xfer);
         NIR_PASS_V(nir, lvp_inline_uniforms, pipeline, inline_uniforms, slot);
      }
   }
   lvp_shader_optimize(nir);
   impl = nir_shader_get_entrypoint(nir);
   void *shader_state;
   if (ssa_alloc - impl->ssa_alloc < ssa_alloc / 2 &&
       !pipeline->inlines[stage].must_inline) {
      /* not enough change; don't inline further */
      pipeline->inlines[stage].can_inline = 0;
      ralloc_free(nir);
      pipeline->shader_cso[sh] = lvp_pipeline_compile(pipeline, nir_shader_clone(NULL, pipeline->pipeline_nir[stage]));
      shader_state = pipeline->shader_cso[sh];
   } else {
      shader_state = lvp_pipeline_compile(pipeline, nir);
   }
   switch (sh) {
   case PIPE_SHADER_VERTEX:
      state->pctx->bind_vs_state(state->pctx, shader_state);
      break;
   case PIPE_SHADER_TESS_CTRL:
      state->pctx->bind_tcs_state(state->pctx, shader_state);
      break;
   case PIPE_SHADER_TESS_EVAL:
      state->pctx->bind_tes_state(state->pctx, shader_state);
      break;
   case PIPE_SHADER_GEOMETRY:
      state->pctx->bind_gs_state(state->pctx, shader_state);
      break;
   case PIPE_SHADER_FRAGMENT:
      state->pctx->bind_fs_state(state->pctx, shader_state);
      break;
   case PIPE_SHADER_COMPUTE:
      state->pctx->bind_compute_state(state->pctx, shader_state);
      break;
   default: break;
   }
}

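/* Flush dirty compute state to the pipe_context.  The pcbuf/constbuf dirty
 * bits are sampled before being cleared so update_inline_shader_state()
 * still knows which uniform sources changed.
 */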
static void emit_compute_state(struct rendering_state *state)
{
   if (state->iv_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_shader_images(state->pctx, PIPE_SHADER_COMPUTE,
                                     0, state->num_shader_images[PIPE_SHADER_COMPUTE],
                                     0, state->iv[PIPE_SHADER_COMPUTE]);
      state->iv_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   bool pcbuf_dirty = state->pcbuf_dirty[PIPE_SHADER_COMPUTE];
   if (state->pcbuf_dirty[PIPE_SHADER_COMPUTE])
      update_pcbuf(state, PIPE_SHADER_COMPUTE);

   bool constbuf_dirty = state->constbuf_dirty[PIPE_SHADER_COMPUTE];
   if (state->constbuf_dirty[PIPE_SHADER_COMPUTE]) {
      for (unsigned i = 0; i < state->num_const_bufs[PIPE_SHADER_COMPUTE]; i++)
         state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
                                          i + 1, false, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
      state->constbuf_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->inlines_dirty[PIPE_SHADER_COMPUTE])
      update_inline_shader_state(state, PIPE_SHADER_COMPUTE, pcbuf_dirty, constbuf_dirty);

   if (state->sb_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_shader_buffers(state->pctx, PIPE_SHADER_COMPUTE,
                                      0, state->num_shader_buffers[PIPE_SHADER_COMPUTE],
                                      state->sb[PIPE_SHADER_COMPUTE], state->access[MESA_SHADER_COMPUTE].buffers_written);
      state->sb_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->sv_dirty[PIPE_SHADER_COMPUTE]) {
      state->pctx->set_sampler_views(state->pctx, PIPE_SHADER_COMPUTE, 0, state->num_sampler_views[PIPE_SHADER_COMPUTE],
                                     0, false, state->sv[PIPE_SHADER_COMPUTE]);
      state->sv_dirty[PIPE_SHADER_COMPUTE] = false;
   }

   if (state->ss_dirty[PIPE_SHADER_COMPUTE]) {
      cso_set_samplers(state->cso, PIPE_SHADER_COMPUTE, state->num_sampler_states[PIPE_SHADER_COMPUTE], state->cso_ss_ptr[PIPE_SHADER_COMPUTE]);
      state->ss_dirty[PIPE_SHADER_COMPUTE] = false;
   }
}

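/* Flush all dirty graphics state ahead of a draw.  Color-write disables are
 * honored by temporarily zeroing the per-RT colormasks around
 * cso_set_blend() and then restoring them from the saved bitmask; the
 * rasterizer multisample bit is similarly overridden (and restored) when
 * line primitives have to be drawn without multisampling.
 */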
static void emit_state(struct rendering_state *state)
{
   int sh;
   if (state->blend_dirty) {
      uint32_t mask = 0;
      /* zero out the colormask values for disabled attachments */
      if (state->color_write_disables) {
         u_foreach_bit(att, state->color_write_disables) {
            mask |= state->blend_state.rt[att].colormask << (att * 4);
            state->blend_state.rt[att].colormask = 0;
         }
      }
      cso_set_blend(state->cso, &state->blend_state);
      /* reset colormasks using saved bitmask */
      if (state->color_write_disables) {
         const uint32_t att_mask = BITFIELD_MASK(4);
         u_foreach_bit(att, state->color_write_disables) {
            state->blend_state.rt[att].colormask = (mask >> (att * 4)) & att_mask;
         }
      }
      state->blend_dirty = false;
   }

   if (state->rs_dirty) {
      bool ms = state->rs_state.multisample;
      if (state->disable_multisample &&
          (state->gs_output_lines == GS_OUTPUT_LINES ||
           (state->gs_output_lines == GS_OUTPUT_NONE && u_reduced_prim(state->info.mode) == PIPE_PRIM_LINES)))
         state->rs_state.multisample = false;
      assert(offsetof(struct pipe_rasterizer_state, offset_clamp) - offsetof(struct pipe_rasterizer_state, offset_units) == sizeof(float) * 2);
      if (state->depth_bias.enabled) {
         memcpy(&state->rs_state.offset_units, &state->depth_bias, sizeof(float) * 3);
         state->rs_state.offset_tri = true;
         state->rs_state.offset_line = true;
         state->rs_state.offset_point = true;
      } else {
         memset(&state->rs_state.offset_units, 0, sizeof(float) * 3);
         state->rs_state.offset_tri = false;
         state->rs_state.offset_line = false;
         state->rs_state.offset_point = false;
      }
      cso_set_rasterizer(state->cso, &state->rs_state);
      state->rs_dirty = false;
      state->rs_state.multisample = ms;
   }

   if (state->dsa_dirty) {
      cso_set_depth_stencil_alpha(state->cso, &state->dsa_state);
      state->dsa_dirty = false;
   }

   if (state->sample_mask_dirty) {
      cso_set_sample_mask(state->cso, state->sample_mask);
      state->sample_mask_dirty = false;
   }

   if (state->min_samples_dirty) {
      cso_set_min_samples(state->cso, state->min_samples);
      state->min_samples_dirty = false;
   }

   if (state->blend_color_dirty) {
      state->pctx->set_blend_color(state->pctx, &state->blend_color);
      state->blend_color_dirty = false;
   }

   if (state->stencil_ref_dirty) {
      cso_set_stencil_ref(state->cso, state->stencil_ref);
      state->stencil_ref_dirty = false;
   }

   if (state->vb_dirty) {
      cso_set_vertex_buffers(state->cso, state->start_vb, state->num_vb, 0, false, state->vb);
      state->vb_dirty = false;
   }

   if (state->ve_dirty) {
      cso_set_vertex_elements(state->cso, &state->velem);
      state->ve_dirty = false;
   }

   bool constbuf_dirty[PIPE_SHADER_TYPES] = {false};
   bool pcbuf_dirty[PIPE_SHADER_TYPES] = {false};
   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      constbuf_dirty[sh] = state->constbuf_dirty[sh];
      if (state->constbuf_dirty[sh]) {
         for (unsigned idx = 0; idx < state->num_const_bufs[sh]; idx++)
            state->pctx->set_constant_buffer(state->pctx, sh,
                                             idx + 1, false, &state->const_buffer[sh][idx]);
      }
      state->constbuf_dirty[sh] = false;
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      pcbuf_dirty[sh] = state->pcbuf_dirty[sh];
      if (state->pcbuf_dirty[sh])
         update_pcbuf(state, sh);
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      if (state->inlines_dirty[sh])
         update_inline_shader_state(state, sh, pcbuf_dirty[sh], constbuf_dirty[sh]);
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      if (state->sb_dirty[sh]) {
         state->pctx->set_shader_buffers(state->pctx, sh,
                                         0, state->num_shader_buffers[sh],
                                         state->sb[sh], state->access[tgsi_processor_to_shader_stage(sh)].buffers_written);
      }
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      if (state->iv_dirty[sh]) {
         state->pctx->set_shader_images(state->pctx, sh,
                                        0, state->num_shader_images[sh], 0,
                                        state->iv[sh]);
      }
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      if (state->sv_dirty[sh]) {
         state->pctx->set_sampler_views(state->pctx, sh, 0, state->num_sampler_views[sh],
                                        0, false, state->sv[sh]);
         state->sv_dirty[sh] = false;
      }
   }

   for (sh = 0; sh < PIPE_SHADER_COMPUTE; sh++) {
      if (state->ss_dirty[sh]) {
         cso_set_samplers(state->cso, sh, state->num_sampler_states[sh], state->cso_ss_ptr[sh]);
         state->ss_dirty[sh] = false;
      }
   }

   if (state->vp_dirty) {
      state->pctx->set_viewport_states(state->pctx, 0, state->num_viewports, state->viewports);
      state->vp_dirty = false;
   }

   if (state->scissor_dirty) {
      state->pctx->set_scissor_states(state->pctx, 0, state->num_scissors, state->scissors);
      state->scissor_dirty = false;
   }
}

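/* vkCmdBindPipeline for compute: adopt the new layout's push-constant and
 * uniform-block sizes, recompute image/buffer dirty bits where the
 * pipeline's declared access differs from what is currently bound, record
 * the workgroup size for dispatch, and bind the shader CSO unless it will
 * be re-specialized via uniform inlining.
 */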
static void handle_compute_pipeline(struct vk_cmd_queue_entry *cmd,
                                    struct rendering_state *state)
{
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);

   if ((pipeline->layout->push_constant_stages & VK_SHADER_STAGE_COMPUTE_BIT) > 0)
      state->has_pcbuf[PIPE_SHADER_COMPUTE] = pipeline->layout->push_constant_size > 0;
   state->uniform_blocks[PIPE_SHADER_COMPUTE].count = pipeline->layout->stage[MESA_SHADER_COMPUTE].uniform_block_count;
   for (unsigned j = 0; j < pipeline->layout->stage[MESA_SHADER_COMPUTE].uniform_block_count; j++)
      state->uniform_blocks[PIPE_SHADER_COMPUTE].size[j] = pipeline->layout->stage[MESA_SHADER_COMPUTE].uniform_block_sizes[j];
   if (!state->has_pcbuf[PIPE_SHADER_COMPUTE] && !pipeline->layout->stage[MESA_SHADER_COMPUTE].uniform_block_count)
      state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = false;

   state->iv_dirty[MESA_SHADER_COMPUTE] |= state->num_shader_images[MESA_SHADER_COMPUTE] &&
                          (state->access[MESA_SHADER_COMPUTE].images_read != pipeline->access[MESA_SHADER_COMPUTE].images_read ||
                           state->access[MESA_SHADER_COMPUTE].images_written != pipeline->access[MESA_SHADER_COMPUTE].images_written);
   state->sb_dirty[MESA_SHADER_COMPUTE] |= state->num_shader_buffers[MESA_SHADER_COMPUTE] &&
                                           state->access[MESA_SHADER_COMPUTE].buffers_written != pipeline->access[MESA_SHADER_COMPUTE].buffers_written;
   memcpy(&state->access[MESA_SHADER_COMPUTE], &pipeline->access[MESA_SHADER_COMPUTE], sizeof(struct lvp_access_info));

   state->dispatch_info.block[0] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[0];
   state->dispatch_info.block[1] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[1];
   state->dispatch_info.block[2] = pipeline->pipeline_nir[MESA_SHADER_COMPUTE]->info.workgroup_size[2];
   state->inlines_dirty[PIPE_SHADER_COMPUTE] = pipeline->inlines[MESA_SHADER_COMPUTE].can_inline;
   if (!pipeline->inlines[MESA_SHADER_COMPUTE].can_inline)
      state->pctx->bind_compute_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_COMPUTE]);
}

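/* Depth portion of the viewport transform.  With clip_halfz set (the Vulkan
 * [0, 1] clip volume) this is z_window = n + z_clip * (f - n); otherwise
 * (GL-style [-1, 1] clip volume) it is
 * z_window = (n + f) / 2 + z_clip * (f - n) / 2.
 */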
static void
set_viewport_depth_xform(struct rendering_state *state, unsigned idx)
{
   double n = state->depth[idx].min;
   double f = state->depth[idx].max;

   if (!state->rs_state.clip_halfz) {
      state->viewports[idx].scale[2] = 0.5 * (f - n);
      state->viewports[idx].translate[2] = 0.5 * (n + f);
   } else {
      state->viewports[idx].scale[2] = (f - n);
      state->viewports[idx].translate[2] = n;
   }
}

static void
get_viewport_xform(struct rendering_state *state,
                   const VkViewport *viewport,
                   unsigned idx)
{
   float x = viewport->x;
   float y = viewport->y;
   float half_width = 0.5f * viewport->width;
   float half_height = 0.5f * viewport->height;

   state->viewports[idx].scale[0] = half_width;
   state->viewports[idx].translate[0] = half_width + x;
   state->viewports[idx].scale[1] = half_height;
   state->viewports[idx].translate[1] = half_height + y;

   memcpy(&state->depth[idx].min, &viewport->minDepth, sizeof(float) * 2);
}

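/* x/y portion of the viewport transform; minDepth/maxDepth are stashed in
 * state->depth[] above and applied separately by set_viewport_depth_xform().
 */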
static void
update_samples(struct rendering_state *state, VkSampleCountFlags samples)
{
   state->rs_dirty |= state->rs_state.multisample != (samples > 1);
   state->rs_state.multisample = samples > 1;
   state->min_samples = 1;
   if (state->sample_shading) {
      state->min_samples = ceil(samples * state->min_sample_shading);
      if (state->min_samples > 1)
         state->min_samples = samples;
      if (state->min_samples < 1)
         state->min_samples = 1;
   }
   if (state->force_min_sample)
      state->min_samples = samples;
   state->min_samples_dirty = true;
   if (samples != state->framebuffer.samples) {
      state->framebuffer.samples = samples;
      state->pctx->set_framebuffer_state(state->pctx, &state->framebuffer);
   }
}

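/* vkCmdBindPipeline for graphics.  Every piece of baked pipeline state is
 * guarded by a BITSET_TEST(ps->dynamic, ...) check so that state declared
 * dynamic by the pipeline is left to the corresponding vkCmdSet* handler
 * instead of being overwritten here.
 */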
static void handle_graphics_pipeline(struct vk_cmd_queue_entry *cmd,
                                     struct rendering_state *state)
{
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
   const struct vk_graphics_pipeline_state *ps = &pipeline->graphics_state;

   for (enum pipe_shader_type sh = PIPE_SHADER_VERTEX; sh < PIPE_SHADER_COMPUTE; sh++) {
      state->iv_dirty[sh] |= state->num_shader_images[sh] &&
                             (state->access[sh].images_read != pipeline->access[sh].images_read ||
                              state->access[sh].images_written != pipeline->access[sh].images_written);
      state->sb_dirty[sh] |= state->num_shader_buffers[sh] && state->access[sh].buffers_written != pipeline->access[sh].buffers_written;
   }
   memcpy(state->access, pipeline->access, sizeof(struct lvp_access_info) * 5); //4 vertex stages + fragment

   for (enum pipe_shader_type sh = PIPE_SHADER_VERTEX; sh < PIPE_SHADER_COMPUTE; sh++)
      state->has_pcbuf[sh] = false;

   for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
      enum pipe_shader_type sh = pipe_shader_type_from_mesa(i);
      state->uniform_blocks[sh].count = pipeline->layout->stage[i].uniform_block_count;
      for (unsigned j = 0; j < pipeline->layout->stage[i].uniform_block_count; j++)
         state->uniform_blocks[sh].size[j] = pipeline->layout->stage[i].uniform_block_sizes[j];
   }
   u_foreach_bit(stage, pipeline->layout->push_constant_stages) {
      enum pipe_shader_type sh = pipe_shader_type_from_mesa(stage);
      state->has_pcbuf[sh] = pipeline->layout->push_constant_size > 0;
      if (!state->has_pcbuf[sh] && !state->uniform_blocks[sh].count)
         state->pcbuf_dirty[sh] = false;
   }

   bool has_stage[PIPE_SHADER_TYPES] = { false };

   state->pctx->bind_gs_state(state->pctx, NULL);
   if (state->pctx->bind_tcs_state)
      state->pctx->bind_tcs_state(state->pctx, NULL);
   if (state->pctx->bind_tes_state)
      state->pctx->bind_tes_state(state->pctx, NULL);
   state->tess_states[0] = NULL;
   state->tess_states[1] = NULL;
   state->gs_output_lines = GS_OUTPUT_NONE;
   {
      u_foreach_bit(b, pipeline->graphics_state.shader_stages) {
         VkShaderStageFlagBits vk_stage = (1 << b);
         switch (vk_stage) {
         case VK_SHADER_STAGE_FRAGMENT_BIT:
            state->inlines_dirty[PIPE_SHADER_FRAGMENT] = pipeline->inlines[MESA_SHADER_FRAGMENT].can_inline;
            if (!pipeline->inlines[MESA_SHADER_FRAGMENT].can_inline)
               state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
            has_stage[PIPE_SHADER_FRAGMENT] = true;
            break;
         case VK_SHADER_STAGE_VERTEX_BIT:
            state->inlines_dirty[PIPE_SHADER_VERTEX] = pipeline->inlines[MESA_SHADER_VERTEX].can_inline;
            if (!pipeline->inlines[MESA_SHADER_VERTEX].can_inline)
               state->pctx->bind_vs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_VERTEX]);
            has_stage[PIPE_SHADER_VERTEX] = true;
            break;
         case VK_SHADER_STAGE_GEOMETRY_BIT:
            state->inlines_dirty[PIPE_SHADER_GEOMETRY] = pipeline->inlines[MESA_SHADER_GEOMETRY].can_inline;
            if (!pipeline->inlines[MESA_SHADER_GEOMETRY].can_inline)
               state->pctx->bind_gs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_GEOMETRY]);
            state->gs_output_lines = pipeline->gs_output_lines ? GS_OUTPUT_LINES : GS_OUTPUT_NOT_LINES;
            has_stage[PIPE_SHADER_GEOMETRY] = true;
            break;
         case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
            state->inlines_dirty[PIPE_SHADER_TESS_CTRL] = pipeline->inlines[MESA_SHADER_TESS_CTRL].can_inline;
            if (!pipeline->inlines[MESA_SHADER_TESS_CTRL].can_inline)
               state->pctx->bind_tcs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_CTRL]);
            has_stage[PIPE_SHADER_TESS_CTRL] = true;
            break;
         case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
            state->inlines_dirty[PIPE_SHADER_TESS_EVAL] = pipeline->inlines[MESA_SHADER_TESS_EVAL].can_inline;
            if (!pipeline->inlines[MESA_SHADER_TESS_EVAL].can_inline) {
               if (BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN)) {
                  state->tess_states[0] = pipeline->shader_cso[PIPE_SHADER_TESS_EVAL];
                  state->tess_states[1] = pipeline->tess_ccw_cso;
                  state->pctx->bind_tes_state(state->pctx, state->tess_states[state->tess_ccw]);
               } else {
                  state->pctx->bind_tes_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_TESS_EVAL]);
               }
            }
            if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_TS_DOMAIN_ORIGIN))
               state->tess_ccw = false;
            has_stage[PIPE_SHADER_TESS_EVAL] = true;
            break;
         default:
            assert(0);
            break;
         }
      }
   }

   /* there should always be a dummy fs. */
   if (!has_stage[PIPE_SHADER_FRAGMENT])
      state->pctx->bind_fs_state(state->pctx, pipeline->shader_cso[PIPE_SHADER_FRAGMENT]);
   if (state->pctx->bind_gs_state && !has_stage[PIPE_SHADER_GEOMETRY])
      state->pctx->bind_gs_state(state->pctx, NULL);
   if (state->pctx->bind_tcs_state && !has_stage[PIPE_SHADER_TESS_CTRL])
      state->pctx->bind_tcs_state(state->pctx, NULL);
   if (state->pctx->bind_tes_state && !has_stage[PIPE_SHADER_TESS_EVAL])
      state->pctx->bind_tes_state(state->pctx, NULL);

   /* rasterization state */
   if (ps->rs) {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_DEPTH_CLAMP_ENABLE))
         state->rs_state.depth_clamp = ps->rs->depth_clamp_enable;
      if (BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_DEPTH_CLIP_ENABLE)) {
         state->depth_clamp_sets_clip = false;
      } else {
         state->rs_state.depth_clip_near = state->rs_state.depth_clip_far =
            vk_rasterization_state_depth_clip_enable(ps->rs);
         state->depth_clamp_sets_clip =
            ps->rs->depth_clip_enable == VK_MESA_DEPTH_CLIP_ENABLE_NOT_CLAMP;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_RASTERIZER_DISCARD_ENABLE))
         state->rs_state.rasterizer_discard = ps->rs->rasterizer_discard_enable;

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_LINE_MODE)) {
         state->rs_state.line_smooth = pipeline->line_smooth;
         state->rs_state.line_rectangular = pipeline->line_rectangular;
      }
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_LINE_STIPPLE_ENABLE))
         state->rs_state.line_stipple_enable = ps->rs->line.stipple.enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_POLYGON_MODE)) {
         state->rs_state.fill_front = vk_polygon_mode_to_pipe(ps->rs->polygon_mode);
         state->rs_state.fill_back = vk_polygon_mode_to_pipe(ps->rs->polygon_mode);
      }
      state->rs_state.point_size_per_vertex = true;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_PROVOKING_VERTEX)) {
         state->rs_state.flatshade_first =
            ps->rs->provoking_vertex == VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT;
      }
      state->rs_state.point_quad_rasterization = true;
      state->rs_state.half_pixel_center = true;
      state->rs_state.scissor = true;
      state->rs_state.no_ms_sample_mask_out = true;

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_LINE_WIDTH))
         state->rs_state.line_width = ps->rs->line.width;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_LINE_STIPPLE)) {
         state->rs_state.line_stipple_factor = ps->rs->line.stipple.factor - 1;
         state->rs_state.line_stipple_pattern = ps->rs->line.stipple.pattern;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_ENABLE))
         state->depth_bias.enabled = ps->rs->depth_bias.enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_DEPTH_BIAS_FACTORS)) {
         state->depth_bias.offset_units = ps->rs->depth_bias.constant;
         state->depth_bias.offset_scale = ps->rs->depth_bias.slope;
         state->depth_bias.offset_clamp = ps->rs->depth_bias.clamp;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_CULL_MODE))
         state->rs_state.cull_face = vk_cull_to_pipe(ps->rs->cull_mode);

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_FRONT_FACE))
         state->rs_state.front_ccw = (ps->rs->front_face == VK_FRONT_FACE_COUNTER_CLOCKWISE);
      state->rs_dirty = true;
   }

   if (ps->ds) {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_DEPTH_TEST_ENABLE))
         state->dsa_state.depth_enabled = ps->ds->depth.test_enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_DEPTH_WRITE_ENABLE))
         state->dsa_state.depth_writemask = ps->ds->depth.write_enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_DEPTH_COMPARE_OP))
         state->dsa_state.depth_func = ps->ds->depth.compare_op;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_ENABLE))
         state->dsa_state.depth_bounds_test = ps->ds->depth.bounds_test.enable;

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_DEPTH_BOUNDS_TEST_BOUNDS)) {
         state->dsa_state.depth_bounds_min = ps->ds->depth.bounds_test.min;
         state->dsa_state.depth_bounds_max = ps->ds->depth.bounds_test.max;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_STENCIL_TEST_ENABLE)) {
         state->dsa_state.stencil[0].enabled = ps->ds->stencil.test_enable;
         state->dsa_state.stencil[1].enabled = ps->ds->stencil.test_enable;
      }

      const struct vk_stencil_test_face_state *front = &ps->ds->stencil.front;
      const struct vk_stencil_test_face_state *back = &ps->ds->stencil.back;

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_STENCIL_OP)) {
         state->dsa_state.stencil[0].func = front->op.compare;
         state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(front->op.fail);
         state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(front->op.pass);
         state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(front->op.depth_fail);

         state->dsa_state.stencil[1].func = back->op.compare;
         state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(back->op.fail);
         state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(back->op.pass);
         state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(back->op.depth_fail);
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_STENCIL_COMPARE_MASK)) {
         state->dsa_state.stencil[0].valuemask = front->compare_mask;
         state->dsa_state.stencil[1].valuemask = back->compare_mask;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_STENCIL_WRITE_MASK)) {
         state->dsa_state.stencil[0].writemask = front->write_mask;
         state->dsa_state.stencil[1].writemask = back->write_mask;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_DS_STENCIL_REFERENCE)) {
         state->stencil_ref.ref_value[0] = front->reference;
         state->stencil_ref.ref_value[1] = back->reference;
         state->stencil_ref_dirty = true;
      }
      state->dsa_dirty = true;
   }

   state->blend_state.independent_blend_enable = ps->rp->color_attachment_count > 1;

   if (ps->cb) {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP_ENABLE))
         state->blend_state.logicop_enable = ps->cb->logic_op_enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_LOGIC_OP))
         state->blend_state.logicop_func = vk_conv_logic_op(ps->cb->logic_op);

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_COLOR_WRITE_ENABLES))
         state->color_write_disables = ~ps->cb->color_write_enables;

      for (unsigned i = 0; i < ps->cb->attachment_count; i++) {
         const struct vk_color_blend_attachment_state *att = &ps->cb->attachments[i];
         if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_WRITE_MASKS))
            state->blend_state.rt[i].colormask = att->write_mask;
         if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_BLEND_ENABLES))
            state->blend_state.rt[i].blend_enable = att->blend_enable;

         if (!att->blend_enable) {
            state->blend_state.rt[i].rgb_func = 0;
            state->blend_state.rt[i].rgb_src_factor = 0;
            state->blend_state.rt[i].rgb_dst_factor = 0;
            state->blend_state.rt[i].alpha_func = 0;
            state->blend_state.rt[i].alpha_src_factor = 0;
            state->blend_state.rt[i].alpha_dst_factor = 0;
         } else if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_BLEND_EQUATIONS)) {
            state->blend_state.rt[i].rgb_func = vk_conv_blend_func(att->color_blend_op);
            state->blend_state.rt[i].rgb_src_factor = vk_conv_blend_factor(att->src_color_blend_factor);
            state->blend_state.rt[i].rgb_dst_factor = vk_conv_blend_factor(att->dst_color_blend_factor);
            state->blend_state.rt[i].alpha_func = vk_conv_blend_func(att->alpha_blend_op);
            state->blend_state.rt[i].alpha_src_factor = vk_conv_blend_factor(att->src_alpha_blend_factor);
            state->blend_state.rt[i].alpha_dst_factor = vk_conv_blend_factor(att->dst_alpha_blend_factor);
         }

         /* llvmpipe (like i965 hardware) applies the blend factor before the
          * blend function regardless of which function is used, so for
          * MIN/MAX the blend factors have to be stomped to ONE.
          */
         if (att->color_blend_op == VK_BLEND_OP_MIN ||
             att->color_blend_op == VK_BLEND_OP_MAX) {
            state->blend_state.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
            state->blend_state.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
         }

         if (att->alpha_blend_op == VK_BLEND_OP_MIN ||
             att->alpha_blend_op == VK_BLEND_OP_MAX) {
            state->blend_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
            state->blend_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
         }
      }
      state->blend_dirty = true;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_CB_BLEND_CONSTANTS)) {
         memcpy(state->blend_color.color, ps->cb->blend_constants, 4 * sizeof(float));
         state->blend_color_dirty = true;
      }
   } else if (ps->rp->color_attachment_count == 0) {
      memset(&state->blend_state, 0, sizeof(state->blend_state));
      state->blend_dirty = true;
   }

   if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_RS_LINE_MODE))
      state->disable_multisample = pipeline->disable_multisample;
   if (ps->ms) {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_MASK)) {
         state->sample_mask = ps->ms->sample_mask;
         state->sample_mask_dirty = true;
      }
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_COVERAGE_ENABLE))
         state->blend_state.alpha_to_coverage = ps->ms->alpha_to_coverage_enable;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE))
         state->blend_state.alpha_to_one = ps->ms->alpha_to_one_enable;
      state->force_min_sample = pipeline->force_min_sample;
      state->sample_shading = ps->ms->sample_shading_enable;
      state->min_sample_shading = ps->ms->min_sample_shading;
      state->blend_dirty = true;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_RASTERIZATION_SAMPLES))
         update_samples(state, ps->ms->rasterization_samples);
   } else {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_MASK) &&
          !BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE))
         state->rs_state.multisample = false;
      state->sample_shading = false;
      state->force_min_sample = false;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_SAMPLE_MASK)) {
         state->sample_mask_dirty = state->sample_mask != 0xffffffff;
         state->sample_mask = 0xffffffff;
         state->min_samples_dirty = state->min_samples;
         state->min_samples = 0;
      }
      state->blend_dirty |= state->blend_state.alpha_to_coverage || state->blend_state.alpha_to_one;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_COVERAGE_ENABLE))
         state->blend_state.alpha_to_coverage = false;
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_MS_ALPHA_TO_ONE_ENABLE))
         state->blend_state.alpha_to_one = false;
      state->rs_dirty = true;
   }

   if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VI_BINDING_STRIDES)) {
      u_foreach_bit(b, ps->vi->bindings_valid)
         state->vb[b].stride = ps->vi->bindings[b].stride;
      state->vb_dirty = true;
   }

   if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VI)) {
      u_foreach_bit(a, ps->vi->attributes_valid) {
         uint32_t b = ps->vi->attributes[a].binding;
         state->velem.velems[a].src_offset = ps->vi->attributes[a].offset;
         state->velem.velems[a].vertex_buffer_index = b;
         state->velem.velems[a].src_format =
            lvp_vk_format_to_pipe_format(ps->vi->attributes[a].format);
         state->velem.velems[a].dual_slot = false;

         uint32_t d = ps->vi->bindings[b].divisor;
         switch (ps->vi->bindings[b].input_rate) {
         case VK_VERTEX_INPUT_RATE_VERTEX:
            state->velem.velems[a].instance_divisor = 0;
            break;
         case VK_VERTEX_INPUT_RATE_INSTANCE:
            state->velem.velems[a].instance_divisor = d ? d : UINT32_MAX;
            break;
         default:
            unreachable("Invalid vertex input rate");
         }
      }

      state->velem.count = util_last_bit(ps->vi->attributes_valid);
      state->vb_dirty = true;
      state->ve_dirty = true;
   }

   if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_IA_PRIMITIVE_TOPOLOGY)) {
      state->info.mode = vk_conv_topology(ps->ia->primitive_topology);
      state->rs_dirty = true;
   }
   if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_IA_PRIMITIVE_RESTART_ENABLE))
      state->info.primitive_restart = ps->ia->primitive_restart_enable;

   if (ps->ts && !BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_TS_PATCH_CONTROL_POINTS))
      state->patch_vertices = ps->ts->patch_control_points;

   if (ps->vp) {
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VP_VIEWPORT_COUNT)) {
         state->num_viewports = ps->vp->viewport_count;
         state->vp_dirty = true;
      }
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VP_SCISSOR_COUNT)) {
         state->num_scissors = ps->vp->scissor_count;
         state->scissor_dirty = true;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VP_VIEWPORTS)) {
         for (uint32_t i = 0; i < ps->vp->viewport_count; i++) {
            get_viewport_xform(state, &ps->vp->viewports[i], i);
            set_viewport_depth_xform(state, i);
         }
         state->vp_dirty = true;
      }
      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VP_SCISSORS)) {
         for (uint32_t i = 0; i < ps->vp->scissor_count; i++) {
            const VkRect2D *ss = &ps->vp->scissors[i];
            state->scissors[i].minx = ss->offset.x;
            state->scissors[i].miny = ss->offset.y;
            state->scissors[i].maxx = ss->offset.x + ss->extent.width;
            state->scissors[i].maxy = ss->offset.y + ss->extent.height;
         }
         state->scissor_dirty = true;
      }

      if (!BITSET_TEST(ps->dynamic, MESA_VK_DYNAMIC_VP_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE) &&
          state->rs_state.clip_halfz != !ps->vp->depth_clip_negative_one_to_one) {
         state->rs_state.clip_halfz = !ps->vp->depth_clip_negative_one_to_one;
         state->rs_dirty = true;
         for (uint32_t i = 0; i < state->num_viewports; i++)
            set_viewport_depth_xform(state, i);
         state->vp_dirty = true;
      }
   }
}

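/* Rebuild the per-image access masks for one stage from the bound pipeline's
 * declared image reads/writes; these feed the access/shader_access fields of
 * the pipe_image_view bindings sent to the driver.
 */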
static void
handle_pipeline_access(struct rendering_state *state, gl_shader_stage stage)
{
   enum pipe_shader_type pstage = pipe_shader_type_from_mesa(stage);
   for (unsigned i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
      state->iv[pstage][i].access = 0;
      state->iv[pstage][i].shader_access = 0;
   }
   u_foreach_bit64(idx, state->access[stage].images_read) {
      state->iv[pstage][idx].access |= PIPE_IMAGE_ACCESS_READ;
      state->iv[pstage][idx].shader_access |= PIPE_IMAGE_ACCESS_READ;
   }
   u_foreach_bit64(idx, state->access[stage].images_written) {
      state->iv[pstage][idx].access |= PIPE_IMAGE_ACCESS_WRITE;
      state->iv[pstage][idx].shader_access |= PIPE_IMAGE_ACCESS_WRITE;
   }
}

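/* Common vkCmdBindPipeline entry point: dispatch to the compute or graphics
 * handler, then record the pipeline and its push-constant size per bind
 * point (index 0 = graphics, 1 = compute).
 */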
static void handle_pipeline(struct vk_cmd_queue_entry *cmd,
                            struct rendering_state *state)
{
   LVP_FROM_HANDLE(lvp_pipeline, pipeline, cmd->u.bind_pipeline.pipeline);
   if (pipeline->is_compute_pipeline) {
      handle_compute_pipeline(cmd, state);
      handle_pipeline_access(state, MESA_SHADER_COMPUTE);
   } else {
      handle_graphics_pipeline(cmd, state);
      for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++)
         handle_pipeline_access(state, i);
   }
   state->push_size[pipeline->is_compute_pipeline] = pipeline->layout->push_constant_size;
   state->pipeline[pipeline->is_compute_pipeline] = pipeline;
}

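/* vkCmdBindVertexBuffers2: update the shadowed vertex-buffer bindings and
 * widen the start_vb/num_vb range that emit_state() will re-send.
 */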
static void handle_vertex_buffers2(struct vk_cmd_queue_entry *cmd,
                                   struct rendering_state *state)
{
   struct vk_cmd_bind_vertex_buffers2 *vcb = &cmd->u.bind_vertex_buffers2;

   int i;
   for (i = 0; i < vcb->binding_count; i++) {
      int idx = i + vcb->first_binding;

      state->vb[idx].buffer_offset = vcb->offsets[i];
      state->vb[idx].buffer.resource =
         vcb->buffers[i] ? lvp_buffer_from_handle(vcb->buffers[i])->bo : NULL;

      if (vcb->strides)
         state->vb[idx].stride = vcb->strides[i];
   }
   if (vcb->first_binding < state->start_vb)
      state->start_vb = vcb->first_binding;
   if (vcb->first_binding + vcb->binding_count >= state->num_vb)
      state->num_vb = vcb->first_binding + vcb->binding_count;
   state->vb_dirty = true;
}

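/* Running per-stage slot totals used while walking bound descriptor sets:
 * descriptors are flattened onto gallium's linear per-stage arrays, so set
 * N's slots begin where the counts of sets 0..N-1 ended.  Also carries the
 * dynamic offsets passed to vkCmdBindDescriptorSets.
 */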
struct dyn_info {
   struct {
      uint16_t const_buffer_count;
      uint16_t shader_buffer_count;
      uint16_t sampler_count;
      uint16_t sampler_view_count;
      uint16_t image_count;
      uint16_t uniform_block_count;
   } stage[MESA_SHADER_STAGES];

   uint32_t dyn_index;
   const uint32_t *dynamic_offsets;
   uint32_t dynamic_offset_count;
};

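/* Each fill_*_stage() helper below resolves one descriptor to its final
 * gallium slot: the binding's base index within its set, plus the array
 * element, plus the accumulated per-stage counts of previously bound sets
 * from dyn_info.
 */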
static void fill_sampler_stage(struct rendering_state *state,
                               struct dyn_info *dyn_info,
                               gl_shader_stage stage,
                               enum pipe_shader_type p_stage,
                               int array_idx,
                               const union lvp_descriptor_info *descriptor,
                               const struct lvp_descriptor_set_binding_layout *binding)
{
   int ss_idx = binding->stage[stage].sampler_index;
   if (ss_idx == -1)
      return;
   ss_idx += array_idx;
   ss_idx += dyn_info->stage[stage].sampler_count;
   struct pipe_sampler_state *ss = binding->immutable_samplers ? binding->immutable_samplers[array_idx] : descriptor->sampler;
   state->ss[p_stage][ss_idx] = *ss;
   if (state->num_sampler_states[p_stage] <= ss_idx)
      state->num_sampler_states[p_stage] = ss_idx + 1;
   state->ss_dirty[p_stage] = true;
}

static void fill_sampler_view_stage(struct rendering_state *state,
                                    struct dyn_info *dyn_info,
                                    gl_shader_stage stage,
                                    enum pipe_shader_type p_stage,
                                    int array_idx,
                                    const union lvp_descriptor_info *descriptor,
                                    const struct lvp_descriptor_set_binding_layout *binding)
{
   int sv_idx = binding->stage[stage].sampler_view_index;
   if (sv_idx == -1)
      return;
   sv_idx += array_idx;
   sv_idx += dyn_info->stage[stage].sampler_view_count;

   assert(sv_idx < ARRAY_SIZE(state->sv[p_stage]));
   state->sv[p_stage][sv_idx] = descriptor->sampler_view;

   if (state->num_sampler_views[p_stage] <= sv_idx)
      state->num_sampler_views[p_stage] = sv_idx + 1;
   state->sv_dirty[p_stage] = true;
}

static void fill_image_view_stage(struct rendering_state *state,
                                  struct dyn_info *dyn_info,
                                  gl_shader_stage stage,
                                  enum pipe_shader_type p_stage,
                                  int array_idx,
                                  const union lvp_descriptor_info *descriptor,
                                  const struct lvp_descriptor_set_binding_layout *binding)
{
   int idx = binding->stage[stage].image_index;
   if (idx == -1)
      return;
   idx += array_idx;
   idx += dyn_info->stage[stage].image_count;
   uint16_t access = state->iv[p_stage][idx].access;
   uint16_t shader_access = state->iv[p_stage][idx].shader_access;
   state->iv[p_stage][idx] = descriptor->image_view;
   state->iv[p_stage][idx].access = access;
   state->iv[p_stage][idx].shader_access = shader_access;

   if (state->num_shader_images[p_stage] <= idx)
      state->num_shader_images[p_stage] = idx + 1;

   state->iv_dirty[p_stage] = true;
}

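/* Apply a single descriptor to the shadow state for one stage, switching on
 * the Vulkan descriptor type.  Dynamic UBO/SSBO descriptors additionally add
 * their vkCmdBindDescriptorSets dynamic offset to the buffer offset.
 */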
static void handle_descriptor(struct rendering_state *state,
                              struct dyn_info *dyn_info,
                              const struct lvp_descriptor_set_binding_layout *binding,
                              gl_shader_stage stage,
                              enum pipe_shader_type p_stage,
                              int array_idx,
                              VkDescriptorType type,
                              const union lvp_descriptor_info *descriptor)
{
   bool is_dynamic = type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
      type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;

   switch (type) {
   case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK: {
      int idx = binding->stage[stage].uniform_block_index;
      if (idx == -1)
         return;
      idx += dyn_info->stage[stage].uniform_block_count;
      assert(descriptor->uniform);
      state->uniform_blocks[p_stage].block[idx] = descriptor->uniform;
      state->pcbuf_dirty[p_stage] = true;
      state->inlines_dirty[p_stage] = true;
      break;
   }
   case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
   case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
      fill_image_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   }
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
      int idx = binding->stage[stage].const_buffer_index;
      if (idx == -1)
         return;
      idx += array_idx;
      idx += dyn_info->stage[stage].const_buffer_count;
      state->const_buffer[p_stage][idx] = descriptor->ubo;
      if (is_dynamic) {
         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
         state->const_buffer[p_stage][idx].buffer_offset += offset;
      }
      if (state->num_const_bufs[p_stage] <= idx)
         state->num_const_bufs[p_stage] = idx + 1;
      state->constbuf_dirty[p_stage] = true;
      state->inlines_dirty[p_stage] = true;
      break;
   }
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
      int idx = binding->stage[stage].shader_buffer_index;
      if (idx == -1)
         return;
      idx += array_idx;
      idx += dyn_info->stage[stage].shader_buffer_count;
      state->sb[p_stage][idx] = descriptor->ssbo;
      if (is_dynamic) {
         uint32_t offset = dyn_info->dynamic_offsets[dyn_info->dyn_index + binding->dynamic_index + array_idx];
         state->sb[p_stage][idx].buffer_offset += offset;
      }
      if (state->num_shader_buffers[p_stage] <= idx)
         state->num_shader_buffers[p_stage] = idx + 1;
      state->sb_dirty[p_stage] = true;
      break;
   }
   case VK_DESCRIPTOR_TYPE_SAMPLER:
      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
   case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      fill_sampler_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      fill_sampler_view_stage(state, dyn_info, stage, p_stage, array_idx, descriptor, binding);
      break;
   default:
1246       fprintf(stderr, "Unhandled descriptor type %d\n", type);
1247       unreachable("oops");
1248       break;
1249    }
1250 }
1251
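/* Apply every valid binding of a descriptor set to one shader stage,
 * handing each array element to handle_descriptor().  Inline uniform
 * blocks count as a single descriptor regardless of their byte size.
 */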
1252 static void handle_set_stage(struct rendering_state *state,
1253                              struct dyn_info *dyn_info,
1254                              const struct lvp_descriptor_set *set,
1255                              gl_shader_stage stage,
1256                              enum pipe_shader_type p_stage)
1257 {
1258    int j;
1259    for (j = 0; j < set->layout->binding_count; j++) {
1260       const struct lvp_descriptor_set_binding_layout *binding;
1261       const struct lvp_descriptor *descriptor;
1262       binding = &set->layout->binding[j];
1263
1264       if (binding->valid) {
1265          unsigned array_size = binding->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK ? 1 : binding->array_size;
1266          for (int i = 0; i < array_size; i++) {
1267             descriptor = &set->descriptors[binding->descriptor_index + i];
1268             handle_descriptor(state, dyn_info, binding, stage, p_stage, i, descriptor->type, &descriptor->info);
1269          }
1270       }
1271    }
1272 }
1273
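/* Accumulate a set layout's per-stage resource counts into dyn_info so
 * that bindings from set N are placed after all resources of sets 0..N-1
 * in the flat per-stage tables; inc_dyn additionally advances the index
 * into the dynamic-offset array.
 */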
1274 static void increment_dyn_info(struct dyn_info *dyn_info,
1275                                const struct vk_descriptor_set_layout *vk_layout,
1276                                bool inc_dyn)
1277 {
1278    const struct lvp_descriptor_set_layout *layout =
1279       vk_to_lvp_descriptor_set_layout(vk_layout);
1280
1281    for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
1282       dyn_info->stage[stage].const_buffer_count += layout->stage[stage].const_buffer_count;
1283       dyn_info->stage[stage].shader_buffer_count += layout->stage[stage].shader_buffer_count;
1284       dyn_info->stage[stage].sampler_count += layout->stage[stage].sampler_count;
1285       dyn_info->stage[stage].sampler_view_count += layout->stage[stage].sampler_view_count;
1286       dyn_info->stage[stage].image_count += layout->stage[stage].image_count;
1287       dyn_info->stage[stage].uniform_block_count += layout->stage[stage].uniform_block_count;
1288    }
1289    if (inc_dyn)
1290       dyn_info->dyn_index += layout->dynamic_offset_count;
1291 }
1292
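/* Compute bind point: the same bookkeeping as the graphics path below,
 * but only the MESA_SHADER_COMPUTE tables are updated.
 */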
1293 static void handle_compute_descriptor_sets(struct vk_cmd_queue_entry *cmd,
1294                                            struct dyn_info *dyn_info,
1295                                            struct rendering_state *state)
1296 {
1297    struct vk_cmd_bind_descriptor_sets *bds = &cmd->u.bind_descriptor_sets;
1298    LVP_FROM_HANDLE(lvp_pipeline_layout, layout, bds->layout);
1299    int i;
1300
1301    for (i = 0; i < bds->first_set; i++) {
1302       increment_dyn_info(dyn_info, layout->vk.set_layouts[i], false);
1303    }
1304    for (i = 0; i < bds->descriptor_set_count; i++) {
1305       const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
1306
1307       if (set->layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT)
1308          handle_set_stage(state, dyn_info, set, MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE);
1309       increment_dyn_info(dyn_info, layout->vk.set_layouts[bds->first_set + i], true);
1310    }
1311 }
1312
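/* vkCmdBindDescriptorSets: sets below first_set only contribute their
 * resource counts so later indices line up, then each newly bound set is
 * applied to every stage its layout is visible to.  E.g. if set 0 declares
 * two vertex-stage UBOs, a UBO at binding 0 of set 1 ends up in
 * state->const_buffer[PIPE_SHADER_VERTEX][2].
 */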
1313 static void handle_descriptor_sets(struct vk_cmd_queue_entry *cmd,
1314                                    struct rendering_state *state)
1315 {
1316    struct vk_cmd_bind_descriptor_sets *bds = &cmd->u.bind_descriptor_sets;
1317    LVP_FROM_HANDLE(lvp_pipeline_layout, layout, bds->layout);
1318    int i;
1319    struct dyn_info dyn_info;
1320
1321    dyn_info.dyn_index = 0;
1322    dyn_info.dynamic_offsets = bds->dynamic_offsets;
1323    dyn_info.dynamic_offset_count = bds->dynamic_offset_count;
1324
1325    memset(dyn_info.stage, 0, sizeof(dyn_info.stage));
1326    if (bds->pipeline_bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
1327       handle_compute_descriptor_sets(cmd, &dyn_info, state);
1328       return;
1329    }
1330
1331    for (i = 0; i < bds->first_set; i++) {
1332       increment_dyn_info(&dyn_info, layout->vk.set_layouts[i], false);
1333    }
1334
1335    for (i = 0; i < bds->descriptor_set_count; i++) {
1336       if (!layout->vk.set_layouts[bds->first_set + i])
1337          continue;
1338
1339       const struct lvp_descriptor_set *set = lvp_descriptor_set_from_handle(bds->descriptor_sets[i]);
1340       if (!set)
1341          continue;
1342       /* verify that there's enough total offsets */
1343       assert(set->layout->dynamic_offset_count <= dyn_info.dynamic_offset_count);
1344       /* verify there's either no offsets... */
1345       assert(!dyn_info.dynamic_offset_count ||
1346              /* or that the total number of offsets required is <= the number remaining */
1347              set->layout->dynamic_offset_count <= dyn_info.dynamic_offset_count - dyn_info.dyn_index);
1348
1349       if (set->layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
1350          handle_set_stage(state, &dyn_info, set, MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX);
1351
1352       if (set->layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
1353          handle_set_stage(state, &dyn_info, set, MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY);
1354
1355       if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
1356          handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL);
1357
1358       if (set->layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
1359          handle_set_stage(state, &dyn_info, set, MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL);
1360
1361       if (set->layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
1362          handle_set_stage(state, &dyn_info, set, MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT);
1363
1364       increment_dyn_info(&dyn_info, layout->vk.set_layouts[bds->first_set + i], true);
1365    }
1366 }
1367
1368 static struct pipe_surface *create_img_surface_bo(struct rendering_state *state,
1369                                                   VkImageSubresourceRange *range,
1370                                                   struct pipe_resource *bo,
1371                                                   enum pipe_format pformat,
1372                                                   int width,
1373                                                   int height,
1374                                                   int base_layer, int layer_count,
1375                                                   int level)
1376 {
1377    struct pipe_surface template;
1378
1379    memset(&template, 0, sizeof(struct pipe_surface));
1380
1381    template.format = pformat;
1382    template.width = width;
1383    template.height = height;
1384    template.u.tex.first_layer = range->baseArrayLayer + base_layer;
1385    template.u.tex.last_layer = range->baseArrayLayer + base_layer + layer_count - 1;
1386    template.u.tex.level = range->baseMipLevel + level;
1387
1388    if (template.format == PIPE_FORMAT_NONE)
1389       return NULL;
1390    return state->pctx->create_surface(state->pctx,
1391                                       bo, &template);
1392 }
1393
1394 static struct pipe_surface *create_img_surface(struct rendering_state *state,
1395                                                struct lvp_image_view *imgv,
1396                                                VkFormat format, int width,
1397                                                int height,
1398                                                int base_layer, int layer_count)
1399 {
1400    VkImageSubresourceRange imgv_subres =
1401       vk_image_view_subresource_range(&imgv->vk);
1402
1403    return create_img_surface_bo(state, &imgv_subres, imgv->image->bo,
1404                                 lvp_vk_format_to_pipe_format(format),
1405                                 width, height, base_layer, layer_count, 0);
1406 }
1407
1408 static void add_img_view_surface(struct rendering_state *state,
1409                                  struct lvp_image_view *imgv, int width, int height,
1410                                  int layer_count)
1411 {
1412    if (imgv->surface) {
1413       if (imgv->surface->width != width ||
1414           imgv->surface->height != height ||
1415           (imgv->surface->u.tex.last_layer - imgv->surface->u.tex.first_layer) != (layer_count - 1))
1416          pipe_surface_reference(&imgv->surface, NULL);
1417    }
1418
1419    if (!imgv->surface) {
1420       imgv->surface = create_img_surface(state, imgv, imgv->vk.format,
1421                                          width, height,
1422                                          0, layer_count);
1423    }
1424 }
1425
1426 static bool
1427 render_needs_clear(struct rendering_state *state)
1428 {
1429    for (uint32_t i = 0; i < state->color_att_count; i++) {
1430       if (state->color_att[i].load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1431          return true;
1432    }
1433    if (state->depth_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1434       return true;
1435    if (state->stencil_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1436       return true;
1437    return false;
1438 }
1439
1440 static void clear_attachment_layers(struct rendering_state *state,
1441                                     struct lvp_image_view *imgv,
1442                                     const VkRect2D *rect,
1443                                     unsigned base_layer, unsigned layer_count,
1444                                     unsigned ds_clear_flags, double dclear_val,
1445                                     uint32_t sclear_val,
1446                                     union pipe_color_union *col_val)
1447 {
1448    struct pipe_surface *clear_surf = create_img_surface(state,
1449                                                         imgv,
1450                                                         imgv->vk.format,
1451                                                         state->framebuffer.width,
1452                                                         state->framebuffer.height,
1453                                                         base_layer,
1454                                                         layer_count);
1455
1456    if (ds_clear_flags) {
1457       state->pctx->clear_depth_stencil(state->pctx,
1458                                        clear_surf,
1459                                        ds_clear_flags,
1460                                        dclear_val, sclear_val,
1461                                        rect->offset.x, rect->offset.y,
1462                                        rect->extent.width, rect->extent.height,
1463                                        true);
1464    } else {
1465       state->pctx->clear_render_target(state->pctx, clear_surf,
1466                                        col_val,
1467                                        rect->offset.x, rect->offset.y,
1468                                        rect->extent.width, rect->extent.height,
1469                                        true);
1470    }
1471    state->pctx->surface_destroy(state->pctx, clear_surf);
1472 }
1473
1474 static void render_clear(struct rendering_state *state)
1475 {
1476    for (uint32_t i = 0; i < state->color_att_count; i++) {
1477       if (state->color_att[i].load_op != VK_ATTACHMENT_LOAD_OP_CLEAR)
1478          continue;
1479
1480       union pipe_color_union color_clear_val = { 0 };
1481       const VkClearValue value = state->color_att[i].clear_value;
1482       color_clear_val.ui[0] = value.color.uint32[0];
1483       color_clear_val.ui[1] = value.color.uint32[1];
1484       color_clear_val.ui[2] = value.color.uint32[2];
1485       color_clear_val.ui[3] = value.color.uint32[3];
1486
1487       struct lvp_image_view *imgv = state->color_att[i].imgv;
1488       assert(imgv->surface);
1489
1490       if (state->info.view_mask) {
1491          u_foreach_bit(view, state->info.view_mask)
1492             clear_attachment_layers(state, imgv, &state->render_area,
1493                                     view, 1, 0, 0, 0, &color_clear_val);
1494       } else {
1495          state->pctx->clear_render_target(state->pctx,
1496                                           imgv->surface,
1497                                           &color_clear_val,
1498                                           state->render_area.offset.x,
1499                                           state->render_area.offset.y,
1500                                           state->render_area.extent.width,
1501                                           state->render_area.extent.height,
1502                                           false);
1503       }
1504    }
1505
1506    uint32_t ds_clear_flags = 0;
1507    double dclear_val = 0;
1508    if (state->depth_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1509       ds_clear_flags |= PIPE_CLEAR_DEPTH;
1510       dclear_val = state->depth_att.clear_value.depthStencil.depth;
1511    }
1512
1513    uint32_t sclear_val = 0;
1514    if (state->stencil_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1515       ds_clear_flags |= PIPE_CLEAR_STENCIL;
1516       sclear_val = state->stencil_att.clear_value.depthStencil.stencil;
1517    }
1518
1519    if (ds_clear_flags) {
1520       if (state->info.view_mask) {
1521          u_foreach_bit(view, state->info.view_mask)
1522             clear_attachment_layers(state, state->ds_imgv, &state->render_area,
1523                                     view, 1, ds_clear_flags, dclear_val, sclear_val, NULL);
1524       } else {
1525          state->pctx->clear_depth_stencil(state->pctx,
1526                                           state->ds_imgv->surface,
1527                                           ds_clear_flags,
1528                                           dclear_val, sclear_val,
1529                                           state->render_area.offset.x,
1530                                           state->render_area.offset.y,
1531                                           state->render_area.extent.width,
1532                                           state->render_area.extent.height,
1533                                           false);
1534       }
1535    }
1536 }
1537
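/* Fast path: clear all attachments with a single pipe_context::clear().
 * Only possible for full-framebuffer, unscissored, non-multiview,
 * unconditional clears where every color attachment shares one clear
 * value; anything else falls back to render_clear() above.
 */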
1538 static void render_clear_fast(struct rendering_state *state)
1539 {
1540    /*
1541     * The state tracker clear interface only works if all the attachments
1542     * have the same clear color, and llvmpipe doesn't support scissored
1543     * clears yet, so fall back to the slow path for anything else.
1544     */
1545    if (state->render_area.offset.x || state->render_area.offset.y)
1546       goto slow_clear;
1547
1548    if (state->render_area.extent.width != state->framebuffer.width ||
1549        state->render_area.extent.height != state->framebuffer.height)
1550       goto slow_clear;
1551
1552    if (state->info.view_mask)
1553       goto slow_clear;
1554
1555    if (state->render_cond)
1556       goto slow_clear;
1557
1558    uint32_t buffers = 0;
1559    bool has_color_value = false;
1560    VkClearValue color_value = {0};
1561    for (uint32_t i = 0; i < state->color_att_count; i++) {
1562       if (state->color_att[i].load_op != VK_ATTACHMENT_LOAD_OP_CLEAR)
1563          continue;
1564
1565       buffers |= (PIPE_CLEAR_COLOR0 << i);
1566
1567       if (has_color_value) {
1568          if (memcmp(&color_value, &state->color_att[i].clear_value, sizeof(VkClearValue)))
1569             goto slow_clear;
1570       } else {
1571          memcpy(&color_value, &state->color_att[i].clear_value, sizeof(VkClearValue));
1572          has_color_value = true;
1573       }
1574    }
1575
1576    double dclear_val = 0;
1577    if (state->depth_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1578       buffers |= PIPE_CLEAR_DEPTH;
1579       dclear_val = state->depth_att.clear_value.depthStencil.depth;
1580    }
1581
1582    uint32_t sclear_val = 0;
1583    if (state->stencil_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1584       buffers |= PIPE_CLEAR_STENCIL;
1585       sclear_val = state->stencil_att.clear_value.depthStencil.stencil;
1586    }
1587
1588    union pipe_color_union col_val;
1589    for (unsigned i = 0; i < 4; i++)
1590       col_val.ui[i] = color_value.color.uint32[i];
1591
1592    state->pctx->clear(state->pctx, buffers,
1593                       NULL, &col_val,
1594                       dclear_val, sclear_val);
1595    return;
1596
1597 slow_clear:
1598    render_clear(state);
1599 }
1600
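/* Tear down the transient multisampled image that was created for
 * EXT_multisampled_render_to_single_sampled and return the original
 * single-sampled view it shadowed.
 */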
1601 static struct lvp_image_view *
1602 destroy_multisample_surface(struct rendering_state *state, struct lvp_image_view *imgv)
1603 {
1604    assert(imgv->image->vk.samples > 1);
1605    struct lvp_image_view *base = imgv->multisample;
1606    base->multisample = NULL;
1607    free((void*)imgv->image);
1608    pipe_surface_reference(&imgv->surface, NULL);
1609    free(imgv);
1610    return base;
1611 }
1612
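/* Resolve the depth/stencil attachment.  When depth and stencil use
 * different resolve modes the resolve is split into two blits, one masked
 * to Z and one to S; SAMPLE_ZERO resolves set sample0_only.  With
 * multi == true this is the forced msrtss resolve back into the original
 * single-sampled view.
 */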
1613 static void
1614 resolve_ds(struct rendering_state *state, bool multi)
1615 {
1616    VkResolveModeFlagBits depth_resolve_mode = multi ? state->forced_depth_resolve_mode : state->depth_att.resolve_mode;
1617    VkResolveModeFlagBits stencil_resolve_mode = multi ? state->forced_stencil_resolve_mode : state->stencil_att.resolve_mode;
1618    if (!depth_resolve_mode && !stencil_resolve_mode)
1619       return;
1620
1621    struct lvp_image_view *src_imgv = state->ds_imgv;
1622    if (multi && !src_imgv->multisample)
1623       return;
1624    if (!multi && src_imgv->image->vk.samples == 1)
1625       return;
1626
1627    assert(state->depth_att.resolve_imgv == NULL ||
1628           state->stencil_att.resolve_imgv == NULL ||
1629           state->depth_att.resolve_imgv == state->stencil_att.resolve_imgv ||
1630           multi);
1631    struct lvp_image_view *dst_imgv =
1632       multi ? src_imgv->multisample :
1633       state->depth_att.resolve_imgv ? state->depth_att.resolve_imgv :
1634                                       state->stencil_att.resolve_imgv;
1635
1636    int num_blits = 1;
1637    if (depth_resolve_mode != stencil_resolve_mode)
1638       num_blits = 2;
1639
1640    for (unsigned i = 0; i < num_blits; i++) {
1641       if (i == 0 && depth_resolve_mode == VK_RESOLVE_MODE_NONE)
1642          continue;
1643
1644       if (i == 1 && stencil_resolve_mode == VK_RESOLVE_MODE_NONE)
1645          continue;
1646
1647       struct pipe_blit_info info;
1648       memset(&info, 0, sizeof(info));
1649
1650       info.src.resource = src_imgv->image->bo;
1651       info.dst.resource = dst_imgv->image->bo;
1652       info.src.format = src_imgv->pformat;
1653       info.dst.format = dst_imgv->pformat;
1654       info.filter = PIPE_TEX_FILTER_NEAREST;
1655
1656       if (num_blits == 1)
1657          info.mask = PIPE_MASK_ZS;
1658       else if (i == 0)
1659          info.mask = PIPE_MASK_Z;
1660       else
1661          info.mask = PIPE_MASK_S;
1662
1663       if (i == 0 && depth_resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
1664          info.sample0_only = true;
1665       if (i == 1 && stencil_resolve_mode == VK_RESOLVE_MODE_SAMPLE_ZERO_BIT)
1666          info.sample0_only = true;
1667
1668       info.src.box.x = state->render_area.offset.x;
1669       info.src.box.y = state->render_area.offset.y;
1670       info.src.box.width = state->render_area.extent.width;
1671       info.src.box.height = state->render_area.extent.height;
1672       info.src.box.depth = state->framebuffer.layers;
1673
1674       info.dst.box = info.src.box;
1675
1676       state->pctx->blit(state->pctx, &info);
1677    }
1678    if (multi)
1679       state->ds_imgv = destroy_multisample_surface(state, state->ds_imgv);
1680 }
1681
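/* Resolve each multisampled color attachment into its resolve target (or,
 * for msrtss, back into its single-sampled view), then release any
 * transient msrtss surfaces.
 */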
1682 static void
1683 resolve_color(struct rendering_state *state, bool multi)
1684 {
1685    for (uint32_t i = 0; i < state->color_att_count; i++) {
1686       if (!state->color_att[i].resolve_mode &&
1687           !(multi && state->forced_sample_count && state->color_att[i].imgv))
1688          continue;
1689
1690       struct lvp_image_view *src_imgv = state->color_att[i].imgv;
1691       /* skip non-msrtss resolves during msrtss resolve */
1692       if (multi && !src_imgv->multisample)
1693          continue;
1694       struct lvp_image_view *dst_imgv = multi ? src_imgv->multisample : state->color_att[i].resolve_imgv;
1695
1696       struct pipe_blit_info info;
1697       memset(&info, 0, sizeof(info));
1698
1699       info.src.resource = src_imgv->image->bo;
1700       info.dst.resource = dst_imgv->image->bo;
1701       info.src.format = src_imgv->pformat;
1702       info.dst.format = dst_imgv->pformat;
1703       info.filter = PIPE_TEX_FILTER_NEAREST;
1704       info.mask = PIPE_MASK_RGBA;
1705       info.src.box.x = state->render_area.offset.x;
1706       info.src.box.y = state->render_area.offset.y;
1707       info.src.box.width = state->render_area.extent.width;
1708       info.src.box.height = state->render_area.extent.height;
1709       info.src.box.depth = state->framebuffer.layers;
1710
1711       info.dst.box = info.src.box;
1712
1713       info.src.level = src_imgv->vk.base_mip_level;
1714       info.dst.level = dst_imgv->vk.base_mip_level;
1715
1716       state->pctx->blit(state->pctx, &info);
1717    }
1718
1719    if (!multi)
1720       return;
1721    for (uint32_t i = 0; i < state->color_att_count; i++) {
1722       struct lvp_image_view *src_imgv = state->color_att[i].imgv;
1723       if (src_imgv && src_imgv->multisample) /* check if it has a msrtss view */
1724          state->color_att[i].imgv = destroy_multisample_surface(state, src_imgv);
1725    }
1726 }
1727
1728 static void render_resolve(struct rendering_state *state)
1729 {
1730    if (state->forced_sample_count) {
1731       resolve_ds(state, true);
1732       resolve_color(state, true);
1733    }
1734    resolve_ds(state, false);
1735    resolve_color(state, false);
1736 }
1737
1738 static void
1739 replicate_attachment(struct rendering_state *state, struct lvp_image_view *src, struct lvp_image_view *dst)
1740 {
1741    unsigned level = dst->surface->u.tex.level;
1742    struct pipe_box box;
1743    u_box_3d(0, 0, 0,
1744             u_minify(dst->image->bo->width0, level),
1745             u_minify(dst->image->bo->height0, level),
1746             u_minify(dst->image->bo->depth0, level),
1747             &box);
1748    state->pctx->resource_copy_region(state->pctx, dst->image->bo, level, 0, 0, 0, src->image->bo, level, &box);
1749 }
1750
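/* Create a transient multisampled twin of a single-sampled attachment for
 * EXT_multisampled_render_to_single_sampled: the view and image metadata
 * are duplicated, a fresh multisampled resource is allocated, and the two
 * views are linked through their multisample pointers.  If the old pixels
 * could remain visible (LOAD/CLEAR load ops or a partial render area) they
 * are replicated into the new resource first.
 */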
1751 static struct lvp_image_view *
1752 create_multisample_surface(struct rendering_state *state, struct lvp_image_view *imgv, uint32_t samples, bool replicate)
1753 {
1754    assert(!imgv->multisample);
1755
1756    struct pipe_resource templ = *imgv->surface->texture;
1757    templ.nr_samples = samples;
1758    struct lvp_image *image = mem_dup(imgv->image, sizeof(struct lvp_image));
1759    image->vk.samples = samples;
1760    image->pmem = NULL;
1761    image->bo = state->pctx->screen->resource_create(state->pctx->screen, &templ);
1762
1763    struct lvp_image_view *multi = mem_dup(imgv, sizeof(struct lvp_image_view));
1764    multi->image = image;
1765    multi->surface = state->pctx->create_surface(state->pctx, image->bo, imgv->surface);
1766    struct pipe_resource *ref = image->bo;
1767    pipe_resource_reference(&ref, NULL);
1768    imgv->multisample = multi;
1769    multi->multisample = imgv;
1770    if (replicate)
1771       replicate_attachment(state, imgv, multi);
1772    return multi;
1773 }
1774
1775 static bool
1776 att_needs_replicate(const struct rendering_state *state, const struct lvp_image_view *imgv, VkAttachmentLoadOp load_op)
1777 {
1778    if (load_op == VK_ATTACHMENT_LOAD_OP_LOAD || load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1779       return true;
1780    if (state->render_area.offset.x || state->render_area.offset.y)
1781       return true;
1782    if (state->render_area.extent.width < imgv->image->vk.extent.width ||
1783        state->render_area.extent.height < imgv->image->vk.extent.height)
1784       return true;
1785    return false;
1786 }
1787
1788 static void render_att_init(struct lvp_render_attachment *att,
1789                             const VkRenderingAttachmentInfo *vk_att)
1790 {
1791    if (vk_att == NULL || vk_att->imageView == VK_NULL_HANDLE) {
1792       *att = (struct lvp_render_attachment) {
1793          .load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
1794       };
1795       return;
1796    }
1797
1798    *att = (struct lvp_render_attachment) {
1799       .imgv = lvp_image_view_from_handle(vk_att->imageView),
1800       .load_op = vk_att->loadOp,
1801       .clear_value = vk_att->clearValue,
1802    };
1803
1804    if (vk_att->resolveImageView && vk_att->resolveMode) {
1805       att->resolve_imgv = lvp_image_view_from_handle(vk_att->resolveImageView);
1806       att->resolve_mode = vk_att->resolveMode;
1807    }
1808 }
1809
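/* vkCmdBeginRendering: build the gallium framebuffer from the attachment
 * infos, create msrtss shadow surfaces when single-sampled attachments are
 * rendered at a forced sample count, and assert that the render area fits
 * inside every attachment before clearing.  Clears are skipped when this
 * pass is resuming a suspended one.
 */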
1810 static void handle_begin_rendering(struct vk_cmd_queue_entry *cmd,
1811                                    struct rendering_state *state)
1812 {
1813    const VkRenderingInfo *info = cmd->u.begin_rendering.rendering_info;
1814    bool resuming = (info->flags & VK_RENDERING_RESUMING_BIT) == VK_RENDERING_RESUMING_BIT;
1815    bool suspending = (info->flags & VK_RENDERING_SUSPENDING_BIT) == VK_RENDERING_SUSPENDING_BIT;
1816
1817    const VkMultisampledRenderToSingleSampledInfoEXT *ssi =
1818          vk_find_struct_const(info->pNext, MULTISAMPLED_RENDER_TO_SINGLE_SAMPLED_INFO_EXT);
1819    if (ssi && ssi->multisampledRenderToSingleSampledEnable) {
1820       state->forced_sample_count = ssi->rasterizationSamples;
1821       state->forced_depth_resolve_mode = info->pDepthAttachment ? info->pDepthAttachment->resolveMode : 0;
1822       state->forced_stencil_resolve_mode = info->pStencilAttachment ? info->pStencilAttachment->resolveMode : 0;
1823    } else {
1824       state->forced_sample_count = 0;
1825       state->forced_depth_resolve_mode = 0;
1826       state->forced_stencil_resolve_mode = 0;
1827    }
1828
1829    state->info.view_mask = info->viewMask;
1830    state->render_area = info->renderArea;
1831    state->suspending = suspending;
1832    state->framebuffer.width = info->renderArea.offset.x +
1833                               info->renderArea.extent.width;
1834    state->framebuffer.height = info->renderArea.offset.y +
1835                                info->renderArea.extent.height;
1836    state->framebuffer.layers = info->viewMask ? util_last_bit(info->viewMask) : info->layerCount;
1837    state->framebuffer.nr_cbufs = info->colorAttachmentCount;
1838
1839    state->color_att_count = info->colorAttachmentCount;
1840    state->color_att = realloc(state->color_att, sizeof(*state->color_att) * state->color_att_count);
1841    for (unsigned i = 0; i < info->colorAttachmentCount; i++) {
1842       render_att_init(&state->color_att[i], &info->pColorAttachments[i]);
1843       if (state->color_att[i].imgv) {
1844          struct lvp_image_view *imgv = state->color_att[i].imgv;
1845          add_img_view_surface(state, imgv,
1846                               state->framebuffer.width, state->framebuffer.height,
1847                               state->framebuffer.layers);
1848          if (state->forced_sample_count && imgv->image->vk.samples == 1)
1849             state->color_att[i].imgv = create_multisample_surface(state, imgv, state->forced_sample_count,
1850                                                                   att_needs_replicate(state, imgv, state->color_att[i].load_op));
1851          state->framebuffer.cbufs[i] = state->color_att[i].imgv->surface;
1852          assert(state->render_area.offset.x + state->render_area.extent.width <= state->framebuffer.cbufs[i]->texture->width0);
1853          assert(state->render_area.offset.y + state->render_area.extent.height <= state->framebuffer.cbufs[i]->texture->height0);
1854       } else {
1855          state->framebuffer.cbufs[i] = NULL;
1856       }
1857    }
1858
1859    render_att_init(&state->depth_att, info->pDepthAttachment);
1860    render_att_init(&state->stencil_att, info->pStencilAttachment);
1861    if (state->depth_att.imgv || state->stencil_att.imgv) {
1862       assert(state->depth_att.imgv == NULL ||
1863              state->stencil_att.imgv == NULL ||
1864              state->depth_att.imgv == state->stencil_att.imgv);
1865       state->ds_imgv = state->depth_att.imgv ? state->depth_att.imgv :
1866                                                state->stencil_att.imgv;
1867       struct lvp_image_view *imgv = state->ds_imgv;
1868       add_img_view_surface(state, imgv,
1869                            state->framebuffer.width, state->framebuffer.height,
1870                            state->framebuffer.layers);
1871       if (state->forced_sample_count && imgv->image->vk.samples == 1) {
1872          VkAttachmentLoadOp load_op;
1873          if (state->depth_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR ||
1874              state->stencil_att.load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
1875             load_op = VK_ATTACHMENT_LOAD_OP_CLEAR;
1876          else if (state->depth_att.load_op == VK_ATTACHMENT_LOAD_OP_LOAD ||
1877                   state->stencil_att.load_op == VK_ATTACHMENT_LOAD_OP_LOAD)
1878             load_op = VK_ATTACHMENT_LOAD_OP_LOAD;
1879          else
1880             load_op = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
1881          state->ds_imgv = create_multisample_surface(state, imgv, state->forced_sample_count,
1882                                                      att_needs_replicate(state, imgv, load_op));
1883       }
1884       state->framebuffer.zsbuf = state->ds_imgv->surface;
1885       assert(state->render_area.offset.x + state->render_area.extent.width <= state->framebuffer.zsbuf->texture->width0);
1886       assert(state->render_area.offset.y + state->render_area.extent.height <= state->framebuffer.zsbuf->texture->height0);
1887    } else {
1888       state->ds_imgv = NULL;
1889       state->framebuffer.zsbuf = NULL;
1890    }
1891
1892    state->pctx->set_framebuffer_state(state->pctx,
1893                                       &state->framebuffer);
1894
1895    if (!resuming && render_needs_clear(state))
1896       render_clear_fast(state);
1897 }
1898
1899 static void handle_end_rendering(struct vk_cmd_queue_entry *cmd,
1900                                  struct rendering_state *state)
1901 {
1902    if (!state->suspending)
1903       render_resolve(state);
1904 }
1905
1906 static void handle_draw(struct vk_cmd_queue_entry *cmd,
1907                         struct rendering_state *state)
1908 {
1909    struct pipe_draw_start_count_bias draw;
1910
1911    state->info.index_size = 0;
1912    state->info.index.resource = NULL;
1913    state->info.start_instance = cmd->u.draw.first_instance;
1914    state->info.instance_count = cmd->u.draw.instance_count;
1915
1916    draw.start = cmd->u.draw.first_vertex;
1917    draw.count = cmd->u.draw.vertex_count;
1918    draw.index_bias = 0;
1919
1920    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
1921    state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, &draw, 1);
1922 }
1923
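/* vkCmdDrawMultiEXT: lower all draws to a single draw_vbo() call;
 * increment_draw_id makes the draw index advance per draw when more than
 * one is submitted.
 */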
1924 static void handle_draw_multi(struct vk_cmd_queue_entry *cmd,
1925                               struct rendering_state *state)
1926 {
1927    struct pipe_draw_start_count_bias *draws = calloc(cmd->u.draw_multi_ext.draw_count,
1928                                                      sizeof(*draws));
1929
1930    state->info.index_size = 0;
1931    state->info.index.resource = NULL;
1932    state->info.start_instance = cmd->u.draw_multi_ext.first_instance;
1933    state->info.instance_count = cmd->u.draw_multi_ext.instance_count;
1934    if (cmd->u.draw_multi_ext.draw_count > 1)
1935       state->info.increment_draw_id = true;
1936
1937    for(unsigned i = 0; i < cmd->u.draw_multi_ext.draw_count; i++) {
1938       draws[i].start = cmd->u.draw_multi_ext.vertex_info[i].firstVertex;
1939       draws[i].count = cmd->u.draw_multi_ext.vertex_info[i].vertexCount;
1940       draws[i].index_bias = 0;
1941    }
1942
1943    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
1944
1945    if (cmd->u.draw_multi_ext.draw_count)
1946       state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, draws, cmd->u.draw_multi_ext.draw_count);
1947
1948    free(draws);
1949 }
1950
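/* Shared helper for vkCmdSetViewport and vkCmdSetViewportWithCount;
 * first_viewport == UINT32_MAX is the sentinel for the with-count variant,
 * which also replaces num_viewports.
 */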
1951 static void set_viewport(unsigned first_viewport, unsigned viewport_count,
1952                          const VkViewport* viewports,
1953                          struct rendering_state *state)
1954 {
1955    int i;
1956    unsigned base = 0;
1957    if (first_viewport == UINT32_MAX)
1958       state->num_viewports = viewport_count;
1959    else
1960       base = first_viewport;
1961
1962    for (i = 0; i < viewport_count; i++) {
1963       int idx = i + base;
1964       const VkViewport *vp = &viewports[i];
1965       get_viewport_xform(state, vp, idx);
1966       set_viewport_depth_xform(state, idx);
1967    }
1968    state->vp_dirty = true;
1969 }
1970
1971 static void handle_set_viewport(struct vk_cmd_queue_entry *cmd,
1972                                 struct rendering_state *state)
1973 {
1974    set_viewport(cmd->u.set_viewport.first_viewport,
1975                 cmd->u.set_viewport.viewport_count,
1976                 cmd->u.set_viewport.viewports,
1977                 state);
1978 }
1979
1980 static void handle_set_viewport_with_count(struct vk_cmd_queue_entry *cmd,
1981                                            struct rendering_state *state)
1982 {
1983    set_viewport(UINT32_MAX,
1984                 cmd->u.set_viewport_with_count.viewport_count,
1985                 cmd->u.set_viewport_with_count.viewports,
1986                 state);
1987 }
1988
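/* Same pattern as set_viewport(): UINT32_MAX selects the with-count
 * variant, which also replaces num_scissors.
 */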
1989 static void set_scissor(unsigned first_scissor,
1990                         unsigned scissor_count,
1991                         const VkRect2D *scissors,
1992                         struct rendering_state *state)
1993 {
1994    int i;
1995    unsigned base = 0;
1996    if (first_scissor == UINT32_MAX)
1997       state->num_scissors = scissor_count;
1998    else
1999       base = first_scissor;
2000
2001    for (i = 0; i < scissor_count; i++) {
2002       int idx = i + base;
2003       const VkRect2D *ss = &scissors[i];
2004       state->scissors[idx].minx = ss->offset.x;
2005       state->scissors[idx].miny = ss->offset.y;
2006       state->scissors[idx].maxx = ss->offset.x + ss->extent.width;
2007       state->scissors[idx].maxy = ss->offset.y + ss->extent.height;
2008    }
2009    state->scissor_dirty = true;
2010 }
2011
2012 static void handle_set_scissor(struct vk_cmd_queue_entry *cmd,
2013                                struct rendering_state *state)
2014 {
2015    set_scissor(cmd->u.set_scissor.first_scissor,
2016                cmd->u.set_scissor.scissor_count,
2017                cmd->u.set_scissor.scissors,
2018                state);
2019 }
2020
2021 static void handle_set_scissor_with_count(struct vk_cmd_queue_entry *cmd,
2022                                           struct rendering_state *state)
2023 {
2024    set_scissor(UINT32_MAX,
2025                cmd->u.set_scissor_with_count.scissor_count,
2026                cmd->u.set_scissor_with_count.scissors,
2027                state);
2028 }
2029
2030 static void handle_set_line_width(struct vk_cmd_queue_entry *cmd,
2031                                   struct rendering_state *state)
2032 {
2033    state->rs_state.line_width = cmd->u.set_line_width.line_width;
2034    state->rs_dirty = true;
2035 }
2036
2037 static void handle_set_depth_bias(struct vk_cmd_queue_entry *cmd,
2038                                   struct rendering_state *state)
2039 {
2040    state->depth_bias.offset_units = cmd->u.set_depth_bias.depth_bias_constant_factor;
2041    state->depth_bias.offset_scale = cmd->u.set_depth_bias.depth_bias_slope_factor;
2042    state->depth_bias.offset_clamp = cmd->u.set_depth_bias.depth_bias_clamp;
2043    state->rs_dirty = true;
2044 }
2045
2046 static void handle_set_blend_constants(struct vk_cmd_queue_entry *cmd,
2047                                        struct rendering_state *state)
2048 {
2049    memcpy(state->blend_color.color, cmd->u.set_blend_constants.blend_constants, 4 * sizeof(float));
2050    state->blend_color_dirty = true;
2051 }
2052
2053 static void handle_set_depth_bounds(struct vk_cmd_queue_entry *cmd,
2054                                     struct rendering_state *state)
2055 {
2056    state->dsa_dirty |= !DOUBLE_EQ(state->dsa_state.depth_bounds_min, cmd->u.set_depth_bounds.min_depth_bounds);
2057    state->dsa_dirty |= !DOUBLE_EQ(state->dsa_state.depth_bounds_max, cmd->u.set_depth_bounds.max_depth_bounds);
2058    state->dsa_state.depth_bounds_min = cmd->u.set_depth_bounds.min_depth_bounds;
2059    state->dsa_state.depth_bounds_max = cmd->u.set_depth_bounds.max_depth_bounds;
2060 }
2061
2062 static void handle_set_stencil_compare_mask(struct vk_cmd_queue_entry *cmd,
2063                                             struct rendering_state *state)
2064 {
2065    if (cmd->u.set_stencil_compare_mask.face_mask & VK_STENCIL_FACE_FRONT_BIT)
2066       state->dsa_state.stencil[0].valuemask = cmd->u.set_stencil_compare_mask.compare_mask;
2067    if (cmd->u.set_stencil_compare_mask.face_mask & VK_STENCIL_FACE_BACK_BIT)
2068       state->dsa_state.stencil[1].valuemask = cmd->u.set_stencil_compare_mask.compare_mask;
2069    state->dsa_dirty = true;
2070 }
2071
2072 static void handle_set_stencil_write_mask(struct vk_cmd_queue_entry *cmd,
2073                                           struct rendering_state *state)
2074 {
2075    if (cmd->u.set_stencil_write_mask.face_mask & VK_STENCIL_FACE_FRONT_BIT)
2076       state->dsa_state.stencil[0].writemask = cmd->u.set_stencil_write_mask.write_mask;
2077    if (cmd->u.set_stencil_write_mask.face_mask & VK_STENCIL_FACE_BACK_BIT)
2078       state->dsa_state.stencil[1].writemask = cmd->u.set_stencil_write_mask.write_mask;
2079    state->dsa_dirty = true;
2080 }
2081
2082 static void handle_set_stencil_reference(struct vk_cmd_queue_entry *cmd,
2083                                          struct rendering_state *state)
2084 {
2085    if (cmd->u.set_stencil_reference.face_mask & VK_STENCIL_FACE_FRONT_BIT)
2086       state->stencil_ref.ref_value[0] = cmd->u.set_stencil_reference.reference;
2087    if (cmd->u.set_stencil_reference.face_mask & VK_STENCIL_FACE_BACK_BIT)
2088       state->stencil_ref.ref_value[1] = cmd->u.set_stencil_reference.reference;
2089    state->stencil_ref_dirty = true;
2090 }
2091
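/* Copy a rect between mismatched depth/stencil layouts that
 * resource_copy_region can't handle, repacking through the util_format
 * z/s (un)pack helpers, e.g. extracting S8 from Z24S8 or merging Z32F
 * into Z32F_S8X24.
 */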
2092 static void
2093 copy_depth_rect(ubyte *dst,
2094                 enum pipe_format dst_format,
2095                 unsigned dst_stride,
2096                 unsigned dst_x,
2097                 unsigned dst_y,
2098                 unsigned width,
2099                 unsigned height,
2100                 const ubyte *src,
2101                 enum pipe_format src_format,
2102                 int src_stride,
2103                 unsigned src_x,
2104                 unsigned src_y)
2105 {
2106    int src_stride_pos = src_stride < 0 ? -src_stride : src_stride;
2107    int src_blocksize = util_format_get_blocksize(src_format);
2108    int src_blockwidth = util_format_get_blockwidth(src_format);
2109    int src_blockheight = util_format_get_blockheight(src_format);
2110    int dst_blocksize = util_format_get_blocksize(dst_format);
2111    int dst_blockwidth = util_format_get_blockwidth(dst_format);
2112    int dst_blockheight = util_format_get_blockheight(dst_format);
2113
2114    assert(src_blocksize > 0);
2115    assert(src_blockwidth > 0);
2116    assert(src_blockheight > 0);
2117
2118    dst_x /= dst_blockwidth;
2119    dst_y /= dst_blockheight;
2120    width = (width + src_blockwidth - 1)/src_blockwidth;
2121    height = (height + src_blockheight - 1)/src_blockheight;
2122    src_x /= src_blockwidth;
2123    src_y /= src_blockheight;
2124
2125    dst += dst_x * dst_blocksize;
2126    src += src_x * src_blocksize;
2127    dst += dst_y * dst_stride;
2128    src += src_y * src_stride_pos;
2129
2130    if (dst_format == PIPE_FORMAT_S8_UINT) {
2131       if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2132          util_format_z32_float_s8x24_uint_unpack_s_8uint(dst, dst_stride,
2133                                                          src, src_stride,
2134                                                          width, height);
2135       } else if (src_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
2136          util_format_z24_unorm_s8_uint_unpack_s_8uint(dst, dst_stride,
2137                                                       src, src_stride,
2138                                                       width, height);
2139       }
2141    } else if (dst_format == PIPE_FORMAT_Z24X8_UNORM) {
2142       util_format_z24_unorm_s8_uint_unpack_z24(dst, dst_stride,
2143                                                src, src_stride,
2144                                                width, height);
2145    } else if (dst_format == PIPE_FORMAT_Z32_FLOAT) {
2146       if (src_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2147          util_format_z32_float_s8x24_uint_unpack_z_float((float *)dst, dst_stride,
2148                                                          src, src_stride,
2149                                                          width, height);
2150       }
2151    } else if (dst_format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
2152       if (src_format == PIPE_FORMAT_Z32_FLOAT)
2153          util_format_z32_float_s8x24_uint_pack_z_float(dst, dst_stride,
2154                                                        (float *)src, src_stride,
2155                                                        width, height);
2156       else if (src_format == PIPE_FORMAT_S8_UINT)
2157          util_format_z32_float_s8x24_uint_pack_s_8uint(dst, dst_stride,
2158                                                        src, src_stride,
2159                                                        width, height);
2160    } else if (dst_format == PIPE_FORMAT_Z24_UNORM_S8_UINT) {
2161       if (src_format == PIPE_FORMAT_S8_UINT)
2162          util_format_z24_unorm_s8_uint_pack_s_8uint(dst, dst_stride,
2163                                                     src, src_stride,
2164                                                     width, height);
2165       if (src_format == PIPE_FORMAT_Z24X8_UNORM)
2166          util_format_z24_unorm_s8_uint_pack_z24(dst, dst_stride,
2167                                                 src, src_stride,
2168                                                 width, height);
2169    }
2170 }
2171
2172 static void
2173 copy_depth_box(ubyte *dst,
2174                enum pipe_format dst_format,
2175                unsigned dst_stride, unsigned dst_slice_stride,
2176                unsigned dst_x, unsigned dst_y, unsigned dst_z,
2177                unsigned width, unsigned height, unsigned depth,
2178                const ubyte *src,
2179                enum pipe_format src_format,
2180                int src_stride, unsigned src_slice_stride,
2181                unsigned src_x, unsigned src_y, unsigned src_z)
2182 {
2183    unsigned z;
2184    dst += dst_z * dst_slice_stride;
2185    src += src_z * src_slice_stride;
2186    for (z = 0; z < depth; ++z) {
2187       copy_depth_rect(dst,
2188                       dst_format,
2189                       dst_stride,
2190                       dst_x, dst_y,
2191                       width, height,
2192                       src,
2193                       src_format,
2194                       src_stride,
2195                       src_x, src_y);
2196
2197       dst += dst_slice_stride;
2198       src += src_slice_stride;
2199    }
2200 }
2201
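/* vkCmdCopyImageToBuffer2: map the image for reading and the buffer for
 * writing, then copy with util_copy_box, detouring through copy_depth_box
 * when copying a single aspect of a packed depth/stencil format requires
 * repacking.  Buffer-side strides come from vk_image_buffer_copy_layout().
 */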
2202 static void handle_copy_image_to_buffer2(struct vk_cmd_queue_entry *cmd,
2203                                          struct rendering_state *state)
2204 {
2205    int i;
2206    struct VkCopyImageToBufferInfo2 *copycmd = cmd->u.copy_image_to_buffer2.copy_image_to_buffer_info;
2207    LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
2208    struct pipe_box box, dbox;
2209    struct pipe_transfer *src_t, *dst_t;
2210    ubyte *src_data, *dst_data;
2211
2212    for (i = 0; i < copycmd->regionCount; i++) {
2213
2214       box.x = copycmd->pRegions[i].imageOffset.x;
2215       box.y = copycmd->pRegions[i].imageOffset.y;
2216       box.z = src_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageOffset.z : copycmd->pRegions[i].imageSubresource.baseArrayLayer;
2217       box.width = copycmd->pRegions[i].imageExtent.width;
2218       box.height = copycmd->pRegions[i].imageExtent.height;
2219       box.depth = src_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageExtent.depth : copycmd->pRegions[i].imageSubresource.layerCount;
2220
2221       src_data = state->pctx->texture_map(state->pctx,
2222                                            src_image->bo,
2223                                            copycmd->pRegions[i].imageSubresource.mipLevel,
2224                                            PIPE_MAP_READ,
2225                                            &box,
2226                                            &src_t);
2227
2228       dbox.x = copycmd->pRegions[i].bufferOffset;
2229       dbox.y = 0;
2230       dbox.z = 0;
2231       dbox.width = lvp_buffer_from_handle(copycmd->dstBuffer)->bo->width0 - copycmd->pRegions[i].bufferOffset;
2232       dbox.height = 1;
2233       dbox.depth = 1;
2234       dst_data = state->pctx->buffer_map(state->pctx,
2235                                            lvp_buffer_from_handle(copycmd->dstBuffer)->bo,
2236                                            0,
2237                                            PIPE_MAP_WRITE,
2238                                            &dbox,
2239                                            &dst_t);
2240
2241       enum pipe_format src_format = src_image->bo->format;
2242       enum pipe_format dst_format = src_format;
2243       if (util_format_is_depth_or_stencil(src_format)) {
2244          if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
2245             dst_format = util_format_get_depth_only(src_format);
2246          } else if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
2247             dst_format = PIPE_FORMAT_S8_UINT;
2248          }
2249       }
2250
2251       const struct vk_image_buffer_layout buffer_layout =
2252          vk_image_buffer_copy_layout(&src_image->vk, &copycmd->pRegions[i]);
2253       if (src_format != dst_format) {
2254          copy_depth_box(dst_data, dst_format,
2255                         buffer_layout.row_stride_B,
2256                         buffer_layout.image_stride_B,
2257                         0, 0, 0,
2258                         copycmd->pRegions[i].imageExtent.width,
2259                         copycmd->pRegions[i].imageExtent.height,
2260                         box.depth,
2261                         src_data, src_format, src_t->stride, src_t->layer_stride, 0, 0, 0);
2262       } else {
2263          util_copy_box(dst_data, src_format,
2264                        buffer_layout.row_stride_B,
2265                        buffer_layout.image_stride_B,
2266                        0, 0, 0,
2267                        copycmd->pRegions[i].imageExtent.width,
2268                        copycmd->pRegions[i].imageExtent.height,
2269                        box.depth,
2270                        src_data, src_t->stride, src_t->layer_stride, 0, 0, 0);
2271       }
2272       state->pctx->texture_unmap(state->pctx, src_t);
2273       state->pctx->buffer_unmap(state->pctx, dst_t);
2274    }
2275 }
2276
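/* vkCmdCopyBufferToImage2: the inverse of the copy above, with the same
 * single-aspect depth/stencil repacking.
 */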
2277 static void handle_copy_buffer_to_image(struct vk_cmd_queue_entry *cmd,
2278                                         struct rendering_state *state)
2279 {
2280    int i;
2281    struct VkCopyBufferToImageInfo2 *copycmd = cmd->u.copy_buffer_to_image2.copy_buffer_to_image_info;
2282    LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
2283    struct pipe_box box, sbox;
2284    struct pipe_transfer *src_t, *dst_t;
2285    void *src_data, *dst_data;
2286
2287    for (i = 0; i < copycmd->regionCount; i++) {
2288
2289       sbox.x = copycmd->pRegions[i].bufferOffset;
2290       sbox.y = 0;
2291       sbox.z = 0;
2292       sbox.width = lvp_buffer_from_handle(copycmd->srcBuffer)->bo->width0;
2293       sbox.height = 1;
2294       sbox.depth = 1;
2295       src_data = state->pctx->buffer_map(state->pctx,
2296                                            lvp_buffer_from_handle(copycmd->srcBuffer)->bo,
2297                                            0,
2298                                            PIPE_MAP_READ,
2299                                            &sbox,
2300                                            &src_t);
2301
2303       box.x = copycmd->pRegions[i].imageOffset.x;
2304       box.y = copycmd->pRegions[i].imageOffset.y;
2305       box.z = dst_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageOffset.z : copycmd->pRegions[i].imageSubresource.baseArrayLayer;
2306       box.width = copycmd->pRegions[i].imageExtent.width;
2307       box.height = copycmd->pRegions[i].imageExtent.height;
2308       box.depth = dst_image->vk.image_type == VK_IMAGE_TYPE_3D ? copycmd->pRegions[i].imageExtent.depth : copycmd->pRegions[i].imageSubresource.layerCount;
2309
2310       dst_data = state->pctx->texture_map(state->pctx,
2311                                            dst_image->bo,
2312                                            copycmd->pRegions[i].imageSubresource.mipLevel,
2313                                            PIPE_MAP_WRITE,
2314                                            &box,
2315                                            &dst_t);
2316
2317       enum pipe_format dst_format = dst_image->bo->format;
2318       enum pipe_format src_format = dst_format;
2319       if (util_format_is_depth_or_stencil(dst_format)) {
2320          if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) {
2321             src_format = util_format_get_depth_only(dst_image->bo->format);
2322          } else if (copycmd->pRegions[i].imageSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) {
2323             src_format = PIPE_FORMAT_S8_UINT;
2324          }
2325       }
2326
2327       const struct vk_image_buffer_layout buffer_layout =
2328          vk_image_buffer_copy_layout(&dst_image->vk, &copycmd->pRegions[i]);
2329       if (src_format != dst_format) {
2330          copy_depth_box(dst_data, dst_format,
2331                         dst_t->stride, dst_t->layer_stride,
2332                         0, 0, 0,
2333                         copycmd->pRegions[i].imageExtent.width,
2334                         copycmd->pRegions[i].imageExtent.height,
2335                         box.depth,
2336                         src_data, src_format,
2337                         buffer_layout.row_stride_B,
2338                         buffer_layout.image_stride_B,
2339                         0, 0, 0);
2340       } else {
2341          util_copy_box(dst_data, dst_format,
2342                        dst_t->stride, dst_t->layer_stride,
2343                        0, 0, 0,
2344                        copycmd->pRegions[i].imageExtent.width,
2345                        copycmd->pRegions[i].imageExtent.height,
2346                        box.depth,
2347                        src_data,
2348                        buffer_layout.row_stride_B,
2349                        buffer_layout.image_stride_B,
2350                        0, 0, 0);
2351       }
2352       state->pctx->buffer_unmap(state->pctx, src_t);
2353       state->pctx->texture_unmap(state->pctx, dst_t);
2354    }
2355 }
2356
2357 static void handle_copy_image(struct vk_cmd_queue_entry *cmd,
2358                               struct rendering_state *state)
2359 {
2360    int i;
2361    struct VkCopyImageInfo2 *copycmd = cmd->u.copy_image2.copy_image_info;
2362    LVP_FROM_HANDLE(lvp_image, src_image, copycmd->srcImage);
2363    LVP_FROM_HANDLE(lvp_image, dst_image, copycmd->dstImage);
2364
2365    for (i = 0; i < copycmd->regionCount; i++) {
2366       struct pipe_box src_box;
2367       src_box.x = copycmd->pRegions[i].srcOffset.x;
2368       src_box.y = copycmd->pRegions[i].srcOffset.y;
2369       src_box.width = copycmd->pRegions[i].extent.width;
2370       src_box.height = copycmd->pRegions[i].extent.height;
2371       if (src_image->bo->target == PIPE_TEXTURE_3D) {
2372          src_box.depth = copycmd->pRegions[i].extent.depth;
2373          src_box.z = copycmd->pRegions[i].srcOffset.z;
2374       } else {
2375          src_box.depth = copycmd->pRegions[i].srcSubresource.layerCount;
2376          src_box.z = copycmd->pRegions[i].srcSubresource.baseArrayLayer;
2377       }
2378
2379       unsigned dstz = dst_image->bo->target == PIPE_TEXTURE_3D ?
2380                       copycmd->pRegions[i].dstOffset.z :
2381                       copycmd->pRegions[i].dstSubresource.baseArrayLayer;
2382       state->pctx->resource_copy_region(state->pctx, dst_image->bo,
2383                                         copycmd->pRegions[i].dstSubresource.mipLevel,
2384                                         copycmd->pRegions[i].dstOffset.x,
2385                                         copycmd->pRegions[i].dstOffset.y,
2386                                         dstz,
2387                                         src_image->bo,
2388                                         copycmd->pRegions[i].srcSubresource.mipLevel,
2389                                         &src_box);
2390    }
2391 }
2392
2393 static void handle_copy_buffer(struct vk_cmd_queue_entry *cmd,
2394                                struct rendering_state *state)
2395 {
2396    int i;
2397    VkCopyBufferInfo2 *copycmd = cmd->u.copy_buffer2.copy_buffer_info;
2398
2399    for (i = 0; i < copycmd->regionCount; i++) {
2400       struct pipe_box box = { 0 };
2401       u_box_1d(copycmd->pRegions[i].srcOffset, copycmd->pRegions[i].size, &box);
2402       state->pctx->resource_copy_region(state->pctx, lvp_buffer_from_handle(copycmd->dstBuffer)->bo, 0,
2403                                         copycmd->pRegions[i].dstOffset, 0, 0,
2404                                         lvp_buffer_from_handle(copycmd->srcBuffer)->bo, 0, &box);
2405    }
2406 }
2407
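/* vkCmdBlitImage2: translate each region into a pipe_blit_info.  The dst
 * box is normalized to positive extents, so a coordinate flip shows up as
 * a negative src box size, which the gallium blit path treats as a
 * mirrored blit.
 */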
2408 static void handle_blit_image(struct vk_cmd_queue_entry *cmd,
2409                               struct rendering_state *state)
2410 {
2411    int i;
2412    VkBlitImageInfo2 *blitcmd = cmd->u.blit_image2.blit_image_info;
2413    LVP_FROM_HANDLE(lvp_image, src_image, blitcmd->srcImage);
2414    LVP_FROM_HANDLE(lvp_image, dst_image, blitcmd->dstImage);
2415    struct pipe_blit_info info;
2416
2417    memset(&info, 0, sizeof(info));
2418
2419    info.src.resource = src_image->bo;
2420    info.dst.resource = dst_image->bo;
2421    info.src.format = src_image->bo->format;
2422    info.dst.format = dst_image->bo->format;
2423    info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
2424    info.filter = blitcmd->filter == VK_FILTER_NEAREST ? PIPE_TEX_FILTER_NEAREST : PIPE_TEX_FILTER_LINEAR;
2425    for (i = 0; i < blitcmd->regionCount; i++) {
2426       int srcX0, srcX1, srcY0, srcY1, srcZ0, srcZ1;
2427       unsigned dstX0, dstX1, dstY0, dstY1, dstZ0, dstZ1;
2428
2429       srcX0 = blitcmd->pRegions[i].srcOffsets[0].x;
2430       srcX1 = blitcmd->pRegions[i].srcOffsets[1].x;
2431       srcY0 = blitcmd->pRegions[i].srcOffsets[0].y;
2432       srcY1 = blitcmd->pRegions[i].srcOffsets[1].y;
2433       srcZ0 = blitcmd->pRegions[i].srcOffsets[0].z;
2434       srcZ1 = blitcmd->pRegions[i].srcOffsets[1].z;
2435
2436       dstX0 = blitcmd->pRegions[i].dstOffsets[0].x;
2437       dstX1 = blitcmd->pRegions[i].dstOffsets[1].x;
2438       dstY0 = blitcmd->pRegions[i].dstOffsets[0].y;
2439       dstY1 = blitcmd->pRegions[i].dstOffsets[1].y;
2440       dstZ0 = blitcmd->pRegions[i].dstOffsets[0].z;
2441       dstZ1 = blitcmd->pRegions[i].dstOffsets[1].z;
2442
2443       if (dstX0 < dstX1) {
2444          info.dst.box.x = dstX0;
2445          info.src.box.x = srcX0;
2446          info.dst.box.width = dstX1 - dstX0;
2447          info.src.box.width = srcX1 - srcX0;
2448       } else {
2449          info.dst.box.x = dstX1;
2450          info.src.box.x = srcX1;
2451          info.dst.box.width = dstX0 - dstX1;
2452          info.src.box.width = srcX0 - srcX1;
2453       }
2454
2455       if (dstY0 < dstY1) {
2456          info.dst.box.y = dstY0;
2457          info.src.box.y = srcY0;
2458          info.dst.box.height = dstY1 - dstY0;
2459          info.src.box.height = srcY1 - srcY0;
2460       } else {
2461          info.dst.box.y = dstY1;
2462          info.src.box.y = srcY1;
2463          info.dst.box.height = dstY0 - dstY1;
2464          info.src.box.height = srcY0 - srcY1;
2465       }
2466
2467       assert_subresource_layers(info.src.resource, &blitcmd->pRegions[i].srcSubresource, blitcmd->pRegions[i].srcOffsets);
2468       assert_subresource_layers(info.dst.resource, &blitcmd->pRegions[i].dstSubresource, blitcmd->pRegions[i].dstOffsets);
2469       if (src_image->bo->target == PIPE_TEXTURE_3D) {
2470          if (dstZ0 < dstZ1) {
2471             info.dst.box.z = dstZ0;
2472             info.src.box.z = srcZ0;
2473             info.dst.box.depth = dstZ1 - dstZ0;
2474             info.src.box.depth = srcZ1 - srcZ0;
2475          } else {
2476             info.dst.box.z = dstZ1;
2477             info.src.box.z = srcZ1;
2478             info.dst.box.depth = dstZ0 - dstZ1;
2479             info.src.box.depth = srcZ0 - srcZ1;
2480          }
2481       } else {
2482          info.src.box.z = blitcmd->pRegions[i].srcSubresource.baseArrayLayer;
2483          info.dst.box.z = blitcmd->pRegions[i].dstSubresource.baseArrayLayer;
2484          info.src.box.depth = blitcmd->pRegions[i].srcSubresource.layerCount;
2485          info.dst.box.depth = blitcmd->pRegions[i].dstSubresource.layerCount;
2486       }
2487
2488       info.src.level = blitcmd->pRegions[i].srcSubresource.mipLevel;
2489       info.dst.level = blitcmd->pRegions[i].dstSubresource.mipLevel;
2490       state->pctx->blit(state->pctx, &info);
2491    }
2492 }
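     /* The box normalization above exists because VkImageBlit2 expresses a
      * mirrored blit via offsets[1] < offsets[0], while the gallium blitter
      * wants a dst box with positive extents and encodes any flip as a
      * negative width/height on the src box.  Sketch for the X axis:
      *
      *    dst: [10,20], src: [30,20] -> dst.box.x = 10, dst.box.width =  10,
      *                                  src.box.x = 30, src.box.width = -10
      *
      * i.e. the dst box is normalized and the sign of the src box carries
      * the mirroring.
      */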
2493
2494 static void handle_fill_buffer(struct vk_cmd_queue_entry *cmd,
2495                                struct rendering_state *state)
2496 {
2497    struct vk_cmd_fill_buffer *fillcmd = &cmd->u.fill_buffer;
2498    uint32_t size = fillcmd->size;
2499
2500    if (fillcmd->size == VK_WHOLE_SIZE) {
2501       size = lvp_buffer_from_handle(fillcmd->dst_buffer)->bo->width0 - fillcmd->dst_offset;
2502       size = ROUND_DOWN_TO(size, 4);
2503    }
2504
2505    state->pctx->clear_buffer(state->pctx,
2506                              lvp_buffer_from_handle(fillcmd->dst_buffer)->bo,
2507                              fillcmd->dst_offset,
2508                              size,
2509                              &fillcmd->data,
2510                              4);
2511 }
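     /* vkCmdFillBuffer fills in 4-byte words, and for VK_WHOLE_SIZE the spec
      * rounds the remaining range down to a multiple of four -- that is what
      * ROUND_DOWN_TO implements above.  E.g. filling a 10-byte buffer from
      * offset 0 yields size = 8, leaving the trailing two bytes untouched.
      */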
2512
2513 static void handle_update_buffer(struct vk_cmd_queue_entry *cmd,
2514                                  struct rendering_state *state)
2515 {
2516    struct vk_cmd_update_buffer *updcmd = &cmd->u.update_buffer;
2517    uint32_t *dst;
2518    struct pipe_transfer *dst_t;
2519    struct pipe_box box;
2520
2521    u_box_1d(updcmd->dst_offset, updcmd->data_size, &box);
2522    dst = state->pctx->buffer_map(state->pctx,
2523                                    lvp_buffer_from_handle(updcmd->dst_buffer)->bo,
2524                                    0,
2525                                    PIPE_MAP_WRITE,
2526                                    &box,
2527                                    &dst_t);
2528
2529    memcpy(dst, updcmd->data, updcmd->data_size);
2530    state->pctx->buffer_unmap(state->pctx, dst_t);
2531 }
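     /* vkCmdUpdateBuffer payloads are small (at most 65536 bytes per the
      * spec) and were already copied into the enqueued command by the common
      * vk_cmd_enqueue layer, so a plain map/memcpy/unmap through a transient
      * pipe_transfer is all that is needed here.
      */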
2532
2533 static void handle_draw_indexed(struct vk_cmd_queue_entry *cmd,
2534                                 struct rendering_state *state)
2535 {
2536    struct pipe_draw_start_count_bias draw = {0};
2537
2538    state->info.index_bounds_valid = false;
2539    state->info.min_index = 0;
2540    state->info.max_index = ~0;
2541    state->info.index_size = state->index_size;
2542    state->info.index.resource = state->index_buffer;
2543    state->info.start_instance = cmd->u.draw_indexed.first_instance;
2544    state->info.instance_count = cmd->u.draw_indexed.instance_count;
2545
2546    if (state->info.primitive_restart)
2547       state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2548
2549    draw.count = cmd->u.draw_indexed.index_count;
2550    draw.index_bias = cmd->u.draw_indexed.vertex_offset;
2551    /* TODO: avoid calculating multiple times if cmdbuf is submitted again */
2552    draw.start = (state->index_offset / state->index_size) + cmd->u.draw_indexed.first_index;
2553
2554    state->info.index_bias_varies = !cmd->u.draw_indexed.vertex_offset;
2555    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2556    state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, &draw, 1);
2557 }
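     /* Gallium index buffers have no bind-time byte offset, so the offset
      * from vkCmdBindIndexBuffer is folded into draw.start as an element
      * count.  The division is exact because the spec requires the bind
      * offset to be a multiple of the index size, e.g. offset 256 with
      * 16-bit indices becomes start += 128.
      */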
2558
2559 static void handle_draw_multi_indexed(struct vk_cmd_queue_entry *cmd,
2560                                       struct rendering_state *state)
2561 {
2562    struct pipe_draw_start_count_bias *draws = calloc(cmd->u.draw_multi_indexed_ext.draw_count,
2563                                                      sizeof(*draws));
2564
2565    state->info.index_bounds_valid = false;
2566    state->info.min_index = 0;
2567    state->info.max_index = ~0;
2568    state->info.index_size = state->index_size;
2569    state->info.index.resource = state->index_buffer;
2570    state->info.start_instance = cmd->u.draw_multi_indexed_ext.first_instance;
2571    state->info.instance_count = cmd->u.draw_multi_indexed_ext.instance_count;
2572    if (cmd->u.draw_multi_indexed_ext.draw_count > 1)
2573       state->info.increment_draw_id = true;
2574
2575    if (state->info.primitive_restart)
2576       state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2577
2578    unsigned size = cmd->u.draw_multi_indexed_ext.draw_count * sizeof(struct pipe_draw_start_count_bias);
2579    memcpy(draws, cmd->u.draw_multi_indexed_ext.index_info, size);
2580
2581    /* only draws[0].index_bias is read if index_bias_varies is false */
2582    if (cmd->u.draw_multi_indexed_ext.draw_count &&
2583        cmd->u.draw_multi_indexed_ext.vertex_offset)
2584       draws[0].index_bias = *cmd->u.draw_multi_indexed_ext.vertex_offset;
2585
2586    /* TODO: avoid calculating multiple times if cmdbuf is submitted again */
2587    for (unsigned i = 0; i < cmd->u.draw_multi_indexed_ext.draw_count; i++)
2588       draws[i].start = (state->index_offset / state->index_size) + draws[i].start;
2589
2590    state->info.index_bias_varies = !cmd->u.draw_multi_indexed_ext.vertex_offset;
2591    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2592
2593    if (cmd->u.draw_multi_indexed_ext.draw_count)
2594       state->pctx->draw_vbo(state->pctx, &state->info, 0, NULL, draws, cmd->u.draw_multi_indexed_ext.draw_count);
2595
2596    free(draws);
2597 }
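     /* The memcpy above works because VkMultiDrawIndexedInfoEXT
      * { firstIndex, indexCount, vertexOffset } has the same layout as
      * pipe_draw_start_count_bias { start, count, index_bias }, so the whole
      * array can be copied wholesale and only the start fields fixed up.
      * When the app passes pVertexOffset, VK_EXT_multi_draw applies that one
      * offset to every draw -- exactly the !index_bias_varies case above.
      */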
2598
2599 static void handle_draw_indirect(struct vk_cmd_queue_entry *cmd,
2600                                  struct rendering_state *state, bool indexed)
2601 {
2602    struct pipe_draw_start_count_bias draw = {0};
2603    if (indexed) {
2604       state->info.index_bounds_valid = false;
2605       state->info.index_size = state->index_size;
2606       state->info.index.resource = state->index_buffer;
2607       state->info.max_index = ~0;
2608       if (state->info.primitive_restart)
2609          state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
2610    } else
2611       state->info.index_size = 0;
2612    state->indirect_info.offset = cmd->u.draw_indirect.offset;
2613    state->indirect_info.stride = cmd->u.draw_indirect.stride;
2614    state->indirect_info.draw_count = cmd->u.draw_indirect.draw_count;
2615    state->indirect_info.buffer = lvp_buffer_from_handle(cmd->u.draw_indirect.buffer)->bo;
2616
2617    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
2618    state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
2619 }
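     /* No unpacking of the indirect buffer is required: gallium consumes the
      * same indirect command layouts as Vulkan, i.e.
      *
      *    VkDrawIndirectCommand        { vertexCount, instanceCount,
      *                                   firstVertex, firstInstance }
      *    VkDrawIndexedIndirectCommand { indexCount, instanceCount,
      *                                   firstIndex, vertexOffset,
      *                                   firstInstance }
      *
      * so the buffer is handed to draw_vbo as-is through indirect_info.
      */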
2620
2621 static void handle_index_buffer(struct vk_cmd_queue_entry *cmd,
2622                                 struct rendering_state *state)
2623 {
2624    struct vk_cmd_bind_index_buffer *ib = &cmd->u.bind_index_buffer;
2625    switch (ib->index_type) {
2626    case VK_INDEX_TYPE_UINT8_EXT:
2627       state->index_size = 1;
2628       break;
2629    case VK_INDEX_TYPE_UINT16:
2630       state->index_size = 2;
2631       break;
2632    case VK_INDEX_TYPE_UINT32:
2633       state->index_size = 4;
2634       break;
2635    default:
2636       break;
2637    }
2638    state->index_offset = ib->offset;
2639    if (ib->buffer)
2640       state->index_buffer = lvp_buffer_from_handle(ib->buffer)->bo;
2641    else
2642       state->index_buffer = NULL;
2643
2644    state->ib_dirty = true;
2645 }
2646
2647 static void handle_dispatch(struct vk_cmd_queue_entry *cmd,
2648                             struct rendering_state *state)
2649 {
2650    state->dispatch_info.grid[0] = cmd->u.dispatch.group_count_x;
2651    state->dispatch_info.grid[1] = cmd->u.dispatch.group_count_y;
2652    state->dispatch_info.grid[2] = cmd->u.dispatch.group_count_z;
2653    state->dispatch_info.grid_base[0] = 0;
2654    state->dispatch_info.grid_base[1] = 0;
2655    state->dispatch_info.grid_base[2] = 0;
2656    state->dispatch_info.indirect = NULL;
2657    state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2658 }
2659
2660 static void handle_dispatch_base(struct vk_cmd_queue_entry *cmd,
2661                                  struct rendering_state *state)
2662 {
2663    state->dispatch_info.grid[0] = cmd->u.dispatch_base.group_count_x;
2664    state->dispatch_info.grid[1] = cmd->u.dispatch_base.group_count_y;
2665    state->dispatch_info.grid[2] = cmd->u.dispatch_base.group_count_z;
2666    state->dispatch_info.grid_base[0] = cmd->u.dispatch_base.base_group_x;
2667    state->dispatch_info.grid_base[1] = cmd->u.dispatch_base.base_group_y;
2668    state->dispatch_info.grid_base[2] = cmd->u.dispatch_base.base_group_z;
2669    state->dispatch_info.indirect = NULL;
2670    state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2671 }
2672
2673 static void handle_dispatch_indirect(struct vk_cmd_queue_entry *cmd,
2674                                      struct rendering_state *state)
2675 {
2676    state->dispatch_info.indirect = lvp_buffer_from_handle(cmd->u.dispatch_indirect.buffer)->bo;
2677    state->dispatch_info.indirect_offset = cmd->u.dispatch_indirect.offset;
2678    state->pctx->launch_grid(state->pctx, &state->dispatch_info);
2679 }
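     /* VkDispatchIndirectCommand is just three uint32_t group counts, which
      * matches what gallium reads at indirect_offset, so the buffer is
      * passed straight through to launch_grid.
      */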
2680
2681 static void handle_push_constants(struct vk_cmd_queue_entry *cmd,
2682                                   struct rendering_state *state)
2683 {
2684    memcpy(state->push_constants + cmd->u.push_constants.offset, cmd->u.push_constants.values, cmd->u.push_constants.size);
2685
2686    VkShaderStageFlags stage_flags = cmd->u.push_constants.stage_flags;
2687    state->pcbuf_dirty[PIPE_SHADER_VERTEX] |= (stage_flags & VK_SHADER_STAGE_VERTEX_BIT) > 0;
2688    state->pcbuf_dirty[PIPE_SHADER_FRAGMENT] |= (stage_flags & VK_SHADER_STAGE_FRAGMENT_BIT) > 0;
2689    state->pcbuf_dirty[PIPE_SHADER_GEOMETRY] |= (stage_flags & VK_SHADER_STAGE_GEOMETRY_BIT) > 0;
2690    state->pcbuf_dirty[PIPE_SHADER_TESS_CTRL] |= (stage_flags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) > 0;
2691    state->pcbuf_dirty[PIPE_SHADER_TESS_EVAL] |= (stage_flags & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) > 0;
2692    state->pcbuf_dirty[PIPE_SHADER_COMPUTE] |= (stage_flags & VK_SHADER_STAGE_COMPUTE_BIT) > 0;
2693    state->inlines_dirty[PIPE_SHADER_VERTEX] |= (stage_flags & VK_SHADER_STAGE_VERTEX_BIT) > 0;
2694    state->inlines_dirty[PIPE_SHADER_FRAGMENT] |= (stage_flags & VK_SHADER_STAGE_FRAGMENT_BIT) > 0;
2695    state->inlines_dirty[PIPE_SHADER_GEOMETRY] |= (stage_flags & VK_SHADER_STAGE_GEOMETRY_BIT) > 0;
2696    state->inlines_dirty[PIPE_SHADER_TESS_CTRL] |= (stage_flags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) > 0;
2697    state->inlines_dirty[PIPE_SHADER_TESS_EVAL] |= (stage_flags & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) > 0;
2698    state->inlines_dirty[PIPE_SHADER_COMPUTE] |= (stage_flags & VK_SHADER_STAGE_COMPUTE_BIT) > 0;
2699 }
2700
2701 static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
2702                                    struct rendering_state *state);
2703
2704 static void handle_execute_commands(struct vk_cmd_queue_entry *cmd,
2705                                     struct rendering_state *state)
2706 {
2707    for (unsigned i = 0; i < cmd->u.execute_commands.command_buffer_count; i++) {
2708       LVP_FROM_HANDLE(lvp_cmd_buffer, secondary_buf, cmd->u.execute_commands.command_buffers[i]);
2709       lvp_execute_cmd_buffer(secondary_buf, state);
2710    }
2711 }
2712
2713 static void handle_event_set2(struct vk_cmd_queue_entry *cmd,
2714                              struct rendering_state *state)
2715 {
2716    LVP_FROM_HANDLE(lvp_event, event, cmd->u.set_event2.event);
2717
2718    VkPipelineStageFlags2 src_stage_mask = 0;
2719
2720    for (uint32_t i = 0; i < cmd->u.set_event2.dependency_info->memoryBarrierCount; i++)
2721       src_stage_mask |= cmd->u.set_event2.dependency_info->pMemoryBarriers[i].srcStageMask;
2722    for (uint32_t i = 0; i < cmd->u.set_event2.dependency_info->bufferMemoryBarrierCount; i++)
2723       src_stage_mask |= cmd->u.set_event2.dependency_info->pBufferMemoryBarriers[i].srcStageMask;
2724    for (uint32_t i = 0; i < cmd->u.set_event2.dependency_info->imageMemoryBarrierCount; i++)
2725       src_stage_mask |= cmd->u.set_event2.dependency_info->pImageMemoryBarriers[i].srcStageMask;
2726
2727    if (src_stage_mask & VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT)
2728       state->pctx->flush(state->pctx, NULL, 0);
2729    event->event_storage = 1;
2730 }
2731
2732 static void handle_event_reset2(struct vk_cmd_queue_entry *cmd,
2733                                struct rendering_state *state)
2734 {
2735    LVP_FROM_HANDLE(lvp_event, event, cmd->u.reset_event2.event);
2736
2737    if (cmd->u.reset_event2.stage_mask == VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT)
2738       state->pctx->flush(state->pctx, NULL, 0);
2739    event->event_storage = 0;
2740 }
2741
2742 static void handle_wait_events2(struct vk_cmd_queue_entry *cmd,
2743                                struct rendering_state *state)
2744 {
2745    finish_fence(state);
2746    for (unsigned i = 0; i < cmd->u.wait_events2.event_count; i++) {
2747       LVP_FROM_HANDLE(lvp_event, event, cmd->u.wait_events2.events[i]);
2748
2749       while (!event->event_storage);
2750    }
2751 }
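     /* Events can be signaled from the host (vkSetEvent) or from another
      * command buffer, so after draining our own prior work with
      * finish_fence() the only option left is to spin on the event storage.
      * A busy-wait is tolerable here since "device" and host share memory in
      * a software rasterizer.
      */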
2752
2753 static void handle_pipeline_barrier(struct vk_cmd_queue_entry *cmd,
2754                                     struct rendering_state *state)
2755 {
2756    finish_fence(state);
2757 }
2758
2759 static void handle_begin_query(struct vk_cmd_queue_entry *cmd,
2760                                struct rendering_state *state)
2761 {
2762    struct vk_cmd_begin_query *qcmd = &cmd->u.begin_query;
2763    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2764
2765    if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
2766        pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
2767       emit_compute_state(state);
2768
2769    emit_state(state);
2770
2771    if (!pool->queries[qcmd->query]) {
2772       enum pipe_query_type qtype = pool->base_type;
2773       pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2774                                                              qtype, 0);
2775    }
2776
2777    state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
2778 }
2779
2780 static void handle_end_query(struct vk_cmd_queue_entry *cmd,
2781                              struct rendering_state *state)
2782 {
2783    struct vk_cmd_end_query *qcmd = &cmd->u.end_query;
2784    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2785    assert(pool->queries[qcmd->query]);
2786
2787    state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2788 }
2789
2790
2791 static void handle_begin_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
2792                                            struct rendering_state *state)
2793 {
2794    struct vk_cmd_begin_query_indexed_ext *qcmd = &cmd->u.begin_query_indexed_ext;
2795    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2796
2797    if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
2798        pool->pipeline_stats & VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT)
2799       emit_compute_state(state);
2800
2801    emit_state(state);
2802
2803    if (!pool->queries[qcmd->query]) {
2804       enum pipe_query_type qtype = pool->base_type;
2805       pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2806                                                              qtype, qcmd->index);
2807    }
2808
2809    state->pctx->begin_query(state->pctx, pool->queries[qcmd->query]);
2810 }
2811
2812 static void handle_end_query_indexed_ext(struct vk_cmd_queue_entry *cmd,
2813                                          struct rendering_state *state)
2814 {
2815    struct vk_cmd_end_query_indexed_ext *qcmd = &cmd->u.end_query_indexed_ext;
2816    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2817    assert(pool->queries[qcmd->query]);
2818
2819    state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2820 }
2821
2822 static void handle_reset_query_pool(struct vk_cmd_queue_entry *cmd,
2823                                     struct rendering_state *state)
2824 {
2825    struct vk_cmd_reset_query_pool *qcmd = &cmd->u.reset_query_pool;
2826    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2827    for (unsigned i = qcmd->first_query; i < qcmd->first_query + qcmd->query_count; i++) {
2828       if (pool->queries[i]) {
2829          state->pctx->destroy_query(state->pctx, pool->queries[i]);
2830          pool->queries[i] = NULL;
2831       }
2832    }
2833 }
2834
2835 static void handle_write_timestamp2(struct vk_cmd_queue_entry *cmd,
2836                                     struct rendering_state *state)
2837 {
2838    struct vk_cmd_write_timestamp2 *qcmd = &cmd->u.write_timestamp2;
2839    LVP_FROM_HANDLE(lvp_query_pool, pool, qcmd->query_pool);
2840    if (!pool->queries[qcmd->query]) {
2841       pool->queries[qcmd->query] = state->pctx->create_query(state->pctx,
2842                                                              PIPE_QUERY_TIMESTAMP, 0);
2843    }
2844
2845    if (qcmd->stage != VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT)
2846       state->pctx->flush(state->pctx, NULL, 0);
2847    state->pctx->end_query(state->pctx, pool->queries[qcmd->query]);
2848
2849 }
2850
2851 static void handle_copy_query_pool_results(struct vk_cmd_queue_entry *cmd,
2852                                            struct rendering_state *state)
2853 {
2854    struct vk_cmd_copy_query_pool_results *copycmd = &cmd->u.copy_query_pool_results;
2855    LVP_FROM_HANDLE(lvp_query_pool, pool, copycmd->query_pool);
2856    enum pipe_query_flags flags = (copycmd->flags & VK_QUERY_RESULT_WAIT_BIT) ? PIPE_QUERY_WAIT : 0;
2857
2858    if (copycmd->flags & VK_QUERY_RESULT_PARTIAL_BIT)
2859       flags |= PIPE_QUERY_PARTIAL;
2860    unsigned result_size = copycmd->flags & VK_QUERY_RESULT_64_BIT ? 8 : 4;
2861    for (unsigned i = copycmd->first_query; i < copycmd->first_query + copycmd->query_count; i++) {
2862       unsigned offset = copycmd->dst_offset + (copycmd->stride * (i - copycmd->first_query));
2863       if (pool->queries[i]) {
2864          unsigned num_results = 0;
2865          if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
2866             if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
2867                num_results = util_bitcount(pool->pipeline_stats);
2868             } else
2869                num_results = pool->type == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT ? 2 : 1;
2870             state->pctx->get_query_result_resource(state->pctx,
2871                                                    pool->queries[i],
2872                                                    flags,
2873                                                    copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2874                                                    -1,
2875                                                    lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2876                                                    offset + num_results * result_size);
2877          }
2878          if (pool->type == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
2879             num_results = 0;
2880             u_foreach_bit(bit, pool->pipeline_stats)
2881                state->pctx->get_query_result_resource(state->pctx,
2882                                                       pool->queries[i],
2883                                                       flags,
2884                                                       copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2885                                                       bit,
2886                                                       lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2887                                                       offset + num_results++ * result_size);
2888          } else {
2889             state->pctx->get_query_result_resource(state->pctx,
2890                                                    pool->queries[i],
2891                                                    flags,
2892                                                    copycmd->flags & VK_QUERY_RESULT_64_BIT ? PIPE_QUERY_TYPE_U64 : PIPE_QUERY_TYPE_U32,
2893                                                    0,
2894                                                    lvp_buffer_from_handle(copycmd->dst_buffer)->bo,
2895                                                    offset);
2896          }
2897       } else {
2898          /* if the query was never emitted, zero the destination so availability is reported correctly */
2899          if (copycmd->flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
2900             struct pipe_transfer *dst_t;
2901             uint32_t *map;
2902
2903             struct pipe_box box = {0};
2904             box.x = offset;
2905             box.width = copycmd->stride;
2906             box.height = 1;
2907             box.depth = 1;
2908             map = state->pctx->buffer_map(state->pctx,
2909                                             lvp_buffer_from_handle(copycmd->dst_buffer)->bo, 0, PIPE_MAP_WRITE, &box,
2910                                             &dst_t);
2911
2912             memset(map, 0, box.width);
2913             state->pctx->buffer_unmap(state->pctx, dst_t);
2914          }
2915       }
2916    }
2917 }
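     /* Destination layout per query when
      * VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is requested: num_results
      * result words followed by one availability word, each result_size
      * (4 or 8) bytes -- hence the availability write at
      * offset + num_results * result_size above.  Pipeline statistics
      * queries expand to one result word per bit set in
      * pool->pipeline_stats.
      */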
2918
2919 static void handle_clear_color_image(struct vk_cmd_queue_entry *cmd,
2920                                      struct rendering_state *state)
2921 {
2922    LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_color_image.image);
2923    union util_color uc;
2924    uint32_t *col_val = uc.ui;
2925    util_pack_color_union(image->bo->format, &uc, (void*)cmd->u.clear_color_image.color);
2926    for (unsigned i = 0; i < cmd->u.clear_color_image.range_count; i++) {
2927       VkImageSubresourceRange *range = &cmd->u.clear_color_image.ranges[i];
2928       struct pipe_box box;
2929       box.x = 0;
2930       box.y = 0;
2931       box.z = 0;
2932
2933       uint32_t level_count = vk_image_subresource_level_count(&image->vk, range);
2934       for (unsigned j = range->baseMipLevel; j < range->baseMipLevel + level_count; j++) {
2935          box.width = u_minify(image->bo->width0, j);
2936          box.height = u_minify(image->bo->height0, j);
2937          box.depth = 1;
2938          if (image->bo->target == PIPE_TEXTURE_3D)
2939             box.depth = u_minify(image->bo->depth0, j);
2940          else if (image->bo->target == PIPE_TEXTURE_1D_ARRAY) {
2941             box.y = range->baseArrayLayer;
2942             box.height = vk_image_subresource_layer_count(&image->vk, range);
2943             box.depth = 1;
2944          } else {
2945             box.z = range->baseArrayLayer;
2946             box.depth = vk_image_subresource_layer_count(&image->vk, range);
2947          }
2948
2949          state->pctx->clear_texture(state->pctx, image->bo,
2950                                     j, &box, (void *)col_val);
2951       }
2952    }
2953 }
2954
2955 static void handle_clear_ds_image(struct vk_cmd_queue_entry *cmd,
2956                                   struct rendering_state *state)
2957 {
2958    LVP_FROM_HANDLE(lvp_image, image, cmd->u.clear_depth_stencil_image.image);
2959    for (unsigned i = 0; i < cmd->u.clear_depth_stencil_image.range_count; i++) {
2960       VkImageSubresourceRange *range = &cmd->u.clear_depth_stencil_image.ranges[i];
2961       uint32_t ds_clear_flags = 0;
2962       if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT)
2963          ds_clear_flags |= PIPE_CLEAR_DEPTH;
2964       if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)
2965          ds_clear_flags |= PIPE_CLEAR_STENCIL;
2966
2967       uint32_t level_count = vk_image_subresource_level_count(&image->vk, range);
2968       for (unsigned j = 0; j < level_count; j++) {
2969          struct pipe_surface *surf;
2970          unsigned width, height, depth;
2971          width = u_minify(image->bo->width0, range->baseMipLevel + j);
2972          height = u_minify(image->bo->height0, range->baseMipLevel + j);
2973
2974          if (image->bo->target == PIPE_TEXTURE_3D)
2975             depth = u_minify(image->bo->depth0, range->baseMipLevel + j);
2976          else {
2977             depth = vk_image_subresource_layer_count(&image->vk, range);
2978          }
2979
2980          surf = create_img_surface_bo(state, range,
2981                                       image->bo, image->bo->format,
2982                                       width, height,
2983                                       0, depth, j);
2984
2985          state->pctx->clear_depth_stencil(state->pctx,
2986                                           surf,
2987                                           ds_clear_flags,
2988                                           cmd->u.clear_depth_stencil_image.depth_stencil->depth,
2989                                           cmd->u.clear_depth_stencil_image.depth_stencil->stencil,
2990                                           0, 0,
2991                                           width, height, true);
2992          state->pctx->surface_destroy(state->pctx, surf);
2993       }
2994    }
2995 }
2996
2997 static void handle_clear_attachments(struct vk_cmd_queue_entry *cmd,
2998                                      struct rendering_state *state)
2999 {
3000    for (uint32_t a = 0; a < cmd->u.clear_attachments.attachment_count; a++) {
3001       VkClearAttachment *att = &cmd->u.clear_attachments.attachments[a];
3002       struct lvp_image_view *imgv;
3003
3004       if (att->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) {
3005          imgv = state->color_att[att->colorAttachment].imgv;
3006       } else {
3007          imgv = state->ds_imgv;
3008       }
3009       if (!imgv)
3010          continue;
3011
3012       union pipe_color_union col_val;
3013       double dclear_val = 0;
3014       uint32_t sclear_val = 0;
3015       uint32_t ds_clear_flags = 0;
3016       if (att->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
3017          ds_clear_flags |= PIPE_CLEAR_DEPTH;
3018          dclear_val = att->clearValue.depthStencil.depth;
3019       }
3020       if (att->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
3021          ds_clear_flags |= PIPE_CLEAR_STENCIL;
3022          sclear_val = att->clearValue.depthStencil.stencil;
3023       }
3024       if (att->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
3025          for (unsigned i = 0; i < 4; i++)
3026             col_val.ui[i] = att->clearValue.color.uint32[i];
3027       }
3028
3029       for (uint32_t r = 0; r < cmd->u.clear_attachments.rect_count; r++) {
3030
3031          VkClearRect *rect = &cmd->u.clear_attachments.rects[r];
3032          /* avoid crashing on spec violations */
3033          rect->rect.offset.x = MAX2(rect->rect.offset.x, 0);
3034          rect->rect.offset.y = MAX2(rect->rect.offset.y, 0);
3035          rect->rect.extent.width = MIN2(rect->rect.extent.width, state->framebuffer.width - rect->rect.offset.x);
3036          rect->rect.extent.height = MIN2(rect->rect.extent.height, state->framebuffer.height - rect->rect.offset.y);
3037          if (state->info.view_mask) {
3038             u_foreach_bit(i, state->info.view_mask)
3039                clear_attachment_layers(state, imgv, &rect->rect,
3040                                        i, 1,
3041                                        ds_clear_flags, dclear_val, sclear_val,
3042                                        &col_val);
3043          } else
3044             clear_attachment_layers(state, imgv, &rect->rect,
3045                                     rect->baseArrayLayer, rect->layerCount,
3046                                     ds_clear_flags, dclear_val, sclear_val,
3047                                     &col_val);
3048       }
3049    }
3050 }
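     /* With a nonzero view mask, vkCmdClearAttachments must clear every view
      * active in the current render pass instance rather than the rect's
      * baseArrayLayer/layerCount range, hence the per-view u_foreach_bit
      * loop above.
      */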
3051
3052 static void handle_resolve_image(struct vk_cmd_queue_entry *cmd,
3053                                  struct rendering_state *state)
3054 {
3055    int i;
3056    VkResolveImageInfo2 *resolvecmd = cmd->u.resolve_image2.resolve_image_info;
3057    LVP_FROM_HANDLE(lvp_image, src_image, resolvecmd->srcImage);
3058    LVP_FROM_HANDLE(lvp_image, dst_image, resolvecmd->dstImage);
3059    struct pipe_blit_info info;
3060
3061    memset(&info, 0, sizeof(info));
3062
3063    info.src.resource = src_image->bo;
3064    info.dst.resource = dst_image->bo;
3065    info.src.format = src_image->bo->format;
3066    info.dst.format = dst_image->bo->format;
3067    info.mask = util_format_is_depth_or_stencil(info.src.format) ? PIPE_MASK_ZS : PIPE_MASK_RGBA;
3068    info.filter = PIPE_TEX_FILTER_NEAREST;
3069    for (i = 0; i < resolvecmd->regionCount; i++) {
3070       int srcX0, srcY0;
3071       unsigned dstX0, dstY0;
3072
3073       srcX0 = resolvecmd->pRegions[i].srcOffset.x;
3074       srcY0 = resolvecmd->pRegions[i].srcOffset.y;
3075
3076       dstX0 = resolvecmd->pRegions[i].dstOffset.x;
3077       dstY0 = resolvecmd->pRegions[i].dstOffset.y;
3078
3079       info.dst.box.x = dstX0;
3080       info.dst.box.y = dstY0;
3081       info.src.box.x = srcX0;
3082       info.src.box.y = srcY0;
3083
3084       info.dst.box.width = resolvecmd->pRegions[i].extent.width;
3085       info.src.box.width = resolvecmd->pRegions[i].extent.width;
3086       info.dst.box.height = resolvecmd->pRegions[i].extent.height;
3087       info.src.box.height = resolvecmd->pRegions[i].extent.height;
3088
3089       info.dst.box.depth = resolvecmd->pRegions[i].dstSubresource.layerCount;
3090       info.src.box.depth = resolvecmd->pRegions[i].srcSubresource.layerCount;
3091
3092       info.src.level = resolvecmd->pRegions[i].srcSubresource.mipLevel;
3093       info.src.box.z = resolvecmd->pRegions[i].srcOffset.z + resolvecmd->pRegions[i].srcSubresource.baseArrayLayer;
3094
3095       info.dst.level = resolvecmd->pRegions[i].dstSubresource.mipLevel;
3096       info.dst.box.z = resolvecmd->pRegions[i].dstOffset.z + resolvecmd->pRegions[i].dstSubresource.baseArrayLayer;
3097
3098       state->pctx->blit(state->pctx, &info);
3099    }
3100 }
3101
3102 static void handle_draw_indirect_count(struct vk_cmd_queue_entry *cmd,
3103                                        struct rendering_state *state, bool indexed)
3104 {
3105    struct pipe_draw_start_count_bias draw = {0};
3106    if (indexed) {
3107       state->info.index_bounds_valid = false;
3108       state->info.index_size = state->index_size;
3109       state->info.index.resource = state->index_buffer;
3110       state->info.max_index = ~0;
3111    } else
3112       state->info.index_size = 0;
3113    state->indirect_info.offset = cmd->u.draw_indirect_count.offset;
3114    state->indirect_info.stride = cmd->u.draw_indirect_count.stride;
3115    state->indirect_info.draw_count = cmd->u.draw_indirect_count.max_draw_count;
3116    state->indirect_info.buffer = lvp_buffer_from_handle(cmd->u.draw_indirect_count.buffer)->bo;
3117    state->indirect_info.indirect_draw_count_offset = cmd->u.draw_indirect_count.count_buffer_offset;
3118    state->indirect_info.indirect_draw_count = lvp_buffer_from_handle(cmd->u.draw_indirect_count.count_buffer)->bo;
3119
3120    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
3121    state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
3122 }
3123
3124 static void handle_compute_push_descriptor_set(struct lvp_cmd_push_descriptor_set *pds,
3125                                                struct dyn_info *dyn_info,
3126                                                struct rendering_state *state)
3127 {
3128    const struct lvp_descriptor_set_layout *layout =
3129       vk_to_lvp_descriptor_set_layout(pds->layout->vk.set_layouts[pds->set]);
3130
3131    if (!(layout->shader_stages & VK_SHADER_STAGE_COMPUTE_BIT))
3132       return;
3133    for (unsigned i = 0; i < pds->set; i++) {
3134       increment_dyn_info(dyn_info, pds->layout->vk.set_layouts[i], false);
3135    }
3136    unsigned info_idx = 0;
3137    for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
3138       struct lvp_write_descriptor *desc = &pds->descriptors[i];
3139       const struct lvp_descriptor_set_binding_layout *binding =
3140          &layout->binding[desc->dst_binding];
3141
3142       if (!binding->valid)
3143          continue;
3144
3145       for (unsigned j = 0; j < desc->descriptor_count; j++) {
3146          union lvp_descriptor_info *info = &pds->infos[info_idx + j];
3147
3148          handle_descriptor(state, dyn_info, binding,
3149                            MESA_SHADER_COMPUTE, PIPE_SHADER_COMPUTE,
3150                            j, desc->descriptor_type,
3151                            info);
3152       }
3153       info_idx += desc->descriptor_count;
3154    }
3155 }
3156
3157 static struct lvp_cmd_push_descriptor_set *
3158 create_push_descriptor_set(struct rendering_state *state, struct vk_cmd_push_descriptor_set_khr *in_cmd)
3159 {
3160    LVP_FROM_HANDLE(lvp_pipeline_layout, layout, in_cmd->layout);
3161    struct lvp_cmd_push_descriptor_set *out_cmd;
3162    int count_descriptors = 0;
3163
3164    for (unsigned i = 0; i < in_cmd->descriptor_write_count; i++) {
3165       count_descriptors += in_cmd->descriptor_writes[i].descriptorCount;
3166    }
3167
3168    void *descriptors;
3169    void *infos;
3170    void **ptrs[] = {&descriptors, &infos};
3171    size_t sizes[] = {
3172       in_cmd->descriptor_write_count * sizeof(struct lvp_write_descriptor),
3173       count_descriptors * sizeof(union lvp_descriptor_info),
3174    };
3175    out_cmd = ptrzalloc(sizeof(struct lvp_cmd_push_descriptor_set), 2, sizes, ptrs);
3176    if (!out_cmd)
3177       return NULL;
3178
3179    out_cmd->bind_point = in_cmd->pipeline_bind_point;
3180    out_cmd->layout = layout;
3181    out_cmd->set = in_cmd->set;
3182    out_cmd->descriptor_write_count = in_cmd->descriptor_write_count;
3183    out_cmd->descriptors = descriptors;
3184    out_cmd->infos = infos;
3185
3186    unsigned descriptor_index = 0;
3187
3188    for (unsigned i = 0; i < in_cmd->descriptor_write_count; i++) {
3189       struct lvp_write_descriptor *desc = &out_cmd->descriptors[i];
3190
3191       /* dstSet is ignored */
3192       desc->dst_binding = in_cmd->descriptor_writes[i].dstBinding;
3193       desc->dst_array_element = in_cmd->descriptor_writes[i].dstArrayElement;
3194       desc->descriptor_count = in_cmd->descriptor_writes[i].descriptorCount;
3195       desc->descriptor_type = in_cmd->descriptor_writes[i].descriptorType;
3196
3197       for (unsigned j = 0; j < desc->descriptor_count; j++) {
3198          union lvp_descriptor_info *info = &out_cmd->infos[descriptor_index + j];
3199          switch (desc->descriptor_type) {
3200          case VK_DESCRIPTOR_TYPE_SAMPLER:
3201             if (in_cmd->descriptor_writes[i].pImageInfo[j].sampler)
3202                info->sampler = &lvp_sampler_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].sampler)->state;
3203             else
3204                info->sampler = NULL;
3205             break;
3206          case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3207             if (in_cmd->descriptor_writes[i].pImageInfo[j].sampler)
3208                info->sampler = &lvp_sampler_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].sampler)->state;
3209             else
3210                info->sampler = NULL;
3211             if (in_cmd->descriptor_writes[i].pImageInfo[j].imageView)
3212                info->sampler_view = lvp_image_view_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].imageView)->sv;
3213             else
3214                info->sampler_view = NULL;
3215             break;
3216          case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3217             if (in_cmd->descriptor_writes[i].pImageInfo[j].imageView)
3218                info->sampler_view = lvp_image_view_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].imageView)->sv;
3219             else
3220                info->sampler_view = NULL;
3221             break;
3222          case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3223          case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3224             if (in_cmd->descriptor_writes[i].pImageInfo[j].imageView)
3225                info->image_view = lvp_image_view_from_handle(in_cmd->descriptor_writes[i].pImageInfo[j].imageView)->iv;
3226             else
3227                info->image_view = ((struct pipe_image_view){0});
3228             break;
3229          case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: {
3230             struct lvp_buffer_view *bview = lvp_buffer_view_from_handle(in_cmd->descriptor_writes[i].pTexelBufferView[j]);
3231             info->sampler_view = bview ? bview->sv : NULL;
3232             break;
3233          }
3234          case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
3235             struct lvp_buffer_view *bview = lvp_buffer_view_from_handle(in_cmd->descriptor_writes[i].pTexelBufferView[j]);
3236             info->image_view = bview ? bview->iv : ((struct pipe_image_view){0});
3237             break;
3238          }
3239          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3240          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: {
3241             LVP_FROM_HANDLE(lvp_buffer, buffer, in_cmd->descriptor_writes[i].pBufferInfo[j].buffer);
3242             info->ubo.buffer = buffer ? buffer->bo : NULL;
3243             info->ubo.buffer_offset = buffer ? in_cmd->descriptor_writes[i].pBufferInfo[j].offset : 0;
3244             info->ubo.buffer_size = buffer ? in_cmd->descriptor_writes[i].pBufferInfo[j].range : 0;
3245             if (buffer && in_cmd->descriptor_writes[i].pBufferInfo[j].range == VK_WHOLE_SIZE)
3246                info->ubo.buffer_size = info->ubo.buffer->width0 - info->ubo.buffer_offset;
3247             break;
3248          }
3249          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3250          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
3251             LVP_FROM_HANDLE(lvp_buffer, buffer, in_cmd->descriptor_writes[i].pBufferInfo[j].buffer);
3252             info->ssbo.buffer = buffer ? buffer->bo : NULL;
3253             info->ssbo.buffer_offset = buffer ? in_cmd->descriptor_writes[i].pBufferInfo[j].offset : 0;
3254             info->ssbo.buffer_size = buffer ? in_cmd->descriptor_writes[i].pBufferInfo[j].range : 0;
3255             if (buffer && in_cmd->descriptor_writes[i].pBufferInfo[j].range == VK_WHOLE_SIZE)
3256                info->ssbo.buffer_size = info->ssbo.buffer->width0 - info->ssbo.buffer_offset;
3257             break;
3258          }
3259          default:
3260             break;
3261          }
3262       }
3263       descriptor_index += desc->descriptor_count;
3264    }
3265
3266    return out_cmd;
3267 }
3268
3269 static void handle_push_descriptor_set_generic(struct vk_cmd_push_descriptor_set_khr *_pds,
3270                                                struct rendering_state *state)
3271 {
3272    struct lvp_cmd_push_descriptor_set *pds = create_push_descriptor_set(state, _pds);
        if (!pds)
           return;
3273    const struct lvp_descriptor_set_layout *layout =
3274       vk_to_lvp_descriptor_set_layout(pds->layout->vk.set_layouts[pds->set]);
3275
3276    struct dyn_info dyn_info;
3277    memset(&dyn_info.stage, 0, sizeof(dyn_info.stage));
3278    dyn_info.dyn_index = 0;
3279    if (pds->bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
3280       handle_compute_push_descriptor_set(pds, &dyn_info, state);
3281    }
3282
3283    for (unsigned i = 0; i < pds->set; i++) {
3284       increment_dyn_info(&dyn_info, pds->layout->vk.set_layouts[i], false);
3285    }
3286
3287    unsigned info_idx = 0;
3288    for (unsigned i = 0; i < pds->descriptor_write_count; i++) {
3289       struct lvp_write_descriptor *desc = &pds->descriptors[i];
3290       const struct lvp_descriptor_set_binding_layout *binding =
3291          &layout->binding[desc->dst_binding];
3292
3293       if (!binding->valid)
3294          continue;
3295
3296       for (unsigned j = 0; j < desc->descriptor_count; j++) {
3297          union lvp_descriptor_info *info = &pds->infos[info_idx + j];
3298
3299          if (layout->shader_stages & VK_SHADER_STAGE_VERTEX_BIT)
3300             handle_descriptor(state, &dyn_info, binding,
3301                               MESA_SHADER_VERTEX, PIPE_SHADER_VERTEX,
3302                               j, desc->descriptor_type,
3303                               info);
3304          if (layout->shader_stages & VK_SHADER_STAGE_FRAGMENT_BIT)
3305             handle_descriptor(state, &dyn_info, binding,
3306                               MESA_SHADER_FRAGMENT, PIPE_SHADER_FRAGMENT,
3307                               j, desc->descriptor_type,
3308                               info);
3309          if (layout->shader_stages & VK_SHADER_STAGE_GEOMETRY_BIT)
3310             handle_descriptor(state, &dyn_info, binding,
3311                               MESA_SHADER_GEOMETRY, PIPE_SHADER_GEOMETRY,
3312                               j, desc->descriptor_type,
3313                               info);
3314          if (layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
3315             handle_descriptor(state, &dyn_info, binding,
3316                               MESA_SHADER_TESS_CTRL, PIPE_SHADER_TESS_CTRL,
3317                               j, desc->descriptor_type,
3318                               info);
3319          if (layout->shader_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
3320             handle_descriptor(state, &dyn_info, binding,
3321                               MESA_SHADER_TESS_EVAL, PIPE_SHADER_TESS_EVAL,
3322                               j, desc->descriptor_type,
3323                               info);
3324       }
3325       info_idx += desc->descriptor_count;
3326    }
3327    free(pds);
3328 }
3329
3330 static void handle_push_descriptor_set(struct vk_cmd_queue_entry *cmd,
3331                                        struct rendering_state *state)
3332 {
3333    handle_push_descriptor_set_generic(&cmd->u.push_descriptor_set_khr, state);
3334 }
3335
3336 static void handle_push_descriptor_set_with_template(struct vk_cmd_queue_entry *cmd,
3337                                                      struct rendering_state *state)
3338 {
3339    LVP_FROM_HANDLE(lvp_descriptor_update_template, templ, cmd->u.push_descriptor_set_with_template_khr.descriptor_update_template);
3340    struct vk_cmd_push_descriptor_set_khr *pds;
3341    int pds_size = sizeof(*pds);
3342
3343    pds_size += templ->entry_count * sizeof(struct VkWriteDescriptorSet);
3344
3345    for (unsigned i = 0; i < templ->entry_count; i++) {
3346       VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
3347       switch (entry->descriptorType) {
3348       case VK_DESCRIPTOR_TYPE_SAMPLER:
3349       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3350       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3351       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3352       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3353          pds_size += sizeof(VkDescriptorImageInfo) * entry->descriptorCount;
3354          break;
3355       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3356       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3357          pds_size += sizeof(VkBufferView) * entry->descriptorCount;
3358          break;
3359       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3360       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3361       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3362       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3363       default:
3364          pds_size += sizeof(VkDescriptorBufferInfo) * entry->descriptorCount;
3365          break;
3366       }
3367    }
3368
3369    pds = calloc(1, pds_size);
3370    if (!pds)
3371       return;
3372
3373    pds->pipeline_bind_point = templ->bind_point;
3374    pds->layout = lvp_pipeline_layout_to_handle(templ->pipeline_layout);
3375    pds->set = templ->set;
3376    pds->descriptor_write_count = templ->entry_count;
3377    pds->descriptor_writes = (struct VkWriteDescriptorSet *)(pds + 1);
3378    const uint8_t *next_info = (const uint8_t *) (pds->descriptor_writes + templ->entry_count);
3379
3380    const uint8_t *pSrc = cmd->u.push_descriptor_set_with_template_khr.data;
3381    for (unsigned i = 0; i < templ->entry_count; i++) {
3382       struct VkWriteDescriptorSet *desc = &pds->descriptor_writes[i];
3383       struct VkDescriptorUpdateTemplateEntry *entry = &templ->entry[i];
3384
3385       /* dstSet is ignored */
3386       desc->dstBinding = entry->dstBinding;
3387       desc->dstArrayElement = entry->dstArrayElement;
3388       desc->descriptorCount = entry->descriptorCount;
3389       desc->descriptorType = entry->descriptorType;
3390       desc->pImageInfo = (const VkDescriptorImageInfo *) next_info;
3391       desc->pTexelBufferView = (const VkBufferView *) next_info;
3392       desc->pBufferInfo = (const VkDescriptorBufferInfo *) next_info;
3393
3394       for (unsigned j = 0; j < desc->descriptorCount; j++) {
3395          switch (desc->descriptorType) {
3396          case VK_DESCRIPTOR_TYPE_SAMPLER:
3397          case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
3398          case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
3399          case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
3400          case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
3401             memcpy((VkDescriptorImageInfo*)&desc->pImageInfo[j], pSrc, sizeof(VkDescriptorImageInfo));
3402             next_info += sizeof(VkDescriptorImageInfo);
3403             pSrc += sizeof(VkDescriptorImageInfo);
3404             break;
3405          case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
3406          case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
3407             memcpy((VkBufferView*)&desc->pTexelBufferView[j], pSrc, sizeof(VkBufferView));
3408             next_info += sizeof(VkBufferView);
3409             pSrc += sizeof(VkBufferView);
3410             break;
3411          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
3412          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
3413          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
3414          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
3415          default:
3416             memcpy((VkDescriptorBufferInfo*)&desc->pBufferInfo[j], pSrc, sizeof(VkDescriptorBufferInfo));
3417             next_info += sizeof(VkDescriptorBufferInfo);
3418             pSrc += sizeof(VkDescriptorBufferInfo);
3419             break;
3420          }
3421       }
3422    }
3423    handle_push_descriptor_set_generic(pds, state);
3424    free(pds);
3425 }
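     /* Note the packing assumption above: the raw template data blob is
      * walked as a tight concatenation of VkDescriptorImageInfo /
      * VkBufferView / VkDescriptorBufferInfo entries in template-entry order
      * (the template's per-entry offset/stride is not consulted here, on the
      * assumption that the enqueue path stored the data densely).  All three
      * p*Info pointers of each write alias the same cursor, and the
      * descriptor type selects which one is actually meaningful.
      */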
3426
3427 static void handle_bind_transform_feedback_buffers(struct vk_cmd_queue_entry *cmd,
3428                                                    struct rendering_state *state)
3429 {
3430    struct vk_cmd_bind_transform_feedback_buffers_ext *btfb = &cmd->u.bind_transform_feedback_buffers_ext;
3431
3432    for (unsigned i = 0; i < btfb->binding_count; i++) {
3433       int idx = i + btfb->first_binding;
3434       uint32_t size;
3435       if (btfb->sizes && btfb->sizes[i] != VK_WHOLE_SIZE)
3436          size = btfb->sizes[i];
3437       else
3438          size = lvp_buffer_from_handle(btfb->buffers[i])->size - btfb->offsets[i];
3439
3440       if (state->so_targets[idx])
3441          state->pctx->stream_output_target_destroy(state->pctx, state->so_targets[idx]);
3442
3443       state->so_targets[idx] = state->pctx->create_stream_output_target(state->pctx,
3444                                                                         lvp_buffer_from_handle(btfb->buffers[i])->bo,
3445                                                                         btfb->offsets[i],
3446                                                                         size);
3447    }
3448    state->num_so_targets = btfb->first_binding + btfb->binding_count;
3449 }
3450
3451 static void handle_begin_transform_feedback(struct vk_cmd_queue_entry *cmd,
3452                                             struct rendering_state *state)
3453 {
3454    struct vk_cmd_begin_transform_feedback_ext *btf = &cmd->u.begin_transform_feedback_ext;
3455    uint32_t offsets[4];
3456
3457    memset(offsets, 0, sizeof(uint32_t)*4);
3458
3459    for (unsigned i = 0; btf->counter_buffers && i < btf->counter_buffer_count; i++) {
3460       if (!btf->counter_buffers[i])
3461          continue;
3462
3463       pipe_buffer_read(state->pctx,
3464                        btf->counter_buffers ? lvp_buffer_from_handle(btf->counter_buffers[i])->bo : NULL,
3465                        btf->counter_buffer_offsets ? btf->counter_buffer_offsets[i] : 0,
3466                        4,
3467                        &offsets[i]);
3468    }
3469    state->pctx->set_stream_output_targets(state->pctx, state->num_so_targets,
3470                                           state->so_targets, offsets);
3471 }
3472
3473 static void handle_end_transform_feedback(struct vk_cmd_queue_entry *cmd,
3474                                           struct rendering_state *state)
3475 {
3476    struct vk_cmd_end_transform_feedback_ext *etf = &cmd->u.end_transform_feedback_ext;
3477
3478    if (etf->counter_buffer_count) {
3479       for (unsigned i = 0; etf->counter_buffers && i < etf->counter_buffer_count; i++) {
3480          if (!etf->counter_buffers[i])
3481             continue;
3482
3483          uint32_t offset;
3484          offset = state->pctx->stream_output_target_offset(state->so_targets[i]);
3485
3486          pipe_buffer_write(state->pctx,
3487                            etf->counter_buffers ? lvp_buffer_from_handle(etf->counter_buffers[i])->bo : NULL,
3488                            etf->counter_buffer_offsets ? etf->counter_buffer_offsets[i] : 0,
3489                            4,
3490                            &offset);
3491       }
3492    }
3493    state->pctx->set_stream_output_targets(state->pctx, 0, NULL, NULL);
3494 }
3495
3496 static void handle_draw_indirect_byte_count(struct vk_cmd_queue_entry *cmd,
3497                                             struct rendering_state *state)
3498 {
3499    struct vk_cmd_draw_indirect_byte_count_ext *dibc = &cmd->u.draw_indirect_byte_count_ext;
3500    struct pipe_draw_start_count_bias draw = {0};
3501
3502    pipe_buffer_read(state->pctx,
3503                     lvp_buffer_from_handle(dibc->counter_buffer)->bo,
3504                     dibc->counter_buffer_offset,
3505                     4, &draw.count);
3506
3507    state->info.start_instance = cmd->u.draw_indirect_byte_count_ext.first_instance;
3508    state->info.instance_count = cmd->u.draw_indirect_byte_count_ext.instance_count;
3509    state->info.index_size = 0;
3510
3511    draw.count /= cmd->u.draw_indirect_byte_count_ext.vertex_stride;
3512    state->pctx->set_patch_vertices(state->pctx, state->patch_vertices);
3513    state->pctx->draw_vbo(state->pctx, &state->info, 0, &state->indirect_info, &draw, 1);
3514 }
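     /* vkCmdDrawIndirectByteCountEXT: the counter buffer holds the byte
      * offset a previous transform feedback pass wrote out, so dividing it
      * by the app-supplied vertex stride yields the vertex count, and the
      * draw itself is then an ordinary non-indexed draw_vbo.
      */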
3515
3516 static void handle_begin_conditional_rendering(struct vk_cmd_queue_entry *cmd,
3517                                                struct rendering_state *state)
3518 {
3519    struct VkConditionalRenderingBeginInfoEXT *bcr = cmd->u.begin_conditional_rendering_ext.conditional_rendering_begin;
3520    state->render_cond = true;
3521    state->pctx->render_condition_mem(state->pctx,
3522                                      lvp_buffer_from_handle(bcr->buffer)->bo,
3523                                      bcr->offset,
3524                                      bcr->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT);
3525 }
3526
3527 static void handle_end_conditional_rendering(struct rendering_state *state)
3528 {
3529    state->render_cond = false;
3530    state->pctx->render_condition_mem(state->pctx, NULL, 0, false);
3531 }
3532
3533 static void handle_set_vertex_input(struct vk_cmd_queue_entry *cmd,
3534                                     struct rendering_state *state)
3535 {
3536    const struct vk_cmd_set_vertex_input_ext *vertex_input = &cmd->u.set_vertex_input_ext;
3537    const struct VkVertexInputBindingDescription2EXT *bindings = vertex_input->vertex_binding_descriptions;
3538    const struct VkVertexInputAttributeDescription2EXT *attrs = vertex_input->vertex_attribute_descriptions;
3539    int max_location = -1;
3540    for (unsigned i = 0; i < vertex_input->vertex_attribute_description_count; i++) {
3541       const struct VkVertexInputBindingDescription2EXT *binding = NULL;
3542       unsigned location = attrs[i].location;
3543
3544       for (unsigned j = 0; j < vertex_input->vertex_binding_description_count; j++) {
3545          const struct VkVertexInputBindingDescription2EXT *b = &bindings[j];
3546          if (b->binding == attrs[i].binding) {
3547             binding = b;
3548             break;
3549          }
3550       }
3551       assert(binding);
3552       state->velem.velems[location].src_offset = attrs[i].offset;
3553       state->velem.velems[location].vertex_buffer_index = attrs[i].binding;
3554       state->velem.velems[location].src_format = lvp_vk_format_to_pipe_format(attrs[i].format);
3555       state->vb[attrs[i].binding].stride = binding->stride;
3556       uint32_t d = binding->divisor;
3557       switch (binding->inputRate) {
3558       case VK_VERTEX_INPUT_RATE_VERTEX:
3559          state->velem.velems[location].instance_divisor = 0;
3560          break;
3561       case VK_VERTEX_INPUT_RATE_INSTANCE:
3562          state->velem.velems[location].instance_divisor = d ? d : UINT32_MAX;
3563          break;
3564       default:
3565          assert(0);
3566          break;
3567       }
3568
3569       if ((int)location > max_location)
3570          max_location = location;
3571    }
3572    state->velem.count = max_location + 1;
3573    state->vb_dirty = true;
3574    state->ve_dirty = true;
3575 }
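     /* Divisor mapping: a Vulkan instance-rate divisor of 0 means the
      * attribute never advances (every instance reads the first element),
      * but gallium reserves instance_divisor == 0 for per-vertex data, so
      * the never-advance case is approximated with UINT32_MAX -- it would
      * take 2^32 instances to advance.
      */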
3576
3577 static void handle_set_cull_mode(struct vk_cmd_queue_entry *cmd,
3578                                  struct rendering_state *state)
3579 {
3580    state->rs_state.cull_face = vk_cull_to_pipe(cmd->u.set_cull_mode.cull_mode);
3581    state->rs_dirty = true;
3582 }
3583
3584 static void handle_set_front_face(struct vk_cmd_queue_entry *cmd,
3585                                   struct rendering_state *state)
3586 {
3587    state->rs_state.front_ccw = (cmd->u.set_front_face.front_face == VK_FRONT_FACE_COUNTER_CLOCKWISE);
3588    state->rs_dirty = true;
3589 }
3590
3591 static void handle_set_primitive_topology(struct vk_cmd_queue_entry *cmd,
3592                                           struct rendering_state *state)
3593 {
3594    state->info.mode = vk_conv_topology(cmd->u.set_primitive_topology.primitive_topology);
3595    state->rs_dirty = true;
3596 }
3597
3598 static void handle_set_depth_test_enable(struct vk_cmd_queue_entry *cmd,
3599                                          struct rendering_state *state)
3600 {
3601    state->dsa_dirty |= state->dsa_state.depth_enabled != cmd->u.set_depth_test_enable.depth_test_enable;
3602    state->dsa_state.depth_enabled = cmd->u.set_depth_test_enable.depth_test_enable;
3603 }
3604
3605 static void handle_set_depth_write_enable(struct vk_cmd_queue_entry *cmd,
3606                                           struct rendering_state *state)
3607 {
3608    state->dsa_dirty |= state->dsa_state.depth_writemask != cmd->u.set_depth_write_enable.depth_write_enable;
3609    state->dsa_state.depth_writemask = cmd->u.set_depth_write_enable.depth_write_enable;
3610 }
3611
3612 static void handle_set_depth_compare_op(struct vk_cmd_queue_entry *cmd,
3613                                         struct rendering_state *state)
3614 {
3615    state->dsa_dirty |= state->dsa_state.depth_func != cmd->u.set_depth_compare_op.depth_compare_op;
3616    state->dsa_state.depth_func = cmd->u.set_depth_compare_op.depth_compare_op;
3617 }
3618
3619 static void handle_set_depth_bounds_test_enable(struct vk_cmd_queue_entry *cmd,
3620                                                 struct rendering_state *state)
3621 {
3622    state->dsa_dirty |= state->dsa_state.depth_bounds_test != cmd->u.set_depth_bounds_test_enable.depth_bounds_test_enable;
3623    state->dsa_state.depth_bounds_test = cmd->u.set_depth_bounds_test_enable.depth_bounds_test_enable;
3624 }
3625
3626 static void handle_set_stencil_test_enable(struct vk_cmd_queue_entry *cmd,
3627                                            struct rendering_state *state)
3628 {
3629    state->dsa_dirty |= state->dsa_state.stencil[0].enabled != cmd->u.set_stencil_test_enable.stencil_test_enable ||
3630                        state->dsa_state.stencil[1].enabled != cmd->u.set_stencil_test_enable.stencil_test_enable;
3631    state->dsa_state.stencil[0].enabled = cmd->u.set_stencil_test_enable.stencil_test_enable;
3632    state->dsa_state.stencil[1].enabled = cmd->u.set_stencil_test_enable.stencil_test_enable;
3633 }
3634
3635 static void handle_set_stencil_op(struct vk_cmd_queue_entry *cmd,
3636                                   struct rendering_state *state)
3637 {
3638    if (cmd->u.set_stencil_op.face_mask & VK_STENCIL_FACE_FRONT_BIT) {
3639       state->dsa_state.stencil[0].func = cmd->u.set_stencil_op.compare_op;
3640       state->dsa_state.stencil[0].fail_op = vk_conv_stencil_op(cmd->u.set_stencil_op.fail_op);
3641       state->dsa_state.stencil[0].zpass_op = vk_conv_stencil_op(cmd->u.set_stencil_op.pass_op);
3642       state->dsa_state.stencil[0].zfail_op = vk_conv_stencil_op(cmd->u.set_stencil_op.depth_fail_op);
3643    }
3644
3645    if (cmd->u.set_stencil_op.face_mask & VK_STENCIL_FACE_BACK_BIT) {
3646       state->dsa_state.stencil[1].func = cmd->u.set_stencil_op.compare_op;
3647       state->dsa_state.stencil[1].fail_op = vk_conv_stencil_op(cmd->u.set_stencil_op.fail_op);
3648       state->dsa_state.stencil[1].zpass_op = vk_conv_stencil_op(cmd->u.set_stencil_op.pass_op);
3649       state->dsa_state.stencil[1].zfail_op = vk_conv_stencil_op(cmd->u.set_stencil_op.depth_fail_op);
3650    }
3651    state->dsa_dirty = true;
3652 }
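
/* Note on the conversions above: gallium's stencil[0] is the front face and
 * stencil[1] the back face, and the compare op is copied through unconverted
 * because VkCompareOp and pipe_compare_func share the same numeric values;
 * the stencil ops do differ and go through vk_conv_stencil_op().
 */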
3653
3654 static void handle_set_line_stipple(struct vk_cmd_queue_entry *cmd,
3655                                     struct rendering_state *state)
3656 {
3657    state->rs_state.line_stipple_factor = cmd->u.set_line_stipple_ext.line_stipple_factor - 1;
3658    state->rs_state.line_stipple_pattern = cmd->u.set_line_stipple_ext.line_stipple_pattern;
3659    state->rs_dirty = true;
3660 }
3661
3662 static void handle_set_depth_bias_enable(struct vk_cmd_queue_entry *cmd,
3663                                          struct rendering_state *state)
3664 {
3665    state->rs_dirty |= state->depth_bias.enabled != cmd->u.set_depth_bias_enable.depth_bias_enable;
3666    state->depth_bias.enabled = cmd->u.set_depth_bias_enable.depth_bias_enable;
3667 }
3668
3669 static void handle_set_logic_op(struct vk_cmd_queue_entry *cmd,
3670                                 struct rendering_state *state)
3671 {
3672    unsigned op = vk_conv_logic_op(cmd->u.set_logic_op_ext.logic_op);
3673    state->blend_dirty |= state->blend_state.logicop_func != op;
3674    state->blend_state.logicop_func = op;
3675 }
3676
3677 static void handle_set_patch_control_points(struct vk_cmd_queue_entry *cmd,
3678                                             struct rendering_state *state)
3679 {
3680    state->patch_vertices = cmd->u.set_patch_control_points_ext.patch_control_points;
3681 }
3682
3683 static void handle_set_primitive_restart_enable(struct vk_cmd_queue_entry *cmd,
3684                                                 struct rendering_state *state)
3685 {
3686    state->info.primitive_restart = cmd->u.set_primitive_restart_enable.primitive_restart_enable;
3687 }
3688
3689 static void handle_set_rasterizer_discard_enable(struct vk_cmd_queue_entry *cmd,
3690                                                  struct rendering_state *state)
3691 {
3692    state->rs_dirty |= state->rs_state.rasterizer_discard != cmd->u.set_rasterizer_discard_enable.rasterizer_discard_enable;
3693    state->rs_state.rasterizer_discard = cmd->u.set_rasterizer_discard_enable.rasterizer_discard_enable;
3694 }
3695
3696 static void handle_set_color_write_enable(struct vk_cmd_queue_entry *cmd,
3697                                           struct rendering_state *state)
3698 {
3699    uint8_t disable_mask = 0; /* PIPE_MAX_COLOR_BUFS is the max attachment count */
3700
3701    for (unsigned i = 0; i < cmd->u.set_color_write_enable_ext.attachment_count; i++) {
3702       /* the mask is inverted because cmdbufs are zero-initialized: a plain bool
3703        * can only record 'true', and the default must be color writes enabled
3704        */
3705       if (cmd->u.set_color_write_enable_ext.color_write_enables[i] != VK_TRUE)
3706          disable_mask |= BITFIELD_BIT(i);
3707    }
3708
3709    state->blend_dirty |= state->color_write_disables != disable_mask;
3710    state->color_write_disables = disable_mask;
3711 }
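
/* Worked example of the inversion above (illustrative values): with
 * attachment_count = 3 and color_write_enables = { VK_TRUE, VK_FALSE, VK_TRUE }
 * the loop produces disable_mask = 0x2, i.e. only attachment 1 is masked off,
 * while an all-enabled (or never-set) state keeps disable_mask == 0 and thus
 * matches the zero-initialized default.
 */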
3712
3713 static void handle_set_polygon_mode(struct vk_cmd_queue_entry *cmd,
3714                                     struct rendering_state *state)
3715 {
3716    unsigned polygon_mode = vk_polygon_mode_to_pipe(cmd->u.set_polygon_mode_ext.polygon_mode);
3717    if (state->rs_state.fill_front != polygon_mode)
3718       state->rs_dirty = true;
3719    state->rs_state.fill_front = polygon_mode;
3720    if (state->rs_state.fill_back != polygon_mode)
3721       state->rs_dirty = true;
3722    state->rs_state.fill_back = polygon_mode;
3723 }
3724
3725 static void handle_set_tessellation_domain_origin(struct vk_cmd_queue_entry *cmd,
3726                                                   struct rendering_state *state)
3727 {
3728    bool tess_ccw = cmd->u.set_tessellation_domain_origin_ext.domain_origin == VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT;
3729    if (tess_ccw == state->tess_ccw)
3730       return;
3731    state->tess_ccw = tess_ccw;
3732    if (state->tess_states[state->tess_ccw])
3733       state->pctx->bind_tes_state(state->pctx, state->tess_states[state->tess_ccw]);
3734 }
3735
3736 static void handle_set_depth_clamp_enable(struct vk_cmd_queue_entry *cmd,
3737                                           struct rendering_state *state)
3738 {
3739    state->rs_dirty |= state->rs_state.depth_clamp != cmd->u.set_depth_clamp_enable_ext.depth_clamp_enable;
3740    state->rs_state.depth_clamp = !!cmd->u.set_depth_clamp_enable_ext.depth_clamp_enable;
3741    if (state->depth_clamp_sets_clip)
3742       state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = !state->rs_state.depth_clamp;
3743 }
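
/* When depth_clamp_sets_clip is set, clamping and clipping are treated as a
 * single toggle: enabling clamp disables near/far clipping and vice versa.
 * This mirrors configurations where the two are not independently
 * controllable (the flag itself is configured elsewhere in the driver).
 */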
3744
3745 static void handle_set_depth_clip_enable(struct vk_cmd_queue_entry *cmd,
3746                                          struct rendering_state *state)
3747 {
3748    state->rs_dirty |= state->rs_state.depth_clip_far != !!cmd->u.set_depth_clip_enable_ext.depth_clip_enable;
3749    state->rs_state.depth_clip_near = state->rs_state.depth_clip_far = !!cmd->u.set_depth_clip_enable_ext.depth_clip_enable;
3750 }
3751
3752 static void handle_set_logic_op_enable(struct vk_cmd_queue_entry *cmd,
3753                                          struct rendering_state *state)
3754 {
3755    state->blend_dirty |= state->blend_state.logicop_enable != !!cmd->u.set_logic_op_enable_ext.logic_op_enable;
3756    state->blend_state.logicop_enable = !!cmd->u.set_logic_op_enable_ext.logic_op_enable;
3757 }
3758
3759 static void handle_set_sample_mask(struct vk_cmd_queue_entry *cmd,
3760                                    struct rendering_state *state)
3761 {
3762    unsigned mask = cmd->u.set_sample_mask_ext.sample_mask ? cmd->u.set_sample_mask_ext.sample_mask[0] : 0xffffffff;
3763    state->sample_mask_dirty |= state->sample_mask != mask;
3764    state->sample_mask = mask;
3765 }
3766
3767 static void handle_set_samples(struct vk_cmd_queue_entry *cmd,
3768                                struct rendering_state *state)
3769 {
3770    update_samples(state, cmd->u.set_rasterization_samples_ext.rasterization_samples);
3771 }
3772
3773 static void handle_set_alpha_to_coverage(struct vk_cmd_queue_entry *cmd,
3774                                          struct rendering_state *state)
3775 {
3776    state->blend_dirty |=
3777       state->blend_state.alpha_to_coverage != !!cmd->u.set_alpha_to_coverage_enable_ext.alpha_to_coverage_enable;
3778    state->blend_state.alpha_to_coverage = !!cmd->u.set_alpha_to_coverage_enable_ext.alpha_to_coverage_enable;
3779 }
3780
3781 static void handle_set_alpha_to_one(struct vk_cmd_queue_entry *cmd,
3782                                          struct rendering_state *state)
3783 {
3784    state->blend_dirty |=
3785       state->blend_state.alpha_to_one != !!cmd->u.set_alpha_to_one_enable_ext.alpha_to_one_enable;
3786    state->blend_state.alpha_to_one = !!cmd->u.set_alpha_to_one_enable_ext.alpha_to_one_enable;
3787    if (state->blend_state.alpha_to_one)
3788       state->rs_state.multisample = true;
3789 }
3790
3791 static void handle_set_halfz(struct vk_cmd_queue_entry *cmd,
3792                              struct rendering_state *state)
3793 {
3794    if (state->rs_state.clip_halfz == !cmd->u.set_depth_clip_negative_one_to_one_ext.negative_one_to_one)
3795       return;
3796    state->rs_dirty = true;
3797    state->rs_state.clip_halfz = !cmd->u.set_depth_clip_negative_one_to_one_ext.negative_one_to_one;
3798    /* handle dynamic state: convert from one transform to the other */
3799    for (unsigned i = 0; i < state->num_viewports; i++)
3800       set_viewport_depth_xform(state, i);
3801    state->vp_dirty = true;
3802 }
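
/* Background for the viewport rebuild above (a sketch of the math): with
 * clip_halfz the NDC depth range is [0,1] and the viewport transform is
 * roughly z' = z * (far - near) + near, while without it the range is [-1,1]
 * and z' = z * (far - near) / 2 + (far + near) / 2. Toggling
 * VK_EXT_depth_clip_control therefore changes every viewport's depth
 * scale/translate, which set_viewport_depth_xform() recomputes.
 */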
3803
3804 static void handle_set_line_rasterization_mode(struct vk_cmd_queue_entry *cmd,
3805                                                struct rendering_state *state)
3806 {
3807    VkLineRasterizationModeEXT lineRasterizationMode = cmd->u.set_line_rasterization_mode_ext.line_rasterization_mode;
3808    /* not even going to bother trying dirty tracking on this */
3809    state->rs_dirty = true;
3810    state->rs_state.line_smooth = lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
3811    state->rs_state.line_rectangular = lineRasterizationMode != VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
3812    state->disable_multisample = lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT ||
3813                                 lineRasterizationMode == VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
3814 }
3815
3816 static void handle_set_line_stipple_enable(struct vk_cmd_queue_entry *cmd,
3817                                            struct rendering_state *state)
3818 {
3819    state->rs_dirty |= state->rs_state.line_stipple_enable != !!cmd->u.set_line_stipple_enable_ext.stippled_line_enable;
3820    state->rs_state.line_stipple_enable = !!cmd->u.set_line_stipple_enable_ext.stippled_line_enable;
3821 }
3822
3823 static void handle_set_provoking_vertex_mode(struct vk_cmd_queue_entry *cmd,
3824                                              struct rendering_state *state)
3825 {
3826    bool flatshade_first = cmd->u.set_provoking_vertex_mode_ext.provoking_vertex_mode != VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT;
3827    state->rs_dirty |= state->rs_state.flatshade_first != flatshade_first;
3828    state->rs_state.flatshade_first = flatshade_first;
3829 }
3830
3831 static void handle_set_color_blend_enable(struct vk_cmd_queue_entry *cmd,
3832                                           struct rendering_state *state)
3833 {
3834    for (unsigned i = 0; i < cmd->u.set_color_blend_enable_ext.attachment_count; i++) {
3835       if (state->blend_state.rt[cmd->u.set_color_blend_enable_ext.first_attachment + i].blend_enable != !!cmd->u.set_color_blend_enable_ext.color_blend_enables[i]) {
3836          state->blend_dirty = true;
3837       }
3838       state->blend_state.rt[cmd->u.set_color_blend_enable_ext.first_attachment + i].blend_enable = !!cmd->u.set_color_blend_enable_ext.color_blend_enables[i];
3839    }
3840 }
3841
3842 static void handle_set_color_write_mask(struct vk_cmd_queue_entry *cmd,
3843                                         struct rendering_state *state)
3844 {
3845    for (unsigned i = 0; i < cmd->u.set_color_write_mask_ext.attachment_count; i++) {
3846       if (state->blend_state.rt[cmd->u.set_color_write_mask_ext.first_attachment + i].colormask != cmd->u.set_color_write_mask_ext.color_write_masks[i])
3847          state->blend_dirty = true;
3848       state->blend_state.rt[cmd->u.set_color_write_mask_ext.first_attachment + i].colormask = cmd->u.set_color_write_mask_ext.color_write_masks[i];
3849    }
3850 }
3851
3852 static void handle_set_color_blend_equation(struct vk_cmd_queue_entry *cmd,
3853                                             struct rendering_state *state)
3854 {
3855    const VkColorBlendEquationEXT *cb = cmd->u.set_color_blend_equation_ext.color_blend_equations;
3856    state->blend_dirty = true;
3857    for (unsigned i = 0; i < cmd->u.set_color_blend_equation_ext.attachment_count; i++) {
3858       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].rgb_func = vk_conv_blend_func(cb[i].colorBlendOp);
3859       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].rgb_src_factor = vk_conv_blend_factor(cb[i].srcColorBlendFactor);
3860       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].rgb_dst_factor = vk_conv_blend_factor(cb[i].dstColorBlendFactor);
3861       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].alpha_func = vk_conv_blend_func(cb[i].alphaBlendOp);
3862       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].alpha_src_factor = vk_conv_blend_factor(cb[i].srcAlphaBlendFactor);
3863       state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].alpha_dst_factor = vk_conv_blend_factor(cb[i].dstAlphaBlendFactor);
3864
3865       /* At least llvmpipe (like i965 hardware) applies the blend factors before
3866        * the blend function, regardless of which function is used, so for
3867        * MIN/MAX the blend factors have to be stomped to ONE.
3868        */
3869       if (cb[i].colorBlendOp == VK_BLEND_OP_MIN ||
3870           cb[i].colorBlendOp == VK_BLEND_OP_MAX) {
3871          state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].rgb_src_factor = PIPE_BLENDFACTOR_ONE;
3872          state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].rgb_dst_factor = PIPE_BLENDFACTOR_ONE;
3873       }
3874
3875       if (cb[i].alphaBlendOp == VK_BLEND_OP_MIN ||
3876           cb[i].alphaBlendOp == VK_BLEND_OP_MAX) {
3877          state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].alpha_src_factor = PIPE_BLENDFACTOR_ONE;
3878          state->blend_state.rt[cmd->u.set_color_blend_equation_ext.first_attachment + i].alpha_dst_factor = PIPE_BLENDFACTOR_ONE;
3879       }
3880    }
3881 }
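
/* Per the Vulkan spec, VK_BLEND_OP_MIN/MAX ignore the blend factors entirely
 * (result = min/max(src, dst)), so forcing the factors to ONE makes a
 * factor-then-function pipeline produce the spec'd result.
 */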
3882
3883 void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
3884 {
3885    struct vk_device_dispatch_table cmd_enqueue_dispatch;
3886    vk_device_dispatch_table_from_entrypoints(&cmd_enqueue_dispatch,
3887       &vk_cmd_enqueue_device_entrypoints, true);
3888
3889 #define ENQUEUE_CMD(CmdName) \
3890    assert(cmd_enqueue_dispatch.CmdName != NULL); \
3891    disp->CmdName = cmd_enqueue_dispatch.CmdName;
3892
3893    /* This list needs to match what's in lvp_execute_cmd_buffer exactly */
3894    ENQUEUE_CMD(CmdBindPipeline)
3895    ENQUEUE_CMD(CmdSetViewport)
3896    ENQUEUE_CMD(CmdSetViewportWithCount)
3897    ENQUEUE_CMD(CmdSetScissor)
3898    ENQUEUE_CMD(CmdSetScissorWithCount)
3899    ENQUEUE_CMD(CmdSetLineWidth)
3900    ENQUEUE_CMD(CmdSetDepthBias)
3901    ENQUEUE_CMD(CmdSetBlendConstants)
3902    ENQUEUE_CMD(CmdSetDepthBounds)
3903    ENQUEUE_CMD(CmdSetStencilCompareMask)
3904    ENQUEUE_CMD(CmdSetStencilWriteMask)
3905    ENQUEUE_CMD(CmdSetStencilReference)
3906    ENQUEUE_CMD(CmdBindDescriptorSets)
3907    ENQUEUE_CMD(CmdBindIndexBuffer)
3908    ENQUEUE_CMD(CmdBindVertexBuffers2)
3909    ENQUEUE_CMD(CmdDraw)
3910    ENQUEUE_CMD(CmdDrawMultiEXT)
3911    ENQUEUE_CMD(CmdDrawIndexed)
3912    ENQUEUE_CMD(CmdDrawIndirect)
3913    ENQUEUE_CMD(CmdDrawIndexedIndirect)
3914    ENQUEUE_CMD(CmdDrawMultiIndexedEXT)
3915    ENQUEUE_CMD(CmdDispatch)
3916    ENQUEUE_CMD(CmdDispatchBase)
3917    ENQUEUE_CMD(CmdDispatchIndirect)
3918    ENQUEUE_CMD(CmdCopyBuffer2)
3919    ENQUEUE_CMD(CmdCopyImage2)
3920    ENQUEUE_CMD(CmdBlitImage2)
3921    ENQUEUE_CMD(CmdCopyBufferToImage2)
3922    ENQUEUE_CMD(CmdCopyImageToBuffer2)
3923    ENQUEUE_CMD(CmdUpdateBuffer)
3924    ENQUEUE_CMD(CmdFillBuffer)
3925    ENQUEUE_CMD(CmdClearColorImage)
3926    ENQUEUE_CMD(CmdClearDepthStencilImage)
3927    ENQUEUE_CMD(CmdClearAttachments)
3928    ENQUEUE_CMD(CmdResolveImage2)
3929    ENQUEUE_CMD(CmdBeginQueryIndexedEXT)
3930    ENQUEUE_CMD(CmdEndQueryIndexedEXT)
3931    ENQUEUE_CMD(CmdBeginQuery)
3932    ENQUEUE_CMD(CmdEndQuery)
3933    ENQUEUE_CMD(CmdResetQueryPool)
3934    ENQUEUE_CMD(CmdCopyQueryPoolResults)
3935    ENQUEUE_CMD(CmdPushConstants)
3936    ENQUEUE_CMD(CmdExecuteCommands)
3937    ENQUEUE_CMD(CmdDrawIndirectCount)
3938    ENQUEUE_CMD(CmdDrawIndexedIndirectCount)
3939    ENQUEUE_CMD(CmdPushDescriptorSetKHR)
3940 //   ENQUEUE_CMD(CmdPushDescriptorSetWithTemplateKHR)
3941    ENQUEUE_CMD(CmdBindTransformFeedbackBuffersEXT)
3942    ENQUEUE_CMD(CmdBeginTransformFeedbackEXT)
3943    ENQUEUE_CMD(CmdEndTransformFeedbackEXT)
3944    ENQUEUE_CMD(CmdDrawIndirectByteCountEXT)
3945    ENQUEUE_CMD(CmdBeginConditionalRenderingEXT)
3946    ENQUEUE_CMD(CmdEndConditionalRenderingEXT)
3947    ENQUEUE_CMD(CmdSetVertexInputEXT)
3948    ENQUEUE_CMD(CmdSetCullMode)
3949    ENQUEUE_CMD(CmdSetFrontFace)
3950    ENQUEUE_CMD(CmdSetPrimitiveTopology)
3951    ENQUEUE_CMD(CmdSetDepthTestEnable)
3952    ENQUEUE_CMD(CmdSetDepthWriteEnable)
3953    ENQUEUE_CMD(CmdSetDepthCompareOp)
3954    ENQUEUE_CMD(CmdSetDepthBoundsTestEnable)
3955    ENQUEUE_CMD(CmdSetStencilTestEnable)
3956    ENQUEUE_CMD(CmdSetStencilOp)
3957    ENQUEUE_CMD(CmdSetLineStippleEXT)
3958    ENQUEUE_CMD(CmdSetDepthBiasEnable)
3959    ENQUEUE_CMD(CmdSetLogicOpEXT)
3960    ENQUEUE_CMD(CmdSetPatchControlPointsEXT)
3961    ENQUEUE_CMD(CmdSetPrimitiveRestartEnable)
3962    ENQUEUE_CMD(CmdSetRasterizerDiscardEnable)
3963    ENQUEUE_CMD(CmdSetColorWriteEnableEXT)
3964    ENQUEUE_CMD(CmdBeginRendering)
3965    ENQUEUE_CMD(CmdEndRendering)
3966    ENQUEUE_CMD(CmdSetDeviceMask)
3967    ENQUEUE_CMD(CmdPipelineBarrier2)
3968    ENQUEUE_CMD(CmdResetEvent2)
3969    ENQUEUE_CMD(CmdSetEvent2)
3970    ENQUEUE_CMD(CmdWaitEvents2)
3971    ENQUEUE_CMD(CmdWriteTimestamp2)
3972
3973    ENQUEUE_CMD(CmdSetPolygonModeEXT)
3974    ENQUEUE_CMD(CmdSetTessellationDomainOriginEXT)
3975    ENQUEUE_CMD(CmdSetDepthClampEnableEXT)
3976    ENQUEUE_CMD(CmdSetDepthClipEnableEXT)
3977    ENQUEUE_CMD(CmdSetLogicOpEnableEXT)
3978    ENQUEUE_CMD(CmdSetSampleMaskEXT)
3979    ENQUEUE_CMD(CmdSetRasterizationSamplesEXT)
3980    ENQUEUE_CMD(CmdSetAlphaToCoverageEnableEXT)
3981    ENQUEUE_CMD(CmdSetAlphaToOneEnableEXT)
3982    ENQUEUE_CMD(CmdSetDepthClipNegativeOneToOneEXT)
3983    ENQUEUE_CMD(CmdSetLineRasterizationModeEXT)
3984    ENQUEUE_CMD(CmdSetLineStippleEnableEXT)
3985    ENQUEUE_CMD(CmdSetProvokingVertexModeEXT)
3986    ENQUEUE_CMD(CmdSetColorBlendEnableEXT)
3987    ENQUEUE_CMD(CmdSetColorBlendEquationEXT)
3988    ENQUEUE_CMD(CmdSetColorWriteMaskEXT)
3989
3990 #undef ENQUEUE_CMD
3991 }
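
/* For reference, each ENQUEUE_CMD(CmdFoo) above expands to
 *
 *    assert(cmd_enqueue_dispatch.CmdFoo != NULL);
 *    disp->CmdFoo = cmd_enqueue_dispatch.CmdFoo;
 *
 * so every listed entrypoint is routed through the common vk_cmd_enqueue
 * layer, which records the call into the command buffer's cmd_queue for
 * later replay by lvp_execute_cmd_buffer() below.
 */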
3992
3993 static void lvp_execute_cmd_buffer(struct lvp_cmd_buffer *cmd_buffer,
3994                                    struct rendering_state *state)
3995 {
3996    struct vk_cmd_queue_entry *cmd;
3997    bool first = true;
3998    bool did_flush = false;
3999
4000    LIST_FOR_EACH_ENTRY(cmd, &cmd_buffer->vk.cmd_queue.cmds, cmd_link) {
4001       switch (cmd->type) {
4002       case VK_CMD_BIND_PIPELINE:
4003          handle_pipeline(cmd, state);
4004          break;
4005       case VK_CMD_SET_VIEWPORT:
4006          handle_set_viewport(cmd, state);
4007          break;
4008       case VK_CMD_SET_VIEWPORT_WITH_COUNT:
4009          handle_set_viewport_with_count(cmd, state);
4010          break;
4011       case VK_CMD_SET_SCISSOR:
4012          handle_set_scissor(cmd, state);
4013          break;
4014       case VK_CMD_SET_SCISSOR_WITH_COUNT:
4015          handle_set_scissor_with_count(cmd, state);
4016          break;
4017       case VK_CMD_SET_LINE_WIDTH:
4018          handle_set_line_width(cmd, state);
4019          break;
4020       case VK_CMD_SET_DEPTH_BIAS:
4021          handle_set_depth_bias(cmd, state);
4022          break;
4023       case VK_CMD_SET_BLEND_CONSTANTS:
4024          handle_set_blend_constants(cmd, state);
4025          break;
4026       case VK_CMD_SET_DEPTH_BOUNDS:
4027          handle_set_depth_bounds(cmd, state);
4028          break;
4029       case VK_CMD_SET_STENCIL_COMPARE_MASK:
4030          handle_set_stencil_compare_mask(cmd, state);
4031          break;
4032       case VK_CMD_SET_STENCIL_WRITE_MASK:
4033          handle_set_stencil_write_mask(cmd, state);
4034          break;
4035       case VK_CMD_SET_STENCIL_REFERENCE:
4036          handle_set_stencil_reference(cmd, state);
4037          break;
4038       case VK_CMD_BIND_DESCRIPTOR_SETS:
4039          handle_descriptor_sets(cmd, state);
4040          break;
4041       case VK_CMD_BIND_INDEX_BUFFER:
4042          handle_index_buffer(cmd, state);
4043          break;
4044       case VK_CMD_BIND_VERTEX_BUFFERS2:
4045          handle_vertex_buffers2(cmd, state);
4046          break;
4047       case VK_CMD_DRAW:
4048          emit_state(state);
4049          handle_draw(cmd, state);
4050          break;
4051       case VK_CMD_DRAW_MULTI_EXT:
4052          emit_state(state);
4053          handle_draw_multi(cmd, state);
4054          break;
4055       case VK_CMD_DRAW_INDEXED:
4056          emit_state(state);
4057          handle_draw_indexed(cmd, state);
4058          break;
4059       case VK_CMD_DRAW_INDIRECT:
4060          emit_state(state);
4061          handle_draw_indirect(cmd, state, false);
4062          break;
4063       case VK_CMD_DRAW_INDEXED_INDIRECT:
4064          emit_state(state);
4065          handle_draw_indirect(cmd, state, true);
4066          break;
4067       case VK_CMD_DRAW_MULTI_INDEXED_EXT:
4068          emit_state(state);
4069          handle_draw_multi_indexed(cmd, state);
4070          break;
4071       case VK_CMD_DISPATCH:
4072          emit_compute_state(state);
4073          handle_dispatch(cmd, state);
4074          break;
4075       case VK_CMD_DISPATCH_BASE:
4076          emit_compute_state(state);
4077          handle_dispatch_base(cmd, state);
4078          break;
4079       case VK_CMD_DISPATCH_INDIRECT:
4080          emit_compute_state(state);
4081          handle_dispatch_indirect(cmd, state);
4082          break;
4083       case VK_CMD_COPY_BUFFER2:
4084          handle_copy_buffer(cmd, state);
4085          break;
4086       case VK_CMD_COPY_IMAGE2:
4087          handle_copy_image(cmd, state);
4088          break;
4089       case VK_CMD_BLIT_IMAGE2:
4090          handle_blit_image(cmd, state);
4091          break;
4092       case VK_CMD_COPY_BUFFER_TO_IMAGE2:
4093          handle_copy_buffer_to_image(cmd, state);
4094          break;
4095       case VK_CMD_COPY_IMAGE_TO_BUFFER2:
4096          handle_copy_image_to_buffer2(cmd, state);
4097          break;
4098       case VK_CMD_UPDATE_BUFFER:
4099          handle_update_buffer(cmd, state);
4100          break;
4101       case VK_CMD_FILL_BUFFER:
4102          handle_fill_buffer(cmd, state);
4103          break;
4104       case VK_CMD_CLEAR_COLOR_IMAGE:
4105          handle_clear_color_image(cmd, state);
4106          break;
4107       case VK_CMD_CLEAR_DEPTH_STENCIL_IMAGE:
4108          handle_clear_ds_image(cmd, state);
4109          break;
4110       case VK_CMD_CLEAR_ATTACHMENTS:
4111          handle_clear_attachments(cmd, state);
4112          break;
4113       case VK_CMD_RESOLVE_IMAGE2:
4114          handle_resolve_image(cmd, state);
4115          break;
4116       case VK_CMD_PIPELINE_BARRIER2:
4117          /* skip flushes: every cmdbuf flushes after iterating its cmds, so a
4118           * barrier that is first, last, or follows another barrier is redundant
4119           */
4120          if (first || did_flush || cmd->cmd_link.next == &cmd_buffer->vk.cmd_queue.cmds)
4121             continue;
4122          handle_pipeline_barrier(cmd, state);
4123          did_flush = true;
4124          continue;
4125       case VK_CMD_BEGIN_QUERY_INDEXED_EXT:
4126          handle_begin_query_indexed_ext(cmd, state);
4127          break;
4128       case VK_CMD_END_QUERY_INDEXED_EXT:
4129          handle_end_query_indexed_ext(cmd, state);
4130          break;
4131       case VK_CMD_BEGIN_QUERY:
4132          handle_begin_query(cmd, state);
4133          break;
4134       case VK_CMD_END_QUERY:
4135          handle_end_query(cmd, state);
4136          break;
4137       case VK_CMD_RESET_QUERY_POOL:
4138          handle_reset_query_pool(cmd, state);
4139          break;
4140       case VK_CMD_COPY_QUERY_POOL_RESULTS:
4141          handle_copy_query_pool_results(cmd, state);
4142          break;
4143       case VK_CMD_PUSH_CONSTANTS:
4144          handle_push_constants(cmd, state);
4145          break;
4146       case VK_CMD_EXECUTE_COMMANDS:
4147          handle_execute_commands(cmd, state);
4148          break;
4149       case VK_CMD_DRAW_INDIRECT_COUNT:
4150          emit_state(state);
4151          handle_draw_indirect_count(cmd, state, false);
4152          break;
4153       case VK_CMD_DRAW_INDEXED_INDIRECT_COUNT:
4154          emit_state(state);
4155          handle_draw_indirect_count(cmd, state, true);
4156          break;
4157       case VK_CMD_PUSH_DESCRIPTOR_SET_KHR:
4158          handle_push_descriptor_set(cmd, state);
4159          break;
4160       case VK_CMD_PUSH_DESCRIPTOR_SET_WITH_TEMPLATE_KHR:
4161          handle_push_descriptor_set_with_template(cmd, state);
4162          break;
4163       case VK_CMD_BIND_TRANSFORM_FEEDBACK_BUFFERS_EXT:
4164          handle_bind_transform_feedback_buffers(cmd, state);
4165          break;
4166       case VK_CMD_BEGIN_TRANSFORM_FEEDBACK_EXT:
4167          handle_begin_transform_feedback(cmd, state);
4168          break;
4169       case VK_CMD_END_TRANSFORM_FEEDBACK_EXT:
4170          handle_end_transform_feedback(cmd, state);
4171          break;
4172       case VK_CMD_DRAW_INDIRECT_BYTE_COUNT_EXT:
4173          emit_state(state);
4174          handle_draw_indirect_byte_count(cmd, state);
4175          break;
4176       case VK_CMD_BEGIN_CONDITIONAL_RENDERING_EXT:
4177          handle_begin_conditional_rendering(cmd, state);
4178          break;
4179       case VK_CMD_END_CONDITIONAL_RENDERING_EXT:
4180          handle_end_conditional_rendering(state);
4181          break;
4182       case VK_CMD_SET_VERTEX_INPUT_EXT:
4183          handle_set_vertex_input(cmd, state);
4184          break;
4185       case VK_CMD_SET_CULL_MODE:
4186          handle_set_cull_mode(cmd, state);
4187          break;
4188       case VK_CMD_SET_FRONT_FACE:
4189          handle_set_front_face(cmd, state);
4190          break;
4191       case VK_CMD_SET_PRIMITIVE_TOPOLOGY:
4192          handle_set_primitive_topology(cmd, state);
4193          break;
4194       case VK_CMD_SET_DEPTH_TEST_ENABLE:
4195          handle_set_depth_test_enable(cmd, state);
4196          break;
4197       case VK_CMD_SET_DEPTH_WRITE_ENABLE:
4198          handle_set_depth_write_enable(cmd, state);
4199          break;
4200       case VK_CMD_SET_DEPTH_COMPARE_OP:
4201          handle_set_depth_compare_op(cmd, state);
4202          break;
4203       case VK_CMD_SET_DEPTH_BOUNDS_TEST_ENABLE:
4204          handle_set_depth_bounds_test_enable(cmd, state);
4205          break;
4206       case VK_CMD_SET_STENCIL_TEST_ENABLE:
4207          handle_set_stencil_test_enable(cmd, state);
4208          break;
4209       case VK_CMD_SET_STENCIL_OP:
4210          handle_set_stencil_op(cmd, state);
4211          break;
4212       case VK_CMD_SET_LINE_STIPPLE_EXT:
4213          handle_set_line_stipple(cmd, state);
4214          break;
4215       case VK_CMD_SET_DEPTH_BIAS_ENABLE:
4216          handle_set_depth_bias_enable(cmd, state);
4217          break;
4218       case VK_CMD_SET_LOGIC_OP_EXT:
4219          handle_set_logic_op(cmd, state);
4220          break;
4221       case VK_CMD_SET_PATCH_CONTROL_POINTS_EXT:
4222          handle_set_patch_control_points(cmd, state);
4223          break;
4224       case VK_CMD_SET_PRIMITIVE_RESTART_ENABLE:
4225          handle_set_primitive_restart_enable(cmd, state);
4226          break;
4227       case VK_CMD_SET_RASTERIZER_DISCARD_ENABLE:
4228          handle_set_rasterizer_discard_enable(cmd, state);
4229          break;
4230       case VK_CMD_SET_COLOR_WRITE_ENABLE_EXT:
4231          handle_set_color_write_enable(cmd, state);
4232          break;
4233       case VK_CMD_BEGIN_RENDERING:
4234          handle_begin_rendering(cmd, state);
4235          break;
4236       case VK_CMD_END_RENDERING:
4237          handle_end_rendering(cmd, state);
4238          break;
4239       case VK_CMD_SET_DEVICE_MASK:
4240          /* no-op */
4241          break;
4242       case VK_CMD_RESET_EVENT2:
4243          handle_event_reset2(cmd, state);
4244          break;
4245       case VK_CMD_SET_EVENT2:
4246          handle_event_set2(cmd, state);
4247          break;
4248       case VK_CMD_WAIT_EVENTS2:
4249          handle_wait_events2(cmd, state);
4250          break;
4251       case VK_CMD_WRITE_TIMESTAMP2:
4252          handle_write_timestamp2(cmd, state);
4253          break;
4254
4255       case VK_CMD_SET_POLYGON_MODE_EXT:
4256          handle_set_polygon_mode(cmd, state);
4257          break;
4258       case VK_CMD_SET_TESSELLATION_DOMAIN_ORIGIN_EXT:
4259          handle_set_tessellation_domain_origin(cmd, state);
4260          break;
4261       case VK_CMD_SET_DEPTH_CLAMP_ENABLE_EXT:
4262          handle_set_depth_clamp_enable(cmd, state);
4263          break;
4264       case VK_CMD_SET_DEPTH_CLIP_ENABLE_EXT:
4265          handle_set_depth_clip_enable(cmd, state);
4266          break;
4267       case VK_CMD_SET_LOGIC_OP_ENABLE_EXT:
4268          handle_set_logic_op_enable(cmd, state);
4269          break;
4270       case VK_CMD_SET_SAMPLE_MASK_EXT:
4271          handle_set_sample_mask(cmd, state);
4272          break;
4273       case VK_CMD_SET_RASTERIZATION_SAMPLES_EXT:
4274          handle_set_samples(cmd, state);
4275          break;
4276       case VK_CMD_SET_ALPHA_TO_COVERAGE_ENABLE_EXT:
4277          handle_set_alpha_to_coverage(cmd, state);
4278          break;
4279       case VK_CMD_SET_ALPHA_TO_ONE_ENABLE_EXT:
4280          handle_set_alpha_to_one(cmd, state);
4281          break;
4282       case VK_CMD_SET_DEPTH_CLIP_NEGATIVE_ONE_TO_ONE_EXT:
4283          handle_set_halfz(cmd, state);
4284          break;
4285       case VK_CMD_SET_LINE_RASTERIZATION_MODE_EXT:
4286          handle_set_line_rasterization_mode(cmd, state);
4287          break;
4288       case VK_CMD_SET_LINE_STIPPLE_ENABLE_EXT:
4289          handle_set_line_stipple_enable(cmd, state);
4290          break;
4291       case VK_CMD_SET_PROVOKING_VERTEX_MODE_EXT:
4292          handle_set_provoking_vertex_mode(cmd, state);
4293          break;
4294       case VK_CMD_SET_COLOR_BLEND_ENABLE_EXT:
4295          handle_set_color_blend_enable(cmd, state);
4296          break;
4297       case VK_CMD_SET_COLOR_WRITE_MASK_EXT:
4298          handle_set_color_write_mask(cmd, state);
4299          break;
4300       case VK_CMD_SET_COLOR_BLEND_EQUATION_EXT:
4301          handle_set_color_blend_equation(cmd, state);
4302          break;
4303
4304       default:
4305          fprintf(stderr, "Unsupported command %s\n", vk_cmd_queue_type_names[cmd->type]);
4306          unreachable("Unsupported command");
4307          break;
4308       }
4309       first = false;
4310       did_flush = false;
4311    }
4312 }
4313
4314 VkResult lvp_execute_cmds(struct lvp_device *device,
4315                           struct lvp_queue *queue,
4316                           struct lvp_cmd_buffer *cmd_buffer)
4317 {
4318    struct rendering_state *state = queue->state;
4319    memset(state, 0, sizeof(*state));
4320    state->pctx = queue->ctx;
4321    state->uploader = queue->uploader;
4322    state->cso = queue->cso;
4323    state->blend_dirty = true;
4324    state->dsa_dirty = true;
4325    state->rs_dirty = true;
4326    state->vp_dirty = true;
4327    state->rs_state.point_tri_clip = true;
4328    state->rs_state.unclamped_fragment_depth_values = device->vk.enabled_extensions.EXT_depth_range_unrestricted;
4329    state->sample_mask_dirty = true;
4330    state->min_samples_dirty = true;
4331    state->sample_mask = UINT32_MAX;
4332    for (enum pipe_shader_type s = PIPE_SHADER_VERTEX; s < PIPE_SHADER_TYPES; s++) {
4333       for (unsigned i = 0; i < ARRAY_SIZE(state->cso_ss_ptr[s]); i++)
4334          state->cso_ss_ptr[s][i] = &state->ss[s][i];
4335    }
4336    /* execute the command buffer on the queue's gallium context */
4337    lvp_execute_cmd_buffer(cmd_buffer, state);
4338
4339    state->start_vb = -1;
4340    state->num_vb = 0;
4341    cso_unbind_context(queue->cso);
4342    for (unsigned i = 0; i < ARRAY_SIZE(state->so_targets); i++) {
4343       if (state->so_targets[i]) {
4344          state->pctx->stream_output_target_destroy(state->pctx, state->so_targets[i]);
4345       }
4346    }
4347
4348    free(state->color_att);
4349    return VK_SUCCESS;
4350 }
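
/* Note that the rendering_state is rebuilt from scratch for every submission:
 * the memset() plus the dirty flags set above force all baseline state to be
 * re-emitted on the first draw, so nothing leaks between executions.
 */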
4351
4352 size_t
4353 lvp_get_rendering_state_size(void)
4354 {
4355    return sizeof(struct rendering_state);
4356 }
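
/* Callers outside this file treat struct rendering_state as opaque; this
 * helper presumably exists so the queue code can allocate storage for
 * queue->state without seeing the struct definition.
 */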