lavapipe: BindIndexBuffer2
author Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Wed, 9 Aug 2023 19:25:52 +0000 (15:25 -0400)
committer Marge Bot <emma+marge@anholt.net>
Thu, 10 Aug 2023 01:56:03 +0000 (01:56 +0000)
Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/24589>
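
For context (not part of the commit message): vkCmdBindIndexBuffer2KHR comes from VK_KHR_maintenance5 and extends vkCmdBindIndexBuffer with an explicit size for the bound index range (VK_WHOLE_SIZE selects the remainder of the buffer). A hypothetical application-side sketch of the entrypoint this patch handles follows; the function name and handles are placeholders, and it assumes VK_KHR_maintenance5 is enabled:

    #include <vulkan/vulkan.h>

    /* Hypothetical usage sketch, not part of the patch: bind a bounded 16-bit
     * index range and draw from it.  Assumes cmd and index_buffer were created
     * and recorded into elsewhere. */
    static void draw_with_bounded_indices(VkCommandBuffer cmd, VkBuffer index_buffer,
                                          VkDeviceSize offset, VkDeviceSize size,
                                          uint32_t index_count)
    {
       /* 'size' is the new parameter; lavapipe records it as index_buffer_size
        * and clamps later indexed draws against it. */
       vkCmdBindIndexBuffer2KHR(cmd, index_buffer, offset, size, VK_INDEX_TYPE_UINT16);
       vkCmdDrawIndexed(cmd, index_count, 1, 0, 0, 0);
    }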

src/gallium/frontends/lavapipe/lvp_execute.c

index 04a8520..9ecbb70 100644
@@ -136,6 +136,7 @@ struct rendering_state {
    uint8_t patch_vertices;
    uint8_t index_size;
    unsigned index_offset;
+   unsigned index_buffer_size; //UINT32_MAX for unset
    struct pipe_resource *index_buffer;
    struct pipe_constant_buffer const_buffer[LVP_SHADER_STAGES][16];
    struct lvp_descriptor_set *desc_sets[2][MAX_SETS];
@@ -2476,7 +2477,7 @@ static void handle_draw_indexed(struct vk_cmd_queue_entry *cmd,
    if (state->info.primitive_restart)
       state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
 
-   draw.count = cmd->u.draw_indexed.index_count;
+   draw.count = MIN2(cmd->u.draw_indexed.index_count, state->index_buffer_size / state->index_size);
    draw.index_bias = cmd->u.draw_indexed.vertex_offset;
    /* TODO: avoid calculating multiple times if cmdbuf is submitted again */
    draw.start = util_clamped_uadd(state->index_offset / state->index_size,
@@ -2507,6 +2508,10 @@ static void handle_draw_multi_indexed(struct vk_cmd_queue_entry *cmd,
 
    unsigned size = cmd->u.draw_multi_indexed_ext.draw_count * sizeof(struct pipe_draw_start_count_bias);
    memcpy(draws, cmd->u.draw_multi_indexed_ext.index_info, size);
+   if (state->index_buffer_size != UINT32_MAX) {
+      for (unsigned i = 0; i < cmd->u.draw_multi_indexed_ext.draw_count; i++)
+         draws[i].count = MIN2(draws[i].count, state->index_buffer_size / state->index_size - draws[i].start);
+   }
 
    /* only the first member is read if index_bias_varies is true */
    if (cmd->u.draw_multi_indexed_ext.draw_count &&
@@ -2538,12 +2543,12 @@ static void handle_draw_indirect(struct vk_cmd_queue_entry *cmd,
       state->info.max_index = ~0U;
       if (state->info.primitive_restart)
          state->info.restart_index = util_prim_restart_index_from_size(state->info.index_size);
-      if (state->index_offset) {
+      if (state->index_offset || state->index_buffer_size != UINT32_MAX) {
          struct pipe_transfer *xfer;
          uint8_t *mem = pipe_buffer_map(state->pctx, state->index_buffer, 0, &xfer);
          state->pctx->buffer_unmap(state->pctx, xfer);
          index = get_buffer_resource(state->pctx, mem + state->index_offset);
-         index->width0 = state->index_buffer->width0 - state->index_offset;
+         index->width0 = MIN2(state->index_buffer->width0 - state->index_offset, state->index_buffer_size);
          state->info.index.resource = index;
       }
    } else
@@ -2562,6 +2567,25 @@ static void handle_index_buffer(struct vk_cmd_queue_entry *cmd,
 {
    struct vk_cmd_bind_index_buffer *ib = &cmd->u.bind_index_buffer;
    state->index_size = vk_index_type_to_bytes(ib->index_type);
+   state->index_buffer_size = UINT32_MAX;
+
+   if (ib->buffer) {
+      state->index_offset = ib->offset;
+      state->index_buffer = lvp_buffer_from_handle(ib->buffer)->bo;
+   } else {
+      state->index_offset = 0;
+      state->index_buffer = state->device->zero_buffer;
+   }
+
+   state->ib_dirty = true;
+}
+
+static void handle_index_buffer2(struct vk_cmd_queue_entry *cmd,
+                                 struct rendering_state *state)
+{
+   struct vk_cmd_bind_index_buffer2_khr *ib = &cmd->u.bind_index_buffer2_khr;
+   state->index_size = vk_index_type_to_bytes(ib->index_type);
+   state->index_buffer_size = ib->size;
 
    if (ib->buffer) {
       state->index_offset = ib->offset;
@@ -3054,12 +3078,12 @@ static void handle_draw_indirect_count(struct vk_cmd_queue_entry *cmd,
       state->info.index_size = state->index_size;
       state->info.index.resource = state->index_buffer;
       state->info.max_index = ~0U;
-      if (state->index_offset) {
+      if (state->index_offset || state->index_buffer_size != UINT32_MAX) {
          struct pipe_transfer *xfer;
          uint8_t *mem = pipe_buffer_map(state->pctx, state->index_buffer, 0, &xfer);
          state->pctx->buffer_unmap(state->pctx, xfer);
          index = get_buffer_resource(state->pctx, mem + state->index_offset);
-         index->width0 = state->index_buffer->width0 - state->index_offset;
+         index->width0 = MIN2(state->index_buffer->width0 - state->index_offset, state->index_buffer_size);
          state->info.index.resource = index;
       }
    } else
@@ -4106,6 +4130,7 @@ void lvp_add_enqueue_cmd_entrypoints(struct vk_device_dispatch_table *disp)
    ENQUEUE_CMD(CmdSetStencilReference)
    ENQUEUE_CMD(CmdBindDescriptorSets)
    ENQUEUE_CMD(CmdBindIndexBuffer)
+   ENQUEUE_CMD(CmdBindIndexBuffer2KHR)
    ENQUEUE_CMD(CmdBindVertexBuffers2)
    ENQUEUE_CMD(CmdDraw)
    ENQUEUE_CMD(CmdDrawMultiEXT)
@@ -4268,6 +4293,9 @@ static void lvp_execute_cmd_buffer(struct list_head *cmds,
       case VK_CMD_BIND_INDEX_BUFFER:
          handle_index_buffer(cmd, state);
          break;
+      case VK_CMD_BIND_INDEX_BUFFER2_KHR:
+         handle_index_buffer2(cmd, state);
+         break;
       case VK_CMD_BIND_VERTEX_BUFFERS2:
          handle_vertex_buffers2(cmd, state);
          break;
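
A minimal standalone sketch of the clamping rule the patch adds in handle_draw_indexed(); MIN2 is redefined here so the snippet compiles on its own (in Mesa it comes from util/macros.h), and the numbers are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN2(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
       uint32_t index_size  = 2;    /* bytes per index, VK_INDEX_TYPE_UINT16 */
       uint32_t bound_size  = 100;  /* size passed to vkCmdBindIndexBuffer2KHR */
       uint32_t index_count = 64;   /* indexCount requested by vkCmdDrawIndexed */

       /* Same idea as the new code in handle_draw_indexed(): never read indices
        * beyond the bound range, i.e. at most bound_size / index_size of them. */
       uint32_t count = MIN2(index_count, bound_size / index_size);
       printf("draw count clamped from %u to %u\n", index_count, count); /* 64 -> 50 */
       return 0;
    }

With the legacy vkCmdBindIndexBuffer path, index_buffer_size stays at the UINT32_MAX sentinel, so the quotient remains large and draws of any realistic size pass through unclamped.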