.depth_compare_op = 0,
.depth_bounds_test_enable = 0,
.stencil_test_enable = 0,
+ .dyn_vbo_stride = 0,
+ .dyn_vbo_size = 0,
};
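The two new defaults correspond to flags this change presumably adds to
struct anv_dynamic_state in anv_private.h; that hunk is not shown here.
A minimal sketch, assuming plain bool fields to match the usage below:

    struct anv_dynamic_state {
       /* ... existing dynamic state fields ... */

       /* Set when the application supplied pStrides/pSizes to
        * vkCmdBindVertexBuffers2EXT, so the flush code knows to take
        * the stride/size from the binding rather than the pipeline. */
       bool dyn_vbo_stride;
       bool dyn_vbo_size;
    };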
ANV_CMP_COPY(stencil_op.back.compare_op, ANV_CMD_DIRTY_DYNAMIC_STENCIL_OP);
}
+ ANV_CMP_COPY(dyn_vbo_stride, ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE);
+ ANV_CMP_COPY(dyn_vbo_size, ANV_CMD_DIRTY_DYNAMIC_VERTEX_INPUT_BINDING_STRIDE);
+
#undef ANV_CMP_COPY
return changed;
}
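ANV_CMP_COPY is anv's copy-if-changed helper: it copies a field from src to
dest and accumulates a dirty flag when the value actually changed. A sketch
of the expansion it presumably has (the real macro is defined earlier in
anv_cmd_buffer.c):

    #define ANV_CMP_COPY(field, flag)                 \
       if (copy_mask & flag) {                        \
          if (dest->field != src->field) {            \
             dest->field = src->field;                \
             changed |= flag;                         \
          }                                           \
       }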
}
-void anv_CmdBindVertexBuffers(
- VkCommandBuffer commandBuffer,
- uint32_t firstBinding,
- uint32_t bindingCount,
- const VkBuffer* pBuffers,
- const VkDeviceSize* pOffsets)
+void anv_CmdBindVertexBuffers2EXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes,
+ const VkDeviceSize* pStrides)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
/* We have to defer setting up the vertex buffers since we may need the
* stride from the pipeline. */
+ if (pSizes)
+ cmd_buffer->state.gfx.dynamic.dyn_vbo_size = true;
+ if (pStrides)
+ cmd_buffer->state.gfx.dynamic.dyn_vbo_stride = true;
+
assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
vb[firstBinding + i].offset = pOffsets[i];
+ vb[firstBinding + i].size = pSizes ? pSizes[i] : 0;
+ vb[firstBinding + i].stride = pStrides ? pStrides[i] : 0;
cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
}
}
+void anv_CmdBindVertexBuffers(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets)
+{
+ anv_CmdBindVertexBuffers2EXT(commandBuffer, firstBinding,
+ bindingCount, pBuffers, pOffsets,
+ NULL, NULL);
+}
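From the application side, these are the entry points behind
vkCmdBindVertexBuffers2EXT from VK_EXT_extended_dynamic_state. A usage
sketch; the buffer handle, vertex struct, and counts are illustrative
assumptions:

    /* Assumes the pipeline was created with
     * VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT, so the stride
     * passed here overrides the one baked into the pipeline. */
    VkBuffer buffers[] = { vertex_buffer };                /* assumed handle */
    VkDeviceSize offsets[] = { 0 };
    VkDeviceSize sizes[] = { vertex_count * sizeof(struct vertex) };
    VkDeviceSize strides[] = { sizeof(struct vertex) };
    vkCmdBindVertexBuffers2EXT(cmd, 0, 1, buffers, offsets, sizes, strides);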
+
void anv_CmdBindTransformFeedbackBuffersEXT(
VkCommandBuffer commandBuffer,
uint32_t firstBinding,
struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
+ /* If dynamic, use the stride/size from the vertex binding, otherwise use
+ * the stride/size that was set up in the pipeline object.
+ */
+ bool dynamic_stride = cmd_buffer->state.gfx.dynamic.dyn_vbo_stride;
+ bool dynamic_size = cmd_buffer->state.gfx.dynamic.dyn_vbo_size;
+
+ uint32_t stride = dynamic_stride ?
+ cmd_buffer->state.vertex_bindings[vb].stride : pipeline->vb[vb].stride;
+ /* Don't dereference buffer here; it may be NULL (checked below). */
+ uint32_t size = dynamic_size ?
+ cmd_buffer->state.vertex_bindings[vb].size :
+ (buffer ? buffer->size : 0);
+
struct GENX(VERTEX_BUFFER_STATE) state;
if (buffer) {
state = (struct GENX(VERTEX_BUFFER_STATE)) {
.BufferAccessType = pipeline->vb[vb].instanced ? INSTANCEDATA : VERTEXDATA,
.InstanceDataStepRate = pipeline->vb[vb].instance_divisor,
#endif
-
.AddressModifyEnable = true,
- .BufferPitch = pipeline->vb[vb].stride,
+ .BufferPitch = stride,
.BufferStartingAddress = anv_address_add(buffer->address, offset),
.NullVertexBuffer = offset >= buffer->size,
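+ /* Gen8+ VERTEX_BUFFER_STATE takes an explicit buffer size; earlier
+ * gens program an inclusive end address instead, hence the two paths
+ * below. */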
#if GEN_GEN >= 8
- .BufferSize = buffer->size - offset
+ .BufferSize = size - offset
#else
- .EndAddress = anv_address_add(buffer->address, buffer->size - 1),
+ .EndAddress = anv_address_add(buffer->address, size - 1),
#endif
};
} else {