return SkToBool(fMapPtr);
}
-bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
+bool GrVkBuffer::vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
SkASSERT(!this->vkIsMapped());
VALIDATE();
if (srcSizeInBytes > fDesc.fSizeInBytes) {
// in use by the command buffer, so we need to create a new one
fResource->unref(gpu);
fResource = Create(gpu, fDesc);
+ if (createdNewBuffer) {
+ *createdNewBuffer = true;
+ }
}
void* mapPtr;
void* vkMap(const GrVkGpu* gpu);
void vkUnmap(const GrVkGpu* gpu);
- bool vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes);
+    // If the caller passes in a non-null createdNewBuffer, this function will set the bool to true
+ // if it creates a new VkBuffer to upload the data to.
+ bool vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer = nullptr);
void vkAbandon();
void vkRelease(const GrVkGpu* gpu);
this->writeSamplers(gpu, textureBindings);
}
-
if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
- fUniformPoolManager.getNewDescriptorSet(gpu,
+ if (fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer) ||
+ VK_NULL_HANDLE == fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]) {
+ fUniformPoolManager.getNewDescriptorSet(gpu,
&fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
- this->writeUniformBuffers(gpu);
+ this->writeUniformBuffers(gpu);
+ }
}
}
void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
- fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer);
-
VkWriteDescriptorSet descriptorWrites[2];
memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
}
};
-void GrVkPipelineStateDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
+bool GrVkPipelineStateDataManager::uploadUniformBuffers(const GrVkGpu* gpu,
GrVkUniformBuffer* vertexBuffer,
GrVkUniformBuffer* fragmentBuffer) const {
+ bool updatedBuffer = false;
if (vertexBuffer && fVertexUniformsDirty) {
vertexBuffer->addMemoryBarrier(gpu,
VK_ACCESS_UNIFORM_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
false);
- SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize));
+ SkAssertResult(vertexBuffer->updateData(gpu, fVertexUniformData.get(), fVertexUniformSize,
+ &updatedBuffer));
fVertexUniformsDirty = false;
}
VK_PIPELINE_STAGE_HOST_BIT,
false);
SkAssertResult(fragmentBuffer->updateData(gpu, fFragmentUniformData.get(),
- fFragmentUniformSize));
+ fFragmentUniformSize, &updatedBuffer));
fFragmentUniformsDirty = false;
}
+ return updatedBuffer;
}
SkFAIL("Only supported in NVPR, which is not in vulkan");
}
- void uploadUniformBuffers(const GrVkGpu* gpu,
+ // Returns true if either the vertex or fragment buffer needed to generate a new underlying
+    // VkBuffer object in order to upload data. If true is returned, this is a signal to the caller
+ // that they will need to update the descriptor set that is using these buffers.
+ bool uploadUniformBuffers(const GrVkGpu* gpu,
GrVkUniformBuffer* vertexBuffer,
GrVkUniformBuffer* fragmentBuffer) const;
private:
void unmap(const GrVkGpu* gpu) {
this->vkUnmap(gpu);
}
- bool updateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) {
- return this->vkUpdateData(gpu, src, srcSizeInBytes);
+ // The output variable createdNewBuffer must be set to true if a new VkBuffer is created in
+    // order to upload the data.
+ bool updateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
+ bool* createdNewBuffer) {
+ return this->vkUpdateData(gpu, src, srcSizeInBytes, createdNewBuffer);
}
void release(const GrVkGpu* gpu) {
this->vkRelease(gpu);