return SkToBool(ConfigInfo::kBlitSrc_Flag & flags);
}
+ // Tells whether we can pass a straight GLSL string into vkCreateShaderModule
bool canUseGLSLForShaderModule() const {
return fCanUseGLSLForShaderModule;
}
+ // On Adreno Vulkan, the driver does not respect the imageOffset parameter, at least in
+ // copyImageToBuffer. This flag says that we must do the copy starting from the origin always.
bool mustDoCopiesFromOrigin() const {
return fMustDoCopiesFromOrigin;
}
+ // Check whether we support using draws for copies.
bool supportsCopiesAsDraws() const {
return fSupportsCopiesAsDraws;
}
+ // On Nvidia there is a current bug where we must submit the current command buffer before copy
+ // operations or else the copy will not happen. This includes copies, blits, resolves, and copy
+ // as draws.
bool mustSubmitCommandsBeforeCopyOp() const {
return fMustSubmitCommandsBeforeCopyOp;
}
+ // Sometimes calls to QueueWaitIdle return before actually signalling the fences
+ // on the command buffers even though they have completed. This causes an assert to fire when
+ // destroying the command buffers. Therefore we add a sleep to make sure the fence signals.
bool mustSleepOnTearDown() const {
return fMustSleepOnTearDown;
}
+ // Returns true if while adding commands to secondary command buffers, we must make a new
+ // secondary command buffer every time we want to bind a new VkPipeline. This is to work around a
+ // driver bug specifically on AMD.
+ bool newSecondaryCBOnPipelineChange() const {
+ return fNewSecondaryCBOnPipelineChange;
+ }
+
/**
* Returns both a supported and most prefered stencil format to use in draws.
*/
StencilFormat fPreferedStencilFormat;
- // Tells of if we can pass in straight GLSL string into vkCreateShaderModule
bool fCanUseGLSLForShaderModule;
- // On Adreno vulkan, they do not respect the imageOffset parameter at least in
- // copyImageToBuffer. This flag says that we must do the copy starting from the origin always.
bool fMustDoCopiesFromOrigin;
- // Check whether we support using draws for copies.
bool fSupportsCopiesAsDraws;
- // On Nvidia there is a current bug where we must the current command buffer before copy
- // operations or else the copy will not happen. This includes copies, blits, resolves, and copy
- // as draws.
bool fMustSubmitCommandsBeforeCopyOp;
- // Sometimes calls to QueueWaitIdle return before actually signalling the fences
- // on the command buffers even though they have completed. This causes an assert to fire when
- // destroying the command buffers. Therefore we add a sleep to make sure the fence signals.
bool fMustSleepOnTearDown;
+ bool fNewSecondaryCBOnPipelineChange;
+
typedef GrCaps INHERITED;
};
const LoadAndStoreInfo& stencilInfo)
: fGpu(gpu)
, fRenderTarget(nullptr)
- , fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor)){
+ , fClearColor(GrColor4f::FromGrColor(colorInfo.fClearColor))
+ , fLastPipelineState(nullptr) {
get_vk_load_store_ops(colorInfo, &fVkColorLoadOp, &fVkColorStoreOp);
get_vk_load_store_ops(stencilInfo, &fVkStencilLoadOp, &fVkStencilStoreOp);
- fCurrentCmdBuffer = -1;
+ fCurrentCmdInfo = -1;
}
void GrVkGpuCommandBuffer::init(GrVkRenderTarget* target) {
CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
SkASSERT(fCommandBufferInfos.count() == 1);
- fCurrentCmdBuffer = 0;
+ fCurrentCmdInfo = 0;
const GrVkResourceProvider::CompatibleRPHandle& rpHandle = target->compatibleRenderPassHandle();
if (rpHandle.isValid()) {
cbInfo.fIsEmpty = true;
cbInfo.fStartsWithClear = false;
- cbInfo.fCommandBuffer = fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer();
- cbInfo.fCommandBuffer->begin(fGpu, target->framebuffer(), cbInfo.fRenderPass);
+ cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
+ cbInfo.currentCmdBuf()->begin(fGpu, target->framebuffer(), cbInfo.fRenderPass);
}
GrVkGpuCommandBuffer::~GrVkGpuCommandBuffer() {
for (int i = 0; i < fCommandBufferInfos.count(); ++i) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[i];
- cbInfo.fCommandBuffer->unref(fGpu);
+ for (int j = 0; j < cbInfo.fCommandBuffers.count(); ++j) {
+ cbInfo.fCommandBuffers[j]->unref(fGpu);
+ }
cbInfo.fRenderPass->unref(fGpu);
}
}
GrRenderTarget* GrVkGpuCommandBuffer::renderTarget() { return fRenderTarget; }
void GrVkGpuCommandBuffer::end() {
- if (fCurrentCmdBuffer >= 0) {
- fCommandBufferInfos[fCurrentCmdBuffer].fCommandBuffer->end(fGpu);
+ if (fCurrentCmdInfo >= 0) {
+ fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
}
}
SkIRect iBounds;
cbInfo.fBounds.roundOut(&iBounds);
- fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffer, cbInfo.fRenderPass,
+ fGpu->submitSecondaryCommandBuffer(cbInfo.fCommandBuffers, cbInfo.fRenderPass,
&cbInfo.fColorClearValue, fRenderTarget, iBounds);
}
}
}
SkASSERT(target == fRenderTarget);
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
if (cbInfo.fIsEmpty) {
// We will change the render pass to do a clear load instead
GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_DONT_CARE,
}
SkASSERT(target == fRenderTarget);
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
// this should only be called internally when we know we have a
attachment.colorAttachment = 0; // this value shouldn't matter
attachment.clearValue.depthStencil = vkStencilColor;
- cbInfo.fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
cbInfo.fIsEmpty = false;
// Update command buffer bounds
}
SkASSERT(target == fRenderTarget);
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
VkClearColorValue vkColor;
GrColorToRGBAFloat(color, vkColor.float32);
attachment.colorAttachment = colorIndex;
attachment.clearValue.color = vkColor;
- cbInfo.fCommandBuffer->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
+ cbInfo.currentCmdBuf()->clearAttachments(fGpu, 1, &attachment, 1, &clearRect);
cbInfo.fIsEmpty = false;
// Update command buffer bounds
}
void GrVkGpuCommandBuffer::addAdditionalCommandBuffer() {
- fCommandBufferInfos[fCurrentCmdBuffer].fCommandBuffer->end(fGpu);
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
+ cbInfo.currentCmdBuf()->end(fGpu);
+ cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
+ cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
+}
+
+void GrVkGpuCommandBuffer::addAdditionalRenderPass() {
+ fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf()->end(fGpu);
CommandBufferInfo& cbInfo = fCommandBufferInfos.push_back();
- fCurrentCmdBuffer++;
+ fCurrentCmdInfo++;
GrVkRenderPass::LoadStoreOps vkColorOps(VK_ATTACHMENT_LOAD_OP_LOAD,
VK_ATTACHMENT_STORE_OP_STORE);
vkStencilOps);
}
- cbInfo.fCommandBuffer = fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer();
+ cbInfo.fCommandBuffers.push_back(fGpu->resourceProvider().findOrCreateSecondaryCommandBuffer());
// It shouldn't matter what we set the clear color to here since we will assume loading of the
// attachment.
memset(&cbInfo.fColorClearValue, 0, sizeof(VkClearValue));
cbInfo.fIsEmpty = true;
cbInfo.fStartsWithClear = false;
- cbInfo.fCommandBuffer->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
+ cbInfo.currentCmdBuf()->begin(fGpu, fRenderTarget->framebuffer(), cbInfo.fRenderPass);
}
void GrVkGpuCommandBuffer::inlineUpload(GrOpFlushState* state, GrDrawOp::DeferredUploadFn& upload,
if (!fRenderTarget) {
this->init(target);
}
- if (!fCommandBufferInfos[fCurrentCmdBuffer].fIsEmpty) {
- this->addAdditionalCommandBuffer();
+ if (!fCommandBufferInfos[fCurrentCmdInfo].fIsEmpty) {
+ this->addAdditionalRenderPass();
}
- fCommandBufferInfos[fCurrentCmdBuffer].fPreDrawUploads.emplace_back(state, upload);
+ fCommandBufferInfos[fCurrentCmdInfo].fPreDrawUploads.emplace_back(state, upload);
}
////////////////////////////////////////////////////////////////////////////////
void GrVkGpuCommandBuffer::bindGeometry(const GrPrimitiveProcessor& primProc,
const GrNonInstancedMesh& mesh) {
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
// There is no need to put any memory barriers to make sure host writes have finished here.
// When a command buffer is submitted to a queue, there is an implicit memory barrier that
// occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
SkASSERT(vbuf);
SkASSERT(!vbuf->isMapped());
- cbInfo.fCommandBuffer->bindVertexBuffer(fGpu, vbuf);
+ cbInfo.currentCmdBuf()->bindVertexBuffer(fGpu, vbuf);
if (mesh.isIndexed()) {
SkASSERT(!mesh.indexBuffer()->isCPUBacked());
SkASSERT(ibuf);
SkASSERT(!ibuf->isMapped());
- cbInfo.fCommandBuffer->bindIndexBuffer(fGpu, ibuf);
+ cbInfo.currentCmdBuf()->bindIndexBuffer(fGpu, ibuf);
}
}
const GrPipeline& pipeline,
const GrPrimitiveProcessor& primProc,
GrPrimitiveType primitiveType) {
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
+ SkASSERT(cbInfo.fRenderPass);
sk_sp<GrVkPipelineState> pipelineState =
fGpu->resourceProvider().findOrCreateCompatiblePipelineState(pipeline,
return pipelineState;
}
+ if (!cbInfo.fIsEmpty &&
+ fLastPipelineState && fLastPipelineState != pipelineState.get() &&
+ fGpu->vkCaps().newSecondaryCBOnPipelineChange()) {
+ this->addAdditionalCommandBuffer();
+ }
+ fLastPipelineState = pipelineState.get();
+
pipelineState->setData(fGpu, primProc, pipeline);
- pipelineState->bind(fGpu, cbInfo.fCommandBuffer);
+ pipelineState->bind(fGpu, cbInfo.currentCmdBuf());
- GrVkPipeline::SetDynamicState(fGpu, cbInfo.fCommandBuffer, pipeline);
+ GrVkPipeline::SetDynamicState(fGpu, cbInfo.currentCmdBuf(), pipeline);
return pipelineState;
}
if (!meshCount) {
return;
}
- CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdBuffer];
- SkASSERT(cbInfo.fRenderPass);
-
prepare_sampled_images(primProc, fGpu);
GrFragmentProcessor::Iter iter(pipeline);
while (const GrFragmentProcessor* fp = iter.next()) {
return;
}
+ CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
+
for (int i = 0; i < meshCount; ++i) {
const GrMesh& mesh = meshes[i];
GrMesh::Iterator iter;
this->bindGeometry(primProc, *nonIdxMesh);
if (nonIdxMesh->isIndexed()) {
- cbInfo.fCommandBuffer->drawIndexed(fGpu,
+ cbInfo.currentCmdBuf()->drawIndexed(fGpu,
nonIdxMesh->indexCount(),
1,
nonIdxMesh->startIndex(),
nonIdxMesh->startVertex(),
0);
} else {
- cbInfo.fCommandBuffer->draw(fGpu,
+ cbInfo.currentCmdBuf()->draw(fGpu,
nonIdxMesh->vertexCount(),
1,
nonIdxMesh->startVertex(),