};
/**
+ * Barriers for blending. When a shader reads the dst directly, an Xfer barrier is sometimes
+ * required after a pixel has been written, before it can be safely read again.
+ */
+enum GrXferBarrierType {
+ kTexture_GrXferBarrierType, //!< Required when a shader reads and renders to the same texture.
+};
+
+/**
* GrXferProcessor is responsible for implementing the xfer mode that blends the src color and dst
* color. It does this by emitting fragment shader code and controlling the fixed-function blend
* state. The inputs to its shader code are the final computed src color and fractional pixel
GrColor* overrideColor,
const GrDrawTargetCaps& caps) = 0;
+ /**
+ * Returns whether this XP will require an Xfer barrier on the given rt. If true, outBarrierType
+ * is updated to contain the type of barrier needed.
+ */
+ bool willNeedXferBarrier(const GrRenderTarget* rt,
+ const GrDrawTargetCaps& caps,
+ GrXferBarrierType* outBarrierType) const;
+
struct BlendInfo {
void reset() {
fSrcBlend = kOne_GrBlendCoeff;
if (!pipelineBuilder.willXPNeedDstCopy(*this->caps(), colorPOI, coveragePOI)) {
return true;
}
- SkIRect copyRect;
+
GrRenderTarget* rt = pipelineBuilder.getRenderTarget();
+
+ if (this->caps()->textureBarrierSupport()) {
+ if (GrTexture* rtTex = rt->asTexture()) {
+ // The render target is a texture, so we can read from it directly in the shader. The XP
+ // will be responsible to detect this situation and request a texture barrier.
+ dstCopy->setTexture(rtTex);
+ dstCopy->setOffset(0, 0);
+ return true;
+ }
+ }
+
+ SkIRect copyRect;
pipelineBuilder.clip().getConservativeBounds(rt, &copyRect);
if (drawBounds) {
const SkIRect& srcRect,
const SkIPoint& dstPoint) = 0;
+ // Called before certain draws in order to guarantee coherent results from dst reads.
+ virtual void xferBarrier(GrXferBarrierType) = 0;
+
struct DrawArgs {
typedef GrDrawTarget::DrawInfo DrawInfo;
DrawArgs(const GrPrimitiveProcessor* primProc,
gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}
+// Replays a recorded barrier: forwards the barrier type captured at record time
+// to the GPU. The SetState argument is intentionally unused here.
+void GrTargetCommands::XferBarrier::execute(GrGpu* gpu, const SetState* state) {
+ gpu->xferBarrier(fBarrierType);
+}
+
GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
fPrevState = ss;
iodb->recordTraceMarkersIfNecessary(ss);
}
+
+ this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
return true;
}
fPrevState = ss;
iodb->recordTraceMarkersIfNecessary(ss);
}
+
+ this->recordXferBarrierIfNecessary(iodb, pipelineInfo);
return true;
}
+// Appends an XferBarrier command to the command buffer when the most recently
+// recorded state's xfer processor reports that its render target needs one.
+// Must only be called after a SetState has been recorded (fPrevState != NULL).
+// NOTE(review): 'info' is currently unused — presumably reserved; confirm.
+void GrTargetCommands::recordXferBarrierIfNecessary(GrInOrderDrawBuffer* iodb,
+ const GrDrawTarget::PipelineInfo& info) {
+ SkASSERT(fPrevState);
+ const GrXferProcessor& xp = *fPrevState->getXferProcessor();
+ GrRenderTarget* rt = fPrevState->getRenderTarget();
+
+ GrXferBarrierType barrierType;
+ if (!xp.willNeedXferBarrier(rt, *iodb->caps(), &barrierType)) {
+ // No barrier needed for this draw; record nothing.
+ return;
+ }
+
+ XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, XferBarrier, ());
+ xb->fBarrierType = barrierType;
+
+ iodb->recordTraceMarkersIfNecessary(xb);
+}
+
kDrawPath_CmdType = 6,
kDrawPaths_CmdType = 7,
kDrawBatch_CmdType = 8,
+ kXferBarrier_CmdType = 9,
};
Cmd(CmdType type) : fMarkerID(-1), fType(type) {}
GrBatch*,
const GrDrawTarget::PipelineInfo&);
+ void recordXferBarrierIfNecessary(GrInOrderDrawBuffer*, const GrDrawTarget::PipelineInfo&);
+
struct Draw : public Cmd {
Draw(const GrDrawTarget::DrawInfo& info) : Cmd(kDraw_CmdType), fInfo(info) {}
const GrPipeline* getPipeline() const {
return reinterpret_cast<const GrPipeline*>(fPipeline.get());
}
+ // Convenience accessors that unwrap the pipeline stored in this command.
+ GrRenderTarget* getRenderTarget() const {
+ return this->getPipeline()->getRenderTarget();
+ }
+ const GrXferProcessor* getXferProcessor() const {
+ return this->getPipeline()->getXferProcessor();
+ }
void execute(GrGpu*, const SetState*) override;
GrBatchTarget* fBatchTarget;
};
+ // Command that issues a GPU xfer barrier of fBarrierType before a draw whose
+ // shader reads the dst, so previously written pixels can be safely read again.
+ struct XferBarrier : public Cmd {
+ XferBarrier() : Cmd(kXferBarrier_CmdType) {}
+
+ void execute(GrGpu*, const SetState*) override;
+
+ GrXferBarrierType fBarrierType;
+ };
+
static const int kCmdBufferInitialSizeInBytes = 8 * 1024;
typedef void* TCmdAlign; // This wouldn't be enough align if a command used long double.
return false;
}
+ void xferBarrier(GrXferBarrierType) override {}
+
private:
void onResetContext(uint32_t resetBits) override {}
this->onGetGLProcessorKey(caps, b);
}
+// Returns true, and sets *outBarrierType, when this XP's dst copy IS the render
+// target's own texture — i.e. the shader reads directly from the surface being
+// rendered to, which requires a texture barrier between overlapping draws.
+bool GrXferProcessor::willNeedXferBarrier(const GrRenderTarget* rt,
+ const GrDrawTargetCaps& caps,
+ GrXferBarrierType* outBarrierType) const {
+ if (static_cast<const GrSurface*>(rt) == this->getDstCopyTexture()) {
+ // Texture barriers are required when a shader reads and renders to the same texture.
+ SkASSERT(rt);
+ SkASSERT(caps.textureBarrierSupport());
+ *outBarrierType = kTexture_GrXferBarrierType;
+ return true;
+ }
+ return false;
+}
+
///////////////////////////////////////////////////////////////////////////////
GrXferProcessor* GrXPFactory::createXferProcessor(const GrProcOptInfo& colorPOI,
return false;
}
+// Issues the GL command corresponding to the requested barrier type. Only
+// texture barriers exist today; textureBarrierSupport() must already be true.
+void GrGLGpu::xferBarrier(GrXferBarrierType type) {
+ switch (type) {
+ case kTexture_GrXferBarrierType:
+ SkASSERT(this->caps()->textureBarrierSupport());
+ GL_CALL(TextureBarrier());
+ return;
+ }
+}
+
void GrGLGpu::didAddGpuTraceMarker() {
if (this->caps()->gpuTracingSupport()) {
const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers();
const SkIRect& srcRect,
const SkIPoint& dstPoint) override;
+ void xferBarrier(GrXferBarrierType) override;
+
void buildProgramDesc(GrProgramDesc*,
const GrPrimitiveProcessor&,
const GrPipeline&,
if (pipeline.mustSkip()) {
continue;
}
+
+ GrXferBarrierType barrierType;
+ if (pipeline.getXferProcessor()->willNeedXferBarrier(rt, *gpu->caps(), &barrierType)) {
+ gpu->xferBarrier(barrierType);
+ }
+
GrBatchTracker bt;
primProc->initBatchTracker(&bt, pipeline.getInitBatchTracker());