};
}
-int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchClassID;
+int32_t GrBatch::gCurrBatchClassID = GrBatch::kIllegalBatchID;
+
+GrBATCH_SPEW(int32_t GrBatch::gCurrBatchUniqueID = GrBatch::kIllegalBatchID;)
void* GrBatch::operator new(size_t size) {
return MemoryPoolAccessor().pool()->allocate(size);
* the draw, ie whether or not the GrBatch is allowed to tweak alpha for coverage, then this
* information will be communicated to the GrBatch prior to geometry generation.
*/
+#define GR_BATCH_SPEW 0
+#if GR_BATCH_SPEW
+ #define GrBATCH_INFO(...) SkDebugf(__VA_ARGS__)
+ #define GrBATCH_SPEW(code) code
+#else
+ #define GrBATCH_SPEW(code)
+ #define GrBATCH_INFO(...)
+#endif
class GrBatch : public GrNonAtomicRef {
public:
- GrBatch() : fClassID(kIllegalBatchClassID), fNumberOfDraws(0) { SkDEBUGCODE(fUsed = false;) }
+ GrBatch()
+ : fClassID(kIllegalBatchID)
+ , fNumberOfDraws(0)
+#if GR_BATCH_SPEW
+ , fUniqueID(GenID(&gCurrBatchUniqueID))
+#endif
+ { SkDEBUGCODE(fUsed = false;) }
virtual ~GrBatch() {}
virtual const char* name() const = 0;
template <typename T> const T& cast() const { return *static_cast<const T*>(this); }
template <typename T> T* cast() { return static_cast<T*>(this); }
- uint32_t classID() const { SkASSERT(kIllegalBatchClassID != fClassID); return fClassID; }
+ uint32_t classID() const { SkASSERT(kIllegalBatchID != fClassID); return fClassID; }
// TODO no GrPrimitiveProcessors yet read fragment position
bool willReadFragmentPosition() const { return false; }
const GrPipeline* pipeline() const { return fPipeline; }
void setPipeline(const GrPipeline* pipeline) { fPipeline.reset(SkRef(pipeline)); }
+#if GR_BATCH_SPEW
+ uint32_t uniqueID() const { return fUniqueID; }
+#endif
+
protected:
template <typename PROC_SUBCLASS> void initClassID() {
- static uint32_t kClassID = GenClassID();
+ static uint32_t kClassID = GenID(&gCurrBatchClassID);
fClassID = kClassID;
}
SkRect fBounds;
private:
- static uint32_t GenClassID() {
+ static uint32_t GenID(int32_t* idCounter) {
// fCurrProcessorClassID has been initialized to kIllegalProcessorClassID. The
// atomic inc returns the old value not the incremented value. So we add
// 1 to the returned value.
- uint32_t id = static_cast<uint32_t>(sk_atomic_inc(&gCurrBatchClassID)) + 1;
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
if (!id) {
SkFAIL("This should never wrap as it should only be called once for each GrBatch "
"subclass.");
}
enum {
- kIllegalBatchClassID = 0,
+ kIllegalBatchID = 0,
};
SkAutoTUnref<const GrPipeline> fPipeline;
static int32_t gCurrBatchClassID;
int fNumberOfDraws;
SkDEBUGCODE(bool fUsed;)
+#if GR_BATCH_SPEW
+ static int32_t gCurrBatchUniqueID;
+ uint32_t fUniqueID;
+#endif
typedef SkRefCnt INHERITED;
};
GrColorIsPMAssert(color);
clr->fColor = color;
clr->fRect = rect;
+    GrBATCH_INFO("Recording clear %u\n", clr->uniqueID());
return clr;
}
(renderTarget));
clr->fRect = rect;
clr->fInsideClip = insideClip;
+    GrBATCH_INFO("Recording clear stencil clip %u\n", clr->uniqueID());
return clr;
}
Clear* clr = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), Clear, (renderTarget));
clr->fColor = GrColor_ILLEGAL;
+    GrBATCH_INFO("Recording discard %u\n", clr->uniqueID());
return clr;
}
CopySurface* cs = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), CopySurface, (dst, src));
cs->fSrcRect = srcRect;
cs->fDstPoint = dstPoint;
+    GrBATCH_INFO("Recording copysurface %u\n", cs->uniqueID());
return cs;
}
XferBarrier* xb = GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), XferBarrier, (rt));
xb->fBarrierType = barrierType;
+    GrBATCH_INFO("Recording xfer barrier %u\n", xb->uniqueID());
return xb;
}
GrTargetCommands::Cmd* GrInOrderCommandBuilder::recordDrawBatch(State* state, GrBatch* batch) {
// Check if there is a Batch Draw we can batch with
batch->setPipeline(state->getPipeline());
+ GrBATCH_INFO("In-Recording (%s, %u)\n", batch->name(), batch->uniqueID());
if (!this->cmdBuffer()->empty() &&
Cmd::kDrawBatch_CmdType == this->cmdBuffer()->back().type()) {
DrawBatch* previous = static_cast<DrawBatch*>(&this->cmdBuffer()->back());
if (previous->fState == state && previous->fBatch->combineIfPossible(batch)) {
+ GrBATCH_INFO("\tBatching with (%s, %u)\n",
+ previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;
}
}
int i = 0;
batch->setPipeline(state->getPipeline());
GrRenderTarget* rt = state->getPipeline()->getRenderTarget();
+
+ GrBATCH_INFO("Re-Recording (%s, B%u)\n"
+ "\tRenderTarget %p\n"
+ "\tBounds (%f, %f, %f, %f)\n",
+ batch->name(),
+ batch->uniqueID(), rt,
+ batch->bounds().fLeft, batch->bounds().fRight,
+ batch->bounds().fTop, batch->bounds().fBottom);
+#if GR_BATCH_SPEW
+ SkDebugf("\tColorStages:\n");
+ for (int i = 0; i < state->getPipeline()->numColorFragmentStages(); i++) {
+ SkDebugf("\t\t%s\n", state->getPipeline()->getColorStage(i).processor()->name());
+ }
+ SkDebugf("\tCoverageStages:\n");
+ for (int i = 0; i < state->getPipeline()->numCoverageFragmentStages(); i++) {
+ SkDebugf("\t\t%s\n", state->getPipeline()->getCoverageStage(i).processor()->name());
+ }
+ SkDebugf("\tXP: %s\n", state->getPipeline()->getXferProcessor()->name());
+#endif
+ GrBATCH_INFO("\tOutcome:\n");
if (!this->cmdBuffer()->empty()) {
GrTargetCommands::CmdBuffer::ReverseIter reverseIter(*this->cmdBuffer());
DrawBatch* previous = static_cast<DrawBatch*>(reverseIter.get());
if (previous->fBatch->pipeline()->getRenderTarget() != rt) {
+ GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+ previous->fBatch->name(), previous->fBatch->uniqueID());
break;
}
// We cannot continue to search backwards if the render target changes
if (previous->fBatch->combineIfPossible(batch)) {
+ GrBATCH_INFO("\t\tCombining with (%s, B%u)\n",
+ previous->fBatch->name(), previous->fBatch->uniqueID());
return NULL;
}
if (intersect(previous->fBatch->bounds(), batch->bounds())) {
+ GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n",
+ previous->fBatch->name(), previous->fBatch->uniqueID());
break;
}
} else if (Cmd::kClear_CmdType == reverseIter->type()) {
// We cannot continue to search backwards if the render target changes
if (previous->renderTarget() != rt) {
+ GrBATCH_INFO("\t\tBreaking because of Clear's Rendertarget change\n");
break;
}
// We set the color to illegal if we are doing a discard.
if (previous->fColor == GrColor_ILLEGAL ||
intersect(batch->bounds(), previous->fRect)) {
+ GrBATCH_INFO("\t\tBreaking because of Clear intersection\n");
break;
}
} else {
+ GrBATCH_INFO("\t\tBreaking because of other %08x\n", reverseIter->type());
// TODO temporary until we can navigate the other types of commands
break;
}
} while (reverseIter.previous() && ++i < kMaxLookback);
+#if GR_BATCH_SPEW
+ if (!reverseIter.get()) {
+ GrBATCH_INFO("\t\tNo more commands to try and batch with\n");
+ } else if (i >= kMaxLookback) {
+ GrBATCH_INFO("\t\tReached max lookback %d\n", i);
+ }
+#endif
+ }
+#if GR_BATCH_SPEW
+ else {
+ GrBATCH_INFO("\t\tBreaking because empty command buffer\n");
}
+#endif
return GrNEW_APPEND_TO_RECORDER(*this->cmdBuffer(), DrawBatch, (state, batch,
this->batchTarget()));
#include "GrBufferedDrawTarget.h"
+GrBATCH_SPEW(int32_t GrTargetCommands::Cmd::gUniqueID = 0;)
+
void GrTargetCommands::reset() {
fCmdBuffer.reset();
fBatchTarget.reset();
}
void GrTargetCommands::flush(GrBufferedDrawTarget* bufferedDrawTarget) {
+ GrBATCH_INFO("Flushing\n");
if (fCmdBuffer.empty()) {
return;
}
kXferBarrier_CmdType = 9,
};
- Cmd(CmdType type) : fMarkerID(-1), fType(type) {}
+ Cmd(CmdType type)
+ : fMarkerID(-1)
+ , fType(type)
+#if GR_BATCH_SPEW
+ , fUniqueID(GenID(&gUniqueID))
+#endif
+    {}
virtual ~Cmd() {}
virtual void execute(GrGpu*) = 0;
bool isTraced() const { return -1 != fMarkerID; }
void setMarkerID(int markerID) { SkASSERT(-1 == fMarkerID); fMarkerID = markerID; }
int markerID() const { return fMarkerID; }
+ GrBATCH_SPEW(uint32_t uniqueID() const { return fUniqueID;} )
private:
+ // TODO move this to a common header so it can be shared with GrBatch
+ static uint32_t GenID(int32_t* idCounter) {
+ uint32_t id = static_cast<uint32_t>(sk_atomic_inc(idCounter)) + 1;
+ if (!id) {
+ SkFAIL("This should never wrap\n");
+ }
+ return id;
+ }
int fMarkerID;
CmdType fType;
+    GrBATCH_SPEW(uint32_t fUniqueID;)
+ GrBATCH_SPEW(static int32_t gUniqueID;)
};
void reset();