DEFINE_bool(batchClip, false, "Clip each GrBatch to its device bounds for testing.");
DEFINE_bool(batchBounds, false, "Draw a wireframe bounds of each GrBatch.");
DEFINE_int32(batchLookback, -1, "Maximum GrBatch lookback for combining, negative means default.");
+DEFINE_int32(batchLookahead, -1, "Maximum GrBatch lookahead for combining, negative means "
+ "default.");
Error GPUSink::draw(const Src& src, SkBitmap* dst, SkWStream*, SkString* log) const {
GrContextOptions grOptions;
grOptions.fClipBatchToBounds = FLAGS_batchClip;
grOptions.fDrawBatchBounds = FLAGS_batchBounds;
grOptions.fMaxBatchLookback = FLAGS_batchLookback;
+ grOptions.fMaxBatchLookahead = FLAGS_batchLookahead;
src.modifyGrContextOptions(&grOptions);
////////////////////////////////////////////////////////////////////////////////
// Experimentally we have found that most batching occurs within the first 10 comparisons.
-static const int kDefaultMaxBatchLookback = 10;
+static const int kDefaultMaxBatchLookback = 10;
+static const int kDefaultMaxBatchLookahead = 10;
GrDrawTarget::GrDrawTarget(GrRenderTarget* rt, GrGpu* gpu, GrResourceProvider* resourceProvider,
GrAuditTrail* auditTrail, const Options& options)
fDrawBatchBounds = options.fDrawBatchBounds;
fMaxBatchLookback = (options.fMaxBatchLookback < 0) ? kDefaultMaxBatchLookback :
options.fMaxBatchLookback;
+ fMaxBatchLookahead = (options.fMaxBatchLookahead < 0) ? kDefaultMaxBatchLookahead :
+ options.fMaxBatchLookahead;
rt->setLastDrawTarget(this);
#if 0
SkDebugf("*******************************\n");
#endif
-        SkDebugf("%d: %s\n", i, fBatches[i]->name());
+        // Batches folded into a later batch by forwardCombine() leave a
+        // nullptr placeholder in fBatches; don't dereference those. (The
+        // check must be negated: a null entry means "combined forward".)
+        if (!fBatches[i]) {
+            SkDebugf("%d: <combined forward>\n", i);
+        } else {
+            SkDebugf("%d: %s\n", i, fBatches[i]->name());
#if 0
-        SkString str = fBatches[i]->dumpInfo();
-        SkDebugf("%s\n", str.c_str());
+            SkString str = fBatches[i]->dumpInfo();
+            SkDebugf("%s\n", str.c_str());
#endif
+        }
}
}
#endif
// Loop over the batches that haven't yet generated their geometry
for (int i = 0; i < fBatches.count(); ++i) {
- fBatches[i]->prepare(flushState);
+ if (fBatches[i]) {
+ fBatches[i]->prepare(flushState);
+ }
}
}
// Draw all the generated geometry.
SkRandom random;
for (int i = 0; i < fBatches.count(); ++i) {
+ if (!fBatches[i]) {
+ continue;
+ }
if (fDrawBatchBounds) {
const SkRect& bounds = fBatches[i]->bounds();
SkIRect ibounds;
fBatches.push_back().reset(SkRef(batch));
}
+// Forward pass over fBatches: try to fold each batch into a later batch within
+// fMaxBatchLookahead slots (complementing the backwards combining done in
+// recordBatch). A batch that combines forward leaves a nullptr placeholder at
+// its original slot so the indices of the remaining batches are preserved.
+void GrDrawTarget::forwardCombine() {
+    // count() - 2: the last batch has no successor, and the second-to-last's
+    // only candidate is its immediate successor, which recordBatch already
+    // proved cannot combine.
+    for (int i = 0; i < fBatches.count() - 2; ++i) {
+        GrBatch* batch = fBatches[i];
+        // Furthest slot we are allowed to look ahead to.
+        int maxCandidateIdx = SkTMin(i + fMaxBatchLookahead, fBatches.count() - 1);
+        int j = i + 1;
+        while (true) {
+            GrBatch* candidate = fBatches[j];
+            // We cannot continue to search if the render target changes
+            if (candidate->renderTargetUniqueID() != batch->renderTargetUniqueID()) {
+                GrBATCH_INFO("\t\tBreaking because of (%s, B%u) Rendertarget\n",
+                             candidate->name(), candidate->uniqueID());
+                break;
+            }
+            if (j == i +1) {
+                // We assume batch would have combined with candidate when the candidate was added
+                // via backwards combining in recordBatch.
+                SkASSERT(!batch->combineIfPossible(candidate, *this->caps()));
+            } else if (batch->combineIfPossible(candidate, *this->caps())) {
+                GrBATCH_INFO("\t\tCombining with (%s, B%u)\n", candidate->name(),
+                             candidate->uniqueID());
+                GR_AUDIT_TRAIL_BATCHING_RESULT_COMBINED(fAuditTrail, candidate);
+                // The combined result lives at slot j; slot i becomes a
+                // nullptr placeholder (skipped by the flush/dump loops).
+                fBatches[j].reset(SkRef(batch));
+                fBatches[i].reset(nullptr);
+                break;
+            }
+            // Stop traversing if we would cause a painter's order violation.
+            // TODO: The bounds used here do not fully consider the clip. It may be advantageous
+            // to clip each batch's bounds to the clip.
+            if (intersect(candidate->bounds(), batch->bounds())) {
+                GrBATCH_INFO("\t\tIntersects with (%s, B%u)\n", candidate->name(),
+                             candidate->uniqueID());
+                break;
+            }
+            ++j;
+            if (j > maxCandidateIdx) {
+                GrBATCH_INFO("\t\tReached max lookahead or end of batch array %d\n", i);
+                break;
+            }
+        }
+    }
+}
+
///////////////////////////////////////////////////////////////////////////////
bool GrDrawTarget::installPipelineInDrawBatch(const GrPipelineBuilder* pipelineBuilder,
public:
/** Options for GrDrawTarget behavior. */
    struct Options {
-        Options () : fClipBatchToBounds(false), fDrawBatchBounds(false), fMaxBatchLookback(-1) {}
+        Options ()
+            : fClipBatchToBounds(false)
+            , fDrawBatchBounds(false)
+            , fMaxBatchLookback(-1)
+            , fMaxBatchLookahead(-1) {}
        bool fClipBatchToBounds;
        bool fDrawBatchBounds;
        int fMaxBatchLookback;
+        // Max forward distance to search when combining batches; a negative
+        // value selects the built-in default (kDefaultMaxBatchLookahead).
+        int fMaxBatchLookahead;
    };
GrDrawTarget(GrRenderTarget*, GrGpu*, GrResourceProvider*, GrAuditTrail*, const Options&);
#ifdef ENABLE_MDB
this->setFlag(kClosed_Flag);
#endif
+ this->forwardCombine();
}
+
bool isClosed() const { return this->isSetFlag(kClosed_Flag); }
// TODO: this entry point is only needed in the non-MDB world. Remove when
};
void recordBatch(GrBatch*);
+ void forwardCombine();
bool installPipelineInDrawBatch(const GrPipelineBuilder* pipelineBuilder,
const GrScissorState* scissor,
GrDrawBatch* batch);
void getPathStencilSettingsForFilltype(GrPathRendering::FillType,
const GrStencilAttachment*,
GrStencilSettings*);
- bool setupClip(const GrPipelineBuilder&,
- GrPipelineBuilder::AutoRestoreFragmentProcessorState*,
- GrPipelineBuilder::AutoRestoreStencil*,
- GrScissorState*,
- const SkRect* devBounds);
void addDependency(GrDrawTarget* dependedOn);
bool fDrawBatchBounds;
int fMaxBatchLookback;
+ int fMaxBatchLookahead;
typedef SkRefCnt INHERITED;
};