3 * Copyright 2011 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
11 #include "GrAARectRenderer.h"
13 #include "GrBatchTarget.h"
14 #include "GrBufferAllocPool.h"
15 #include "GrDefaultGeoProcFactory.h"
16 #include "GrFontCache.h"
17 #include "GrGpuResource.h"
18 #include "GrGpuResourcePriv.h"
19 #include "GrDistanceFieldTextContext.h"
20 #include "GrDrawTargetCaps.h"
22 #include "GrIndexBuffer.h"
23 #include "GrInOrderDrawBuffer.h"
24 #include "GrLayerCache.h"
25 #include "GrOvalRenderer.h"
26 #include "GrPathRenderer.h"
27 #include "GrPathUtils.h"
28 #include "GrRenderTargetPriv.h"
29 #include "GrResourceCache.h"
30 #include "GrSoftwarePathRenderer.h"
31 #include "GrStencilAndCoverTextContext.h"
32 #include "GrStrokeInfo.h"
33 #include "GrSurfacePriv.h"
34 #include "GrTexturePriv.h"
35 #include "GrTraceMarker.h"
36 #include "GrTracing.h"
37 #include "SkDashPathPriv.h"
38 #include "SkConfig8888.h"
41 #include "SkStrokeRec.h"
44 #include "SkTraceEvent.h"
46 #include "effects/GrConfigConversionEffect.h"
47 #include "effects/GrDashingEffect.h"
48 #include "effects/GrSingleTextureEffect.h"
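// Sizing for the pools that back the in-order draw buffer: 32 KB vertex
// buffers and 2 KB index buffers, four of each preallocated (consumed by
// setupDrawBuffer() below).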
50 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
51 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
53 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
54 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
56 #define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
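// The RETURN_*_IF_ABANDONED macros below treat a NULL fDrawBuffer as the
// "context abandoned" state and give public entry points a cheap early-out.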
57 #define RETURN_IF_ABANDONED if (!fDrawBuffer) { return; }
58 #define RETURN_FALSE_IF_ABANDONED if (!fDrawBuffer) { return false; }
59 #define RETURN_NULL_IF_ABANDONED if (!fDrawBuffer) { return NULL; }
61 class GrContext::AutoCheckFlush {
63 AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }
66 if (fContext->fFlushToReduceCacheSize) {
75 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
76 const Options* opts) {
79 context = SkNEW_ARGS(GrContext, (Options()));
81 context = SkNEW_ARGS(GrContext, (*opts));
84 if (context->init(backend, backendContext)) {
92 GrContext::GrContext(const Options& opts) : fOptions(opts) {
94 fPathRendererChain = NULL;
95 fSoftwarePathRenderer = NULL;
96 fResourceCache = NULL;
99 fDrawBufferVBAllocPool = NULL;
100 fDrawBufferIBAllocPool = NULL;
101 fFlushToReduceCacheSize = false;
102 fAARectRenderer = NULL;
103 fOvalRenderer = NULL;
104 fMaxTextureSizeOverride = 1 << 20;
107 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
108 SkASSERT(NULL == fGpu);
110 fGpu = GrGpu::Create(backend, backendContext, this);
118 void GrContext::initCommon() {
119 fResourceCache = SkNEW(GrResourceCache);
120 fResourceCache->setOverBudgetCallback(OverBudgetCB, this);
122 fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
124 fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));
126 fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
127 fOvalRenderer = SkNEW_ARGS(GrOvalRenderer, (fGpu));
129 fDidTestPMConversions = false;
131 this->setupDrawBuffer();
134 GrContext::~GrContext() {
141 for (int i = 0; i < fCleanUpData.count(); ++i) {
142 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
145 SkDELETE(fResourceCache);
146 SkDELETE(fFontCache);
147 SkDELETE(fDrawBuffer);
148 SkDELETE(fDrawBufferVBAllocPool);
149 SkDELETE(fDrawBufferIBAllocPool);
151 fAARectRenderer->unref();
152 fOvalRenderer->unref();
155 SkSafeUnref(fPathRendererChain);
156 SkSafeUnref(fSoftwarePathRenderer);
159 void GrContext::abandonContext() {
160 // abandon first so that destructors
161 // don't try to free the resources in the API.
162 fResourceCache->abandonAll();
164 fGpu->contextAbandoned();
166 // a path renderer may be holding onto resources that are now unusable
168 SkSafeSetNull(fPathRendererChain);
169 SkSafeSetNull(fSoftwarePathRenderer);
174 delete fDrawBufferVBAllocPool;
175 fDrawBufferVBAllocPool = NULL;
177 delete fDrawBufferIBAllocPool;
178 fDrawBufferIBAllocPool = NULL;
180 fAARectRenderer->reset();
181 fOvalRenderer->reset();
183 fFontCache->freeAll();
184 fLayerCache->freeAll();
187 void GrContext::resetContext(uint32_t state) {
188 fGpu->markContextDirty(state);
191 void GrContext::freeGpuResources() {
195 fDrawBuffer->purgeResources();
198 fAARectRenderer->reset();
199 fOvalRenderer->reset();
201 fFontCache->freeAll();
202 fLayerCache->freeAll();
203 // a path renderer may be holding onto resources
204 SkSafeSetNull(fPathRendererChain);
205 SkSafeSetNull(fSoftwarePathRenderer);
208 void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
210 *resourceCount = fResourceCache->getBudgetedResourceCount();
213 *resourceBytes = fResourceCache->getBudgetedResourceBytes();
217 GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
218 SkGpuDevice* gpuDevice,
219 const SkDeviceProperties& leakyProperties,
221 bool enableDistanceFieldFonts) {
222 if (fGpu->caps()->pathRenderingSupport() && renderTarget->isMultisampled()) {
223 GrStencilBuffer* sb = renderTarget->renderTargetPriv().attachStencilBuffer();
225 return GrStencilAndCoverTextContext::Create(this, gpuDevice, leakyProperties);
229 return GrDistanceFieldTextContext::Create(this, gpuDevice, leakyProperties,
230 enableDistanceFieldFonts);
233 ////////////////////////////////////////////////////////////////////////////////
234 enum ScratchTextureFlags {
235 kExact_ScratchTextureFlag = 0x1,
236 kNoPendingIO_ScratchTextureFlag = 0x2,
237 kNoCreate_ScratchTextureFlag = 0x4,
240 bool GrContext::isConfigTexturable(GrPixelConfig config) const {
241 return fGpu->caps()->isConfigTexturable(config);
244 bool GrContext::npotTextureTileSupport() const {
245 return fGpu->caps()->npotTextureTileSupport();
248 GrTexture* GrContext::createTexture(const GrSurfaceDesc& desc, bool budgeted, const void* srcData,
250 RETURN_NULL_IF_ABANDONED
251 if ((desc.fFlags & kRenderTarget_GrSurfaceFlag) &&
252 !this->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
255 if (!GrPixelConfigIsCompressed(desc.fConfig)) {
256 static const uint32_t kFlags = kExact_ScratchTextureFlag |
257 kNoCreate_ScratchTextureFlag;
258 if (GrTexture* texture = this->internalRefScratchTexture(desc, kFlags)) {
259 if (!srcData || texture->writePixels(0, 0, desc.fWidth, desc.fHeight, desc.fConfig,
260 srcData, rowBytes)) {
262 texture->resourcePriv().makeUnbudgeted();
269 return fGpu->createTexture(desc, budgeted, srcData, rowBytes);
272 GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& desc, ScratchTexMatch match,
273 bool calledDuringFlush) {
274 RETURN_NULL_IF_ABANDONED
275 // Currently we don't recycle compressed textures as scratch.
276 if (GrPixelConfigIsCompressed(desc.fConfig)) {
280 if (kExact_ScratchTexMatch == match) {
281 flags |= kExact_ScratchTextureFlag;
283 if (calledDuringFlush) {
284 flags |= kNoPendingIO_ScratchTextureFlag;
286 return this->internalRefScratchTexture(desc, flags);
290 GrTexture* GrContext::internalRefScratchTexture(const GrSurfaceDesc& inDesc, uint32_t flags) {
291 SkASSERT(!GrPixelConfigIsCompressed(inDesc.fConfig));
293 SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);
295 if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
296 if (!(kExact_ScratchTextureFlag & flags)) {
297 // bin by pow2 with a reasonable min
298 static const int MIN_SIZE = 16;
299 GrSurfaceDesc* wdesc = desc.writable();
300 wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
301 wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
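// e.g. a 24x70 scratch request is rounded up to 32x128 here, so similarly
// sized requests can share a single cached texture.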
305 GrTexturePriv::ComputeScratchKey(*desc, &key);
306 uint32_t scratchFlags = 0;
307 if (kNoPendingIO_ScratchTextureFlag & flags) {
308 scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
309 } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
310 // If it is not a render target then it will most likely be populated by
311 // writePixels() which will trigger a flush if the texture has pending IO.
312 scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
314 GrGpuResource* resource = fResourceCache->findAndRefScratchResource(key, scratchFlags);
316 GrSurface* surface = static_cast<GrSurface*>(resource);
317 GrRenderTarget* rt = surface->asRenderTarget();
318 if (rt && fGpu->caps()->discardRenderTargetSupport()) {
321 return surface->asTexture();
325 if (!(kNoCreate_ScratchTextureFlag & flags)) {
326 return fGpu->createTexture(*desc, true, NULL, 0);
332 void GrContext::OverBudgetCB(void* data) {
335 GrContext* context = reinterpret_cast<GrContext*>(data);
337 // Flush the InOrderDrawBuffer to possibly free up some textures
338 context->fFlushToReduceCacheSize = true;
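// The flag set here is checked by AutoCheckFlush (see above); flush() clears
// it again once the draw buffer has actually been flushed.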
341 int GrContext::getMaxTextureSize() const {
342 return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
345 int GrContext::getMaxRenderTargetSize() const {
346 return fGpu->caps()->maxRenderTargetSize();
349 int GrContext::getMaxSampleCount() const {
350 return fGpu->caps()->maxSampleCount();
353 ///////////////////////////////////////////////////////////////////////////////
355 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
356 RETURN_NULL_IF_ABANDONED
357 return fGpu->wrapBackendTexture(desc);
360 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
361 RETURN_NULL_IF_ABANDONED
362 return fGpu->wrapBackendRenderTarget(desc);
365 ////////////////////////////////////////////////////////////////////////////////
367 void GrContext::clear(const SkIRect* rect,
370 GrRenderTarget* renderTarget) {
372 ASSERT_OWNED_RESOURCE(renderTarget);
373 SkASSERT(renderTarget);
375 AutoCheckFlush acf(this);
376 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
377 GrDrawTarget* target = this->prepareToDraw();
378 if (NULL == target) {
381 target->clear(rect, color, canIgnoreRect, renderTarget);
384 void GrContext::drawPaint(GrRenderTarget* rt,
386 const GrPaint& origPaint,
387 const SkMatrix& viewMatrix) {
389 // set rect to be big enough to fill the space, but not super-huge, so we
390 // don't overflow fixed-point implementations
393 SkIntToScalar(rt->width()),
394 SkIntToScalar(rt->height()));
395 SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
397 // by definition this fills the entire clip, no need for AA
398 if (paint->isAntiAlias()) {
399 paint.writable()->setAntiAlias(false);
402 bool isPerspective = viewMatrix.hasPerspective();
404 // We attempt to map r by the inverse matrix and draw that. mapRect will
405 // map the four corners and bound them with a new rect. This will not
406 // produce a correct result for some perspective matrices.
407 if (!isPerspective) {
409 if (!viewMatrix.invert(&inverse)) {
410 SkDebugf("Could not invert matrix\n");
414 this->drawRect(rt, clip, *paint, viewMatrix, r);
416 SkMatrix localMatrix;
417 if (!viewMatrix.invert(&localMatrix)) {
418 SkDebugf("Could not invert matrix\n");
422 AutoCheckFlush acf(this);
423 GrPipelineBuilder pipelineBuilder;
424 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, paint, &acf);
425 if (NULL == target) {
429 GR_CREATE_TRACE_MARKER("GrContext::drawPaintWithPerspective", target);
430 target->drawRect(&pipelineBuilder,
440 void GrContext::dumpFontCache() const {
445 ////////////////////////////////////////////////////////////////////////////////
447 static inline bool is_irect(const SkRect& r) {
448 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
449 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
452 static bool apply_aa_to_rect(GrDrawTarget* target,
453 GrPipelineBuilder* pipelineBuilder,
454 SkRect* devBoundRect,
456 SkScalar strokeWidth,
457 const SkMatrix& combinedMatrix,
459 if (pipelineBuilder->getRenderTarget()->isMultisampled()) {
463 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
464 if (strokeWidth >= 0) {
466 if (!combinedMatrix.preservesAxisAlignment()) {
470 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
472 if (!combinedMatrix.preservesRightAngles()) {
478 combinedMatrix.mapRect(devBoundRect, rect);
479 if (!combinedMatrix.rectStaysRect()) {
483 if (strokeWidth < 0) {
484 return !is_irect(*devBoundRect);
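// A fill (strokeWidth < 0) whose device-space bounds land exactly on integer
// coordinates needs no AA, so the AA path is only taken when the mapped
// bounds are fractional.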
490 static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
491 return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
492 point.fY >= rect.fTop && point.fY <= rect.fBottom;
495 class StrokeRectBatch : public GrBatch {
499 SkMatrix fViewMatrix;
501 SkScalar fStrokeWidth;
504 static GrBatch* Create(const Geometry& geometry) {
505 return SkNEW_ARGS(StrokeRectBatch, (geometry));
508 const char* name() const override { return "StrokeRectBatch"; }
510 void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
511 // When this is called on a batch, there is only one geometry bundle
512 out->setKnownFourComponents(fGeoData[0].fColor);
515 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
516 out->setKnownSingleComponent(0xff);
519 void initBatchTracker(const GrPipelineInfo& init) override {
520 // Handle any color overrides
521 if (init.fColorIgnored) {
522 fGeoData[0].fColor = GrColor_ILLEGAL;
523 } else if (GrColor_ILLEGAL != init.fOverrideColor) {
524 fGeoData[0].fColor = init.fOverrideColor;
527 // setup batch properties
528 fBatch.fColorIgnored = init.fColorIgnored;
529 fBatch.fColor = fGeoData[0].fColor;
530 fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
531 fBatch.fCoverageIgnored = init.fCoverageIgnored;
534 void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override {
535 SkAutoTUnref<const GrGeometryProcessor> gp(
536 GrDefaultGeoProcFactory::Create(GrDefaultGeoProcFactory::kPosition_GPType,
541 batchTarget->initDraw(gp, pipeline);
543 // TODO this is hacky, but the only way we have to initialize the GP is to use the
544 // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
545 // everywhere we can remove this nastiness
547 init.fColorIgnored = fBatch.fColorIgnored;
548 init.fOverrideColor = GrColor_ILLEGAL;
549 init.fCoverageIgnored = fBatch.fCoverageIgnored;
550 init.fUsesLocalCoords = this->usesLocalCoords();
551 gp->initBatchTracker(batchTarget->currentBatchTracker(), init);
553 size_t vertexStride = gp->getVertexStride();
555 SkASSERT(vertexStride == sizeof(GrDefaultGeoProcFactory::PositionAttr));
557 Geometry& args = fGeoData[0];
559 int vertexCount = kVertsPerHairlineRect;
560 if (args.fStrokeWidth > 0) {
561 vertexCount = kVertsPerStrokeRect;
564 const GrVertexBuffer* vertexBuffer;
567 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
573 SkDebugf("Could not allocate vertices\n");
577 SkPoint* vertex = reinterpret_cast<SkPoint*>(vertices);
579 GrPrimitiveType primType;
581 if (args.fStrokeWidth > 0) {
582 primType = kTriangleStrip_GrPrimitiveType;
584 this->setStrokeRectStrip(vertex, args.fRect, args.fStrokeWidth);
587 primType = kLineStrip_GrPrimitiveType;
588 vertex[0].set(args.fRect.fLeft, args.fRect.fTop);
589 vertex[1].set(args.fRect.fRight, args.fRect.fTop);
590 vertex[2].set(args.fRect.fRight, args.fRect.fBottom);
591 vertex[3].set(args.fRect.fLeft, args.fRect.fBottom);
592 vertex[4].set(args.fRect.fLeft, args.fRect.fTop);
595 GrDrawTarget::DrawInfo drawInfo;
596 drawInfo.setPrimitiveType(primType);
597 drawInfo.setVertexBuffer(vertexBuffer);
598 drawInfo.setStartVertex(firstVertex);
599 drawInfo.setVertexCount(vertexCount);
600 drawInfo.setStartIndex(0);
601 drawInfo.setIndexCount(0);
602 drawInfo.setInstanceCount(0);
603 drawInfo.setVerticesPerInstance(0);
604 drawInfo.setIndicesPerInstance(0);
605 batchTarget->draw(drawInfo);
608 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
611 StrokeRectBatch(const Geometry& geometry) {
612 this->initClassID<StrokeRectBatch>();
614 fBatch.fHairline = geometry.fStrokeWidth == 0;
616 fGeoData.push_back(geometry);
619 /* create a triangle strip that strokes the specified rect. There are 8
620 unique vertices, but we repeat the last 2 to close up. Alternatively we
621 could use an indices array, and then only send 8 verts, but not sure that would be faster. */
624 void setStrokeRectStrip(SkPoint verts[10], const SkRect& rect, SkScalar width) {
625 const SkScalar rad = SkScalarHalf(width);
626 // TODO we should be able to enable this assert, but we'd have to filter these draws
628 //SkASSERT(rad < rect.width() / 2 && rad < rect.height() / 2);
630 verts[0].set(rect.fLeft + rad, rect.fTop + rad);
631 verts[1].set(rect.fLeft - rad, rect.fTop - rad);
632 verts[2].set(rect.fRight - rad, rect.fTop + rad);
633 verts[3].set(rect.fRight + rad, rect.fTop - rad);
634 verts[4].set(rect.fRight - rad, rect.fBottom - rad);
635 verts[5].set(rect.fRight + rad, rect.fBottom + rad);
636 verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
637 verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
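// Even-indexed vertices sit on the inner ring (inset by rad) and odd-indexed
// vertices on the outer ring (outset by rad); the triangle strip zig-zags
// between the two rings around the rect.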
643 GrColor color() const { return fBatch.fColor; }
644 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
645 bool colorIgnored() const { return fBatch.fColorIgnored; }
646 const SkMatrix& viewMatrix() const { return fGeoData[0].fViewMatrix; }
647 bool hairline() const { return fBatch.fHairline; }
649 bool onCombineIfPossible(GrBatch* t) override {
650 // StrokeRectBatch* that = t->cast<StrokeRectBatch>();
652 // NonAA stroke rects cannot batch right now
653 // TODO make these batchable
657 struct BatchTracker {
659 bool fUsesLocalCoords;
661 bool fCoverageIgnored;
665 const static int kVertsPerHairlineRect = 5;
666 const static int kVertsPerStrokeRect = 10;
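// 5 = the rect's four corners plus a repeat of the first point to close the
// line strip; 10 = the 8 unique stroke vertices plus 2 repeats to close the
// triangle strip (see setStrokeRectStrip above).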
669 SkSTArray<1, Geometry, true> fGeoData;
672 void GrContext::drawRect(GrRenderTarget* rt,
674 const GrPaint& paint,
675 const SkMatrix& viewMatrix,
677 const GrStrokeInfo* strokeInfo) {
679 if (strokeInfo && strokeInfo->isDashed()) {
682 this->drawPath(rt, clip, paint, viewMatrix, path, *strokeInfo);
686 AutoCheckFlush acf(this);
687 GrPipelineBuilder pipelineBuilder;
688 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
689 if (NULL == target) {
693 GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
694 SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
696 // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
697 // cases where the RT is fully inside a stroke.
700 pipelineBuilder.getRenderTarget()->getBoundsRect(&rtRect);
701 SkRect clipSpaceRTRect = rtRect;
702 bool checkClip = GrClip::kWideOpen_ClipType != clip.clipType();
704 clipSpaceRTRect.offset(SkIntToScalar(clip.origin().fX),
705 SkIntToScalar(clip.origin().fY));
707 // Does the clip contain the entire RT?
708 if (!checkClip || clip.quickContains(clipSpaceRTRect)) {
710 if (!viewMatrix.invert(&invM)) {
713 // Does the rect bound the RT?
714 SkPoint srcSpaceRTQuad[4];
715 invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
716 if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
717 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
718 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
719 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
722 if (paint.isOpaqueAndConstantColor(&clearColor)) {
723 target->clear(NULL, clearColor, true, rt);
730 GrColor color = paint.getColor();
732 bool needAA = paint.isAntiAlias() && !pipelineBuilder.getRenderTarget()->isMultisampled();
733 bool doAA = needAA && apply_aa_to_rect(target, &pipelineBuilder, &devBoundRect, rect, width,
738 const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
739 fAARectRenderer->strokeAARect(target,
748 fAARectRenderer->fillAARect(target,
759 StrokeRectBatch::Geometry geometry;
760 geometry.fViewMatrix = viewMatrix;
761 geometry.fColor = color;
762 geometry.fRect = rect;
763 geometry.fStrokeWidth = width;
765 SkAutoTUnref<GrBatch> batch(StrokeRectBatch::Create(geometry));
767 SkRect bounds = rect;
768 SkScalar rad = SkScalarHalf(width);
769 bounds.outset(rad, rad);
770 viewMatrix.mapRect(&bounds);
771 target->drawBatch(&pipelineBuilder, batch, &bounds);
774 target->drawSimpleRect(&pipelineBuilder, color, viewMatrix, rect);
778 void GrContext::drawNonAARectToRect(GrRenderTarget* rt,
780 const GrPaint& paint,
781 const SkMatrix& viewMatrix,
782 const SkRect& rectToDraw,
783 const SkRect& localRect,
784 const SkMatrix* localMatrix) {
786 AutoCheckFlush acf(this);
787 GrPipelineBuilder pipelineBuilder;
788 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
789 if (NULL == target) {
793 GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
795 target->drawRect(&pipelineBuilder,
803 static const GrGeometryProcessor* set_vertex_attributes(bool hasLocalCoords,
808 const SkMatrix& viewMatrix) {
811 uint32_t flags = GrDefaultGeoProcFactory::kPosition_GPType;
812 if (hasLocalCoords && hasColors) {
813 *colorOffset = sizeof(SkPoint);
814 *texOffset = sizeof(SkPoint) + sizeof(GrColor);
815 flags |= GrDefaultGeoProcFactory::kColor_GPType |
816 GrDefaultGeoProcFactory::kLocalCoord_GPType;
817 } else if (hasLocalCoords) {
818 *texOffset = sizeof(SkPoint);
819 flags |= GrDefaultGeoProcFactory::kLocalCoord_GPType;
820 } else if (hasColors) {
821 *colorOffset = sizeof(SkPoint);
822 flags |= GrDefaultGeoProcFactory::kColor_GPType;
824 return GrDefaultGeoProcFactory::Create(flags, color, viewMatrix, SkMatrix::I());
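// Resulting interleaved layouts (assuming 4-byte SkScalars, so SkPoint is 8
// bytes and GrColor 4): position only -> 8-byte stride; position+color ->
// color at offset 8, 12-byte stride; position+local -> local coords at offset
// 8, 16-byte stride; position+color+local -> color at 8, local coords at 12,
// 20-byte stride.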
827 class DrawVerticesBatch : public GrBatch {
831 SkTDArray<SkPoint> fPositions;
832 SkTDArray<uint16_t> fIndices;
833 SkTDArray<GrColor> fColors;
834 SkTDArray<SkPoint> fLocalCoords;
837 static GrBatch* Create(const Geometry& geometry, GrPrimitiveType primitiveType,
838 const SkMatrix& viewMatrix,
839 const SkPoint* positions, int vertexCount,
840 const uint16_t* indices, int indexCount,
841 const GrColor* colors, const SkPoint* localCoords) {
842 return SkNEW_ARGS(DrawVerticesBatch, (geometry, primitiveType, viewMatrix, positions,
843 vertexCount, indices, indexCount, colors,
847 const char* name() const override { return "DrawVerticesBatch"; }
849 void getInvariantOutputColor(GrInitInvariantOutput* out) const override {
850 // When this is called on a batch, there is only one geometry bundle
851 if (this->hasColors()) {
852 out->setUnknownFourComponents();
854 out->setKnownFourComponents(fGeoData[0].fColor);
858 void getInvariantOutputCoverage(GrInitInvariantOutput* out) const override {
859 out->setKnownSingleComponent(0xff);
862 void initBatchTracker(const GrPipelineInfo& init) override {
863 // Handle any color overrides
864 if (init.fColorIgnored) {
865 fGeoData[0].fColor = GrColor_ILLEGAL;
866 } else if (GrColor_ILLEGAL != init.fOverrideColor) {
867 fGeoData[0].fColor = init.fOverrideColor;
870 // setup batch properties
871 fBatch.fColorIgnored = init.fColorIgnored;
872 fBatch.fColor = fGeoData[0].fColor;
873 fBatch.fUsesLocalCoords = init.fUsesLocalCoords;
874 fBatch.fCoverageIgnored = init.fCoverageIgnored;
877 void generateGeometry(GrBatchTarget* batchTarget, const GrPipeline* pipeline) override {
878 int colorOffset = -1, texOffset = -1;
879 SkAutoTUnref<const GrGeometryProcessor> gp(
880 set_vertex_attributes(this->hasLocalCoords(), this->hasColors(), &colorOffset,
881 &texOffset, this->color(), this->viewMatrix()));
883 batchTarget->initDraw(gp, pipeline);
885 // TODO this is hacky, but the only way we have to initialize the GP is to use the
886 // GrPipelineInfo struct so we can generate the correct shader. Once we have GrBatch
887 // everywhere we can remove this nastiness
889 init.fColorIgnored = fBatch.fColorIgnored;
890 init.fOverrideColor = GrColor_ILLEGAL;
891 init.fCoverageIgnored = fBatch.fCoverageIgnored;
892 init.fUsesLocalCoords = this->usesLocalCoords();
893 gp->initBatchTracker(batchTarget->currentBatchTracker(), init);
895 size_t vertexStride = gp->getVertexStride();
897 SkASSERT(vertexStride == sizeof(SkPoint) + (this->hasLocalCoords() ? sizeof(SkPoint) : 0)
898 + (this->hasColors() ? sizeof(GrColor) : 0));
900 int instanceCount = fGeoData.count();
902 const GrVertexBuffer* vertexBuffer;
905 void* vertices = batchTarget->vertexPool()->makeSpace(vertexStride,
911 SkDebugf("Could not allocate vertices\n");
915 const GrIndexBuffer* indexBuffer;
918 void* indices = NULL;
919 if (this->hasIndices()) {
920 indices = batchTarget->indexPool()->makeSpace(this->indexCount(),
925 SkDebugf("Could not allocate indices\n");
931 int vertexOffset = 0;
932 for (int i = 0; i < instanceCount; i++) {
933 const Geometry& args = fGeoData[i];
935 // TODO we can actually cache this interleaved and then just memcpy
936 if (this->hasIndices()) {
937 for (int j = 0; j < args.fIndices.count(); ++j, ++indexOffset) {
938 *((uint16_t*)indices + indexOffset) = args.fIndices[j] + vertexOffset;
942 for (int j = 0; j < args.fPositions.count(); ++j) {
943 *((SkPoint*)vertices) = args.fPositions[j];
944 if (this->hasColors()) {
945 *(GrColor*)((intptr_t)vertices + colorOffset) = args.fColors[j];
947 if (this->hasLocalCoords()) {
948 *(SkPoint*)((intptr_t)vertices + texOffset) = args.fLocalCoords[j];
950 vertices = (void*)((intptr_t)vertices + vertexStride);
955 GrDrawTarget::DrawInfo drawInfo;
956 drawInfo.setPrimitiveType(this->primitiveType());
957 drawInfo.setVertexBuffer(vertexBuffer);
958 drawInfo.setStartVertex(firstVertex);
959 drawInfo.setVertexCount(this->vertexCount());
960 if (this->hasIndices()) {
961 drawInfo.setIndexBuffer(indexBuffer);
962 drawInfo.setStartIndex(firstIndex);
963 drawInfo.setIndexCount(this->indexCount());
965 drawInfo.setStartIndex(0);
966 drawInfo.setIndexCount(0);
968 batchTarget->draw(drawInfo);
971 SkSTArray<1, Geometry, true>* geoData() { return &fGeoData; }
974 DrawVerticesBatch(const Geometry& geometry, GrPrimitiveType primitiveType,
975 const SkMatrix& viewMatrix,
976 const SkPoint* positions, int vertexCount,
977 const uint16_t* indices, int indexCount,
978 const GrColor* colors, const SkPoint* localCoords) {
979 this->initClassID<DrawVerticesBatch>();
982 fBatch.fViewMatrix = viewMatrix;
983 Geometry& installedGeo = fGeoData.push_back(geometry);
985 installedGeo.fPositions.append(vertexCount, positions);
987 installedGeo.fIndices.append(indexCount, indices);
988 fBatch.fHasIndices = true;
990 fBatch.fHasIndices = false;
994 installedGeo.fColors.append(vertexCount, colors);
995 fBatch.fHasColors = true;
997 fBatch.fHasColors = false;
1001 installedGeo.fLocalCoords.append(vertexCount, localCoords);
1002 fBatch.fHasLocalCoords = true;
1004 fBatch.fHasLocalCoords = false;
1006 fBatch.fVertexCount = vertexCount;
1007 fBatch.fIndexCount = indexCount;
1008 fBatch.fPrimitiveType = primitiveType;
1011 GrPrimitiveType primitiveType() const { return fBatch.fPrimitiveType; }
1012 bool batchablePrimitiveType() const {
1013 return kTriangles_GrPrimitiveType == fBatch.fPrimitiveType ||
1014 kLines_GrPrimitiveType == fBatch.fPrimitiveType ||
1015 kPoints_GrPrimitiveType == fBatch.fPrimitiveType;
1017 GrColor color() const { return fBatch.fColor; }
1018 bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
1019 bool colorIgnored() const { return fBatch.fColorIgnored; }
1020 const SkMatrix& viewMatrix() const { return fBatch.fViewMatrix; }
1021 bool hasColors() const { return fBatch.fHasColors; }
1022 bool hasIndices() const { return fBatch.fHasIndices; }
1023 bool hasLocalCoords() const { return fBatch.fHasLocalCoords; }
1024 int vertexCount() const { return fBatch.fVertexCount; }
1025 int indexCount() const { return fBatch.fIndexCount; }
1027 bool onCombineIfPossible(GrBatch* t) override {
1028 DrawVerticesBatch* that = t->cast<DrawVerticesBatch>();
1030 if (!this->batchablePrimitiveType() || this->primitiveType() != that->primitiveType()) {
1034 SkASSERT(this->usesLocalCoords() == that->usesLocalCoords());
1036 // We currently use a uniform view matrix for this batch
1037 if (!this->viewMatrix().cheapEqualTo(that->viewMatrix())) {
1041 if (this->hasColors() != that->hasColors()) {
1045 if (this->hasIndices() != that->hasIndices()) {
1049 if (this->hasLocalCoords() != that->hasLocalCoords()) {
1053 if (!this->hasColors() && this->color() != that->color()) {
1057 if (this->color() != that->color()) {
1058 fBatch.fColor = GrColor_ILLEGAL;
1060 fGeoData.push_back_n(that->geoData()->count(), that->geoData()->begin());
1061 fBatch.fVertexCount += that->vertexCount();
1062 fBatch.fIndexCount += that->indexCount();
1066 struct BatchTracker {
1067 GrPrimitiveType fPrimitiveType;
1068 SkMatrix fViewMatrix;
1070 bool fUsesLocalCoords;
1072 bool fCoverageIgnored;
1075 bool fHasLocalCoords;
1080 BatchTracker fBatch;
1081 SkSTArray<1, Geometry, true> fGeoData;
1084 void GrContext::drawVertices(GrRenderTarget* rt,
1086 const GrPaint& paint,
1087 const SkMatrix& viewMatrix,
1088 GrPrimitiveType primitiveType,
1090 const SkPoint positions[],
1091 const SkPoint texCoords[],
1092 const GrColor colors[],
1093 const uint16_t indices[],
1096 AutoCheckFlush acf(this);
1097 GrPipelineBuilder pipelineBuilder;
1098 GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
1100 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1101 if (NULL == target) {
1105 GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
1107 DrawVerticesBatch::Geometry geometry;
1108 geometry.fColor = paint.getColor();
1110 SkAutoTUnref<GrBatch> batch(DrawVerticesBatch::Create(geometry, primitiveType, viewMatrix,
1111 positions, vertexCount, indices,
1112 indexCount, colors, texCoords));
1114 // TODO figure out bounds
1115 target->drawBatch(&pipelineBuilder, batch, NULL);
1118 ///////////////////////////////////////////////////////////////////////////////
1120 void GrContext::drawRRect(GrRenderTarget* rt,
1122 const GrPaint& paint,
1123 const SkMatrix& viewMatrix,
1124 const SkRRect& rrect,
1125 const GrStrokeInfo& strokeInfo) {
1127 if (rrect.isEmpty()) {
1131 if (strokeInfo.isDashed()) {
1133 path.addRRect(rrect);
1134 this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo);
1138 AutoCheckFlush acf(this);
1139 GrPipelineBuilder pipelineBuilder;
1140 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1141 if (NULL == target) {
1145 GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
1147 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1149 GrColor color = paint.getColor();
1150 if (!fOvalRenderer->drawRRect(target,
1154 paint.isAntiAlias(),
1158 path.addRRect(rrect);
1159 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
1164 ///////////////////////////////////////////////////////////////////////////////
1166 void GrContext::drawDRRect(GrRenderTarget* rt,
1168 const GrPaint& paint,
1169 const SkMatrix& viewMatrix,
1170 const SkRRect& outer,
1171 const SkRRect& inner) {
1173 if (outer.isEmpty()) {
1177 AutoCheckFlush acf(this);
1178 GrPipelineBuilder pipelineBuilder;
1179 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1181 GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
1183 GrColor color = paint.getColor();
1184 if (!fOvalRenderer->drawDRRect(target,
1188 paint.isAntiAlias(),
1192 path.addRRect(inner);
1193 path.addRRect(outer);
1194 path.setFillType(SkPath::kEvenOdd_FillType);
1196 GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
1197 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
1202 ///////////////////////////////////////////////////////////////////////////////
1204 void GrContext::drawOval(GrRenderTarget* rt,
1206 const GrPaint& paint,
1207 const SkMatrix& viewMatrix,
1209 const GrStrokeInfo& strokeInfo) {
1211 if (oval.isEmpty()) {
1215 if (strokeInfo.isDashed()) {
1218 this->drawPath(rt, clip, paint, viewMatrix, path, strokeInfo);
1222 AutoCheckFlush acf(this);
1223 GrPipelineBuilder pipelineBuilder;
1224 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1225 if (NULL == target) {
1229 GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
1231 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1233 GrColor color = paint.getColor();
1234 if (!fOvalRenderer->drawOval(target,
1238 paint.isAntiAlias(),
1243 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
1248 // Can 'path' be drawn as a pair of filled nested rectangles?
1249 static bool is_nested_rects(GrDrawTarget* target,
1250 GrPipelineBuilder* pipelineBuilder,
1252 const SkMatrix& viewMatrix,
1254 const SkStrokeRec& stroke,
1256 SkASSERT(stroke.isFillStyle());
1258 if (path.isInverseFillType()) {
1262 // TODO: this restriction could be lifted if we were willing to apply
1263 // the matrix to all the points individually rather than just to the rect
1264 if (!viewMatrix.preservesAxisAlignment()) {
1268 SkPath::Direction dirs[2];
1269 if (!path.isNestedRects(rects, dirs)) {
1273 if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1274 // The two rects need to be wound opposite to each other
1278 // Right now, nested rects where the margin is not the same width
1279 // all around do not render correctly
1280 const SkScalar* outer = rects[0].asScalars();
1281 const SkScalar* inner = rects[1].asScalars();
1285 SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1286 bool allGoE1 = margin >= SK_Scalar1;
1288 for (int i = 1; i < 4; ++i) {
1289 SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1290 if (temp < SK_Scalar1) {
1293 if (!SkScalarNearlyEqual(margin, temp)) {
1298 return allEq || allGoE1;
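// i.e. take the nested-rect fast path only when all four margins are nearly
// equal, or when every margin is at least one pixel wide.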
1301 void GrContext::drawPath(GrRenderTarget* rt,
1303 const GrPaint& paint,
1304 const SkMatrix& viewMatrix,
1306 const GrStrokeInfo& strokeInfo) {
1308 if (path.isEmpty()) {
1309 if (path.isInverseFillType()) {
1310 this->drawPaint(rt, clip, paint, viewMatrix);
1315 GrColor color = paint.getColor();
1316 if (strokeInfo.isDashed()) {
1318 if (path.isLine(pts)) {
1319 AutoCheckFlush acf(this);
1320 GrPipelineBuilder pipelineBuilder;
1321 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1322 if (NULL == target) {
1326 if (GrDashingEffect::DrawDashLine(fGpu, target, &pipelineBuilder, color, viewMatrix,
1327 pts, paint, strokeInfo)) {
1332 // Filter dashed path into new path with the dashing applied
1333 const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
1334 SkTLazy<SkPath> effectPath;
1335 GrStrokeInfo newStrokeInfo(strokeInfo, false);
1336 SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
1337 if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
1338 this->drawPath(rt, clip, paint, viewMatrix, *effectPath.get(), newStrokeInfo);
1342 this->drawPath(rt, clip, paint, viewMatrix, path, newStrokeInfo);
1346 // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
1347 // Scratch textures can be recycled after they are returned to the texture
1348 // cache. This presents a potential hazard for buffered drawing. However,
1349 // the writePixels that uploads to the scratch will perform a flush so we're OK.
1351 AutoCheckFlush acf(this);
1352 GrPipelineBuilder pipelineBuilder;
1353 GrDrawTarget* target = this->prepareToDraw(&pipelineBuilder, rt, clip, &paint, &acf);
1354 if (NULL == target) {
1358 GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());
1360 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1362 bool useCoverageAA = paint.isAntiAlias() &&
1363 !pipelineBuilder.getRenderTarget()->isMultisampled();
1365 if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
1366 // Concave AA paths are expensive - try to avoid them for special cases
1369 if (is_nested_rects(target, &pipelineBuilder, color, viewMatrix, path, strokeRec, rects)) {
1370 fAARectRenderer->fillAANestedRects(target, &pipelineBuilder, color, viewMatrix, rects);
1376 bool isOval = path.isOval(&ovalRect);
1378 if (!isOval || path.isInverseFillType() ||
1379 !fOvalRenderer->drawOval(target,
1383 paint.isAntiAlias(),
1386 this->internalDrawPath(target, &pipelineBuilder, viewMatrix, color, paint.isAntiAlias(),
1391 void GrContext::internalDrawPath(GrDrawTarget* target,
1392 GrPipelineBuilder* pipelineBuilder,
1393 const SkMatrix& viewMatrix,
1397 const GrStrokeInfo& strokeInfo) {
1399 SkASSERT(!path.isEmpty());
1401 GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);
1404 // An assumption here is that the path renderer would use some form of tweaking of
1405 // the src color (either the input alpha or in the frag shader) to implement
1406 // AA. If we have some future driver-mojo path AA that can do the right
1407 // thing WRT the blend then we'll need some query on the PR.
1408 bool useCoverageAA = useAA &&
1409 !pipelineBuilder->getRenderTarget()->isMultisampled();
1412 GrPathRendererChain::DrawType type =
1413 useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1414 GrPathRendererChain::kColor_DrawType;
1416 const SkPath* pathPtr = &path;
1417 SkTLazy<SkPath> tmpPath;
1418 SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());
1420 // Try a 1st time without stroking the path and without allowing the SW renderer
1421 GrPathRenderer* pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr,
1422 *stroke, false, type);
1425 if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, viewMatrix, NULL)) {
1426 // It didn't work the 1st time, so try again with the stroked path
1427 if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
1428 pathPtr = tmpPath.get();
1429 stroke.writable()->setFillStyle();
1430 if (pathPtr->isEmpty()) {
1436 // This time, allow SW renderer
1437 pr = this->getPathRenderer(target, pipelineBuilder, viewMatrix, *pathPtr, *stroke, true,
1443 SkDebugf("Unable to find path renderer compatible with path.\n");
1448 pr->drawPath(target, pipelineBuilder, color, viewMatrix, *pathPtr, *stroke, useCoverageAA);
1451 ////////////////////////////////////////////////////////////////////////////////
1453 void GrContext::flush(int flagsBitfield) {
1454 if (NULL == fDrawBuffer) {
1458 if (kDiscard_FlushBit & flagsBitfield) {
1459 fDrawBuffer->reset();
1461 fDrawBuffer->flush();
1463 fFlushToReduceCacheSize = false;
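// CPU fallback used by writeSurfacePixels() when no UPM->PM conversion effect
// is available: converts unpremultiplied 8888 pixels to premultiplied via
// SkPixelInfo.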
1466 bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
1467 const void* inPixels, size_t outRowBytes, void* outPixels) {
1468 SkSrcPixelInfo srcPI;
1469 if (!GrPixelConfig2ColorAndProfileType(srcConfig, &srcPI.fColorType, NULL)) {
1472 srcPI.fAlphaType = kUnpremul_SkAlphaType;
1473 srcPI.fPixels = inPixels;
1474 srcPI.fRowBytes = inRowBytes;
1476 SkDstPixelInfo dstPI;
1477 dstPI.fColorType = srcPI.fColorType;
1478 dstPI.fAlphaType = kPremul_SkAlphaType;
1479 dstPI.fPixels = outPixels;
1480 dstPI.fRowBytes = outRowBytes;
1482 return srcPI.convertPixelsTo(&dstPI, width, height);
1485 bool GrContext::writeSurfacePixels(GrSurface* surface,
1486 int left, int top, int width, int height,
1487 GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
1488 uint32_t pixelOpsFlags) {
1489 RETURN_FALSE_IF_ABANDONED
1491 GrTexture* texture = NULL;
1492 if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
1493 fGpu->canWriteTexturePixels(texture, srcConfig)) {
1495 if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
1496 surface->surfacePriv().hasPendingIO()) {
1499 return fGpu->writeTexturePixels(texture, left, top, width, height,
1500 srcConfig, buffer, rowBytes);
1501 // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
1502 // upload is already flushed.
1506 // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
1507 GrRenderTarget* renderTarget = surface->asRenderTarget();
1508 if (NULL == renderTarget) {
1512 // We ignore the preferred config unless it is a R/B swap of the src config. In that case
1513 // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1514 // config. This scratch will then have R and B swapped. We correct for this by swapping again
1515 // when drawing the scratch to the dst using a conversion effect.
1516 bool swapRAndB = false;
1517 GrPixelConfig writeConfig = srcConfig;
1518 if (GrPixelConfigSwapRAndB(srcConfig) ==
1519 fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
1520 writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1525 desc.fWidth = width;
1526 desc.fHeight = height;
1527 desc.fConfig = writeConfig;
1528 SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
1533 SkAutoTUnref<const GrFragmentProcessor> fp;
1534 SkMatrix textureMatrix;
1535 textureMatrix.setIDiv(texture->width(), texture->height());
1537 // allocate a tmp buffer and sw convert the pixels to premul
1538 SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1540 if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
1541 if (!GrPixelConfigIs8888(srcConfig)) {
1544 fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1545 // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1547 size_t tmpRowBytes = 4 * width;
1548 tmpPixels.reset(width * height);
1549 if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
1553 rowBytes = tmpRowBytes;
1554 buffer = tmpPixels.get();
1558 fp.reset(GrConfigConversionEffect::Create(texture,
1560 GrConfigConversionEffect::kNone_PMConversion,
1564 // Even if the client told us not to flush, we still flush here. The client may have known that
1565 // writes to the original surface caused no data hazards, but they can't know that the scratch
1566 // we just got is safe.
1567 if (texture->surfacePriv().hasPendingIO()) {
1570 if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
1571 writeConfig, buffer, rowBytes)) {
1576 matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1578 // This function can be called in the midst of drawing another object (e.g., when uploading a
1579 // SW-rasterized clip while issuing a draw). So we push the current geometry state before
1580 // drawing a rect to the render target.
1581 // The bracket ensures we pop the stack if we wind up flushing below.
1583 GrDrawTarget* drawTarget = this->prepareToDraw();
1587 GrDrawTarget::AutoGeometryPush agp(drawTarget);
1589 GrPipelineBuilder pipelineBuilder;
1590 pipelineBuilder.addColorProcessor(fp);
1591 pipelineBuilder.setRenderTarget(renderTarget);
1592 drawTarget->drawSimpleRect(&pipelineBuilder,
1595 SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
1598 if (kFlushWrites_PixelOp & pixelOpsFlags) {
1599 this->flushSurfaceWrites(surface);
1605 // toggles between RGBA and BGRA
1606 static SkColorType toggle_colortype32(SkColorType ct) {
1607 if (kRGBA_8888_SkColorType == ct) {
1608 return kBGRA_8888_SkColorType;
1610 SkASSERT(kBGRA_8888_SkColorType == ct);
1611 return kRGBA_8888_SkColorType;
1615 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1616 int left, int top, int width, int height,
1617 GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1619 RETURN_FALSE_IF_ABANDONED
1620 ASSERT_OWNED_RESOURCE(target);
1623 if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
1627 // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.
1629 // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1630 // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1631 bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1632 width, height, dstConfig,
1634 // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1635 // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1636 // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1637 // the draw cancel out the fact that we call readPixels with a config that is R/B swapped from the dst config.
1639 GrPixelConfig readConfig = dstConfig;
1640 bool swapRAndB = false;
1641 if (GrPixelConfigSwapRAndB(dstConfig) ==
1642 fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1643 readConfig = GrPixelConfigSwapRAndB(readConfig);
1647 bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1649 if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1650 // The unpremul flag is only allowed for these two configs.
1654 SkAutoTUnref<GrTexture> tempTexture;
1656 // If the src is a texture and we would have to do conversions after read pixels, we instead
1657 // do the conversions by drawing the src to a scratch texture. If we handle any of the
1658 // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1659 // on the read back pixels.
1660 GrTexture* src = target->asTexture();
1661 if (src && (swapRAndB || unpremul || flipY)) {
1662 // Make the scratch a render target so we can read its pixels.
1664 desc.fFlags = kRenderTarget_GrSurfaceFlag;
1665 desc.fWidth = width;
1666 desc.fHeight = height;
1667 desc.fConfig = readConfig;
1668 desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1670 // When a full read back is faster than a partial we could always make the scratch exactly
1671 // match the passed rect. However, if we see many different size rectangles we will trash
1672 // our texture cache and pay the cost of creating and destroying many textures. So, we only
1673 // request an exact match when the caller is reading an entire RT.
1674 ScratchTexMatch match = kApprox_ScratchTexMatch;
1677 target->width() == width &&
1678 target->height() == height &&
1679 fGpu->fullReadPixelsIsFasterThanPartial()) {
1680 match = kExact_ScratchTexMatch;
1682 tempTexture.reset(this->refScratchTexture(desc, match));
1684 // compute a matrix to perform the draw
1685 SkMatrix textureMatrix;
1686 textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1687 textureMatrix.postIDiv(src->width(), src->height());
1689 SkAutoTUnref<const GrFragmentProcessor> fp;
1691 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1693 unpremul = false; // we no longer need to do this on CPU after the read back.
1696 // If we failed to create a PM->UPM effect and have no other conversions to perform then
1697 // there is no longer any point to using the scratch.
1698 if (fp || flipY || swapRAndB) {
1700 fp.reset(GrConfigConversionEffect::Create(
1701 src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
1704 swapRAndB = false; // we will handle the swap in the draw.
1706 // We protect the existing geometry here since it may not be
1707 // clear to the caller that a draw operation (i.e., drawSimpleRect)
1708 // can be invoked in this method
1710 GrDrawTarget::AutoGeometryPush agp(fDrawBuffer);
1711 GrPipelineBuilder pipelineBuilder;
1713 pipelineBuilder.addColorProcessor(fp);
1715 pipelineBuilder.setRenderTarget(tempTexture->asRenderTarget());
1716 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1717 fDrawBuffer->drawSimpleRect(&pipelineBuilder,
1721 // we want to read back from the scratch's origin
1724 target = tempTexture->asRenderTarget();
1726 this->flushSurfaceWrites(target);
1731 if (!fGpu->readPixels(target,
1732 left, top, width, height,
1733 readConfig, buffer, rowBytes)) {
1736 // Perform any conversions we weren't able to perform using a scratch texture.
1737 if (unpremul || swapRAndB) {
1738 SkDstPixelInfo dstPI;
1739 if (!GrPixelConfig2ColorAndProfileType(dstConfig, &dstPI.fColorType, NULL)) {
1742 dstPI.fAlphaType = kUnpremul_SkAlphaType;
1743 dstPI.fPixels = buffer;
1744 dstPI.fRowBytes = rowBytes;
1746 SkSrcPixelInfo srcPI;
1747 srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1748 srcPI.fAlphaType = kPremul_SkAlphaType;
1749 srcPI.fPixels = buffer;
1750 srcPI.fRowBytes = rowBytes;
1752 return srcPI.convertPixelsTo(&dstPI, width, height);
1757 void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
1760 ASSERT_OWNED_RESOURCE(surface);
1761 if (surface->surfacePriv().hasPendingIO()) {
1764 GrRenderTarget* rt = surface->asRenderTarget();
1766 fGpu->resolveRenderTarget(rt);
1770 void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1772 SkASSERT(renderTarget);
1773 ASSERT_OWNED_RESOURCE(renderTarget);
1774 AutoCheckFlush acf(this);
1775 GrDrawTarget* target = this->prepareToDraw();
1776 if (NULL == target) {
1779 target->discard(renderTarget);
1782 void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
1783 const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
1785 if (NULL == src || NULL == dst) {
1788 ASSERT_OWNED_RESOURCE(src);
1789 ASSERT_OWNED_RESOURCE(dst);
1791 // Since we're going to the draw target and not GPU, no need to check kNoFlush
1794 GrDrawTarget* target = this->prepareToDraw();
1795 if (NULL == target) {
1798 target->copySurface(dst, src, srcRect, dstPoint);
1800 if (kFlushWrites_PixelOp & pixelOpsFlags) {
1805 void GrContext::flushSurfaceWrites(GrSurface* surface) {
1807 if (surface->surfacePriv().hasPendingWrite()) {
1812 GrDrawTarget* GrContext::prepareToDraw(GrPipelineBuilder* pipelineBuilder,
1815 const GrPaint* paint,
1816 const AutoCheckFlush* acf) {
1817 if (NULL == fGpu || NULL == fDrawBuffer) {
1821 ASSERT_OWNED_RESOURCE(rt);
1822 SkASSERT(rt && paint && acf);
1823 pipelineBuilder->setFromPaint(*paint, rt, clip);
1827 GrDrawTarget* GrContext::prepareToDraw() {
1835 * This method finds a path renderer that can draw the specified path on
1836 * the provided target.
1837 * Due to its expense, the software path renderer has been split out so it
1838 * can be individually allowed/disallowed via the "allowSW" boolean.
1840 GrPathRenderer* GrContext::getPathRenderer(const GrDrawTarget* target,
1841 const GrPipelineBuilder* pipelineBuilder,
1842 const SkMatrix& viewMatrix,
1844 const SkStrokeRec& stroke,
1846 GrPathRendererChain::DrawType drawType,
1847 GrPathRendererChain::StencilSupport* stencilSupport) {
1849 if (NULL == fPathRendererChain) {
1850 fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1853 GrPathRenderer* pr = fPathRendererChain->getPathRenderer(target,
1861 if (NULL == pr && allowSW) {
1862 if (NULL == fSoftwarePathRenderer) {
1863 fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1865 pr = fSoftwarePathRenderer;
1871 ////////////////////////////////////////////////////////////////////////////////
1872 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1873 return fGpu->caps()->isConfigRenderable(config, withMSAA);
1876 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1877 SkScalar dpi) const {
1878 if (!this->isConfigRenderable(config, true)) {
1881 int chosenSampleCount = 0;
1882 if (fGpu->caps()->pathRenderingSupport()) {
1883 if (dpi >= 250.0f) {
1884 chosenSampleCount = 4;
1886 chosenSampleCount = 16;
1889 return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1890 chosenSampleCount : 0;
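// When path rendering is supported (where MSAA typically supplies the
// antialiasing), low-dpi outputs are given 16 samples and high-dpi (>= 250)
// outputs 4; 0 is returned if the chosen count exceeds the hardware's
// maxSampleCount().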
1893 void GrContext::setupDrawBuffer() {
1894 SkASSERT(NULL == fDrawBuffer);
1895 SkASSERT(NULL == fDrawBufferVBAllocPool);
1896 SkASSERT(NULL == fDrawBufferIBAllocPool);
1898 fDrawBufferVBAllocPool =
1899 SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1900 DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1901 DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1902 fDrawBufferIBAllocPool =
1903 SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1904 DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1905 DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1907 fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1908 fDrawBufferVBAllocPool,
1909 fDrawBufferIBAllocPool));
1912 GrDrawTarget* GrContext::getTextTarget() {
1913 return this->prepareToDraw();
1916 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1917 return fGpu->getQuadIndexBuffer();
1921 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1922 GrConfigConversionEffect::PMConversion pmToUPM;
1923 GrConfigConversionEffect::PMConversion upmToPM;
1924 GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1925 *pmToUPMValue = pmToUPM;
1926 *upmToPMValue = upmToPM;
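// createPMToUPMEffect/createUPMToPMEffect below lazily run the round-trip
// test above (cached via fDidTestPMConversions) and return NULL when the GPU
// cannot perform the conversion losslessly.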
1930 const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1932 const SkMatrix& matrix) {
1933 if (!fDidTestPMConversions) {
1934 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1935 fDidTestPMConversions = true;
1937 GrConfigConversionEffect::PMConversion pmToUPM =
1938 static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1939 if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1940 return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1946 const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1948 const SkMatrix& matrix) {
1949 if (!fDidTestPMConversions) {
1950 test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1951 fDidTestPMConversions = true;
1953 GrConfigConversionEffect::PMConversion upmToPM =
1954 static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1955 if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1956 return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1962 //////////////////////////////////////////////////////////////////////////////
1964 void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
1966 *maxTextures = fResourceCache->getMaxResourceCount();
1968 if (maxTextureBytes) {
1969 *maxTextureBytes = fResourceCache->getMaxResourceBytes();
1973 void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
1974 fResourceCache->setLimits(maxTextures, maxTextureBytes);
1977 void GrContext::addResourceToCache(const GrUniqueKey& key, GrGpuResource* resource) {
1978 ASSERT_OWNED_RESOURCE(resource);
1982 resource->resourcePriv().setUniqueKey(key);
1985 bool GrContext::isResourceInCache(const GrUniqueKey& key) const {
1986 return fResourceCache->hasUniqueKey(key);
1989 GrGpuResource* GrContext::findAndRefCachedResource(const GrUniqueKey& key) {
1990 return fResourceCache->findAndRefUniqueResource(key);
1993 //////////////////////////////////////////////////////////////////////////////
1995 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1996 fGpu->addGpuTraceMarker(marker);
1998 fDrawBuffer->addGpuTraceMarker(marker);
2002 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
2003 fGpu->removeGpuTraceMarker(marker);
2005 fDrawBuffer->removeGpuTraceMarker(marker);