3 * Copyright 2011 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
11 #include "effects/GrConfigConversionEffect.h"
12 #include "effects/GrDashingEffect.h"
13 #include "effects/GrSingleTextureEffect.h"
15 #include "GrAARectRenderer.h"
16 #include "GrBufferAllocPool.h"
18 #include "GrDistanceFieldTextContext.h"
19 #include "GrDrawTargetCaps.h"
20 #include "GrIndexBuffer.h"
21 #include "GrInOrderDrawBuffer.h"
22 #include "GrLayerCache.h"
23 #include "GrOvalRenderer.h"
24 #include "GrPathRenderer.h"
25 #include "GrPathUtils.h"
26 #include "GrResourceCache.h"
27 #include "GrResourceCache2.h"
28 #include "GrSoftwarePathRenderer.h"
29 #include "GrStencilBuffer.h"
30 #include "GrStencilAndCoverTextContext.h"
31 #include "GrStrokeInfo.h"
32 #include "GrSurfacePriv.h"
33 #include "GrTextStrike.h"
34 #include "GrTexturePriv.h"
35 #include "GrTraceMarker.h"
36 #include "GrTracing.h"
37 #include "SkDashPathPriv.h"
38 #include "SkConfig8888.h"
41 #include "SkStrokeRec.h"
44 #include "SkTraceEvent.h"
47 // change this to a 1 to see notifications when partial coverage fails
48 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
50 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
53 static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
54 static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;
56 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
57 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
59 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
60 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
62 #define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
64 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
65 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
// RAII guard constructed at the top of each draw call. Its destructor checks
// fFlushToReduceCacheSize (set by GrContext::OverbudgetCB when the resource
// cache goes over budget) and presumably flushes the draw buffer in response —
// the destructor body is elided in this view; confirm against the full file.
67 class GrContext::AutoCheckFlush {
69 AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }
72 if (fContext->fFlushToReduceCacheSize) {
// Factory: builds a GrContext for the given backend. A NULL opts pointer means
// default Options. Success/failure handling after init() is elided in this view;
// presumably the context is returned on success and destroyed on failure.
81 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
82 const Options* opts) {
85 context = SkNEW_ARGS(GrContext, (Options()));
87 context = SkNEW_ARGS(GrContext, (*opts));
90 if (context->init(backend, backendContext)) {
// Constructor: records the options and NULLs out every lazily-created
// subsystem pointer; real construction happens in init()/initCommon().
98 GrContext::GrContext(const Options& opts) : fOptions(opts) {
102 fPathRendererChain = NULL;
103 fSoftwarePathRenderer = NULL;
104 fResourceCache = NULL;
105 fResourceCache2 = NULL;
108 fDrawBufferVBAllocPool = NULL;
109 fDrawBufferIBAllocPool = NULL;
110 fFlushToReduceCacheSize = false;
111 fAARectRenderer = NULL;
112 fOvalRenderer = NULL;
// Effectively "no override": 1<<20 is far larger than any real max texture size,
// so getMaxTextureSize()'s SkTMin leaves the caps value unchanged by default.
114 fMaxTextureSizeOverride = 1 << 20;
// Creates the backend-specific GrGpu; the success check and call into
// initCommon() are elided in this view.
117 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
118 SkASSERT(NULL == fGpu);
120 fGpu = GrGpu::Create(backend, backendContext, this);
// Backend-independent part of context construction: wires up the draw state,
// both resource caches, the font/layer caches, the specialized rect/oval
// renderers, and finally the buffered draw target.
128 void GrContext::initCommon() {
129 fDrawState = SkNEW(GrDrawState);
130 fGpu->setDrawState(fDrawState);
132 fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
133 MAX_RESOURCE_CACHE_COUNT,
134 MAX_RESOURCE_CACHE_BYTES));
// OverbudgetCB sets fFlushToReduceCacheSize so the next AutoCheckFlush flushes.
135 fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
136 fResourceCache2 = SkNEW(GrResourceCache2);
138 fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
140 fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));
142 fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
143 fOvalRenderer = SkNEW(GrOvalRenderer);
// Lazily evaluated by the PM<->UPM conversion test; false = not yet run.
145 fDidTestPMConversions = false;
147 this->setupDrawBuffer();
// Destructor: runs client cleanup callbacks first, then tears down caches,
// pools and renderers. Note the mixed ownership conventions: SkDELETE for
// owned heap objects, unref() for ref-counted renderers, SkSafeUnref for
// possibly-NULL lazily-created path renderers.
150 GrContext::~GrContext() {
// Client-registered cleanup callbacks (fCleanUpData) run before any
// subsystem is destroyed.
157 for (int i = 0; i < fCleanUpData.count(); ++i) {
158 (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
161 SkDELETE(fResourceCache2);
162 fResourceCache2 = NULL;
163 SkDELETE(fResourceCache);
164 fResourceCache = NULL;
165 SkDELETE(fFontCache);
166 SkDELETE(fDrawBuffer);
167 SkDELETE(fDrawBufferVBAllocPool);
168 SkDELETE(fDrawBufferIBAllocPool);
170 fAARectRenderer->unref();
171 fOvalRenderer->unref();
174 SkSafeUnref(fPathRendererChain);
175 SkSafeUnref(fSoftwarePathRenderer);
// Called when the underlying 3D API context has been lost (or is about to
// be): marks everything abandoned so destructors skip the backend API, then
// drops CPU-side bookkeeping. No GPU calls should be made after this.
179 void GrContext::abandonContext() {
// abandon first so destructors
// don't try to free the resources in the API.
182 fResourceCache2->abandonAll();
184 fGpu->contextAbandoned();
186 // a path renderer may be holding onto resources that
188 SkSafeSetNull(fPathRendererChain);
189 SkSafeSetNull(fSoftwarePathRenderer);
// Pools are deleted outright here (vs. SkDELETE in the dtor — same effect,
// plain delete is used in this function).
194 delete fDrawBufferVBAllocPool;
195 fDrawBufferVBAllocPool = NULL;
197 delete fDrawBufferIBAllocPool;
198 fDrawBufferIBAllocPool = NULL;
200 fAARectRenderer->reset();
201 fOvalRenderer->reset();
203 fResourceCache->purgeAllUnlocked();
205 fFontCache->freeAll();
206 fLayerCache->freeAll();
// Marks backend state dirty so cached GL/backend state is re-sent; does not
// free any resources.
209 void GrContext::resetContext(uint32_t state) {
210 fGpu->markContextDirty(state);
// Frees all GPU resources that are not currently in use: purges the gpu and
// draw buffer, resets the specialized renderers, empties the caches, and drops
// the lazily-built path renderers (which may pin resources).
213 void GrContext::freeGpuResources() {
216 fGpu->purgeResources();
218 fDrawBuffer->purgeResources();
221 fAARectRenderer->reset();
222 fOvalRenderer->reset();
224 fResourceCache->purgeAllUnlocked();
225 fFontCache->freeAll();
226 fLayerCache->freeAll();
227 // a path renderer may be holding onto resources
228 SkSafeSetNull(fPathRendererChain);
229 SkSafeSetNull(fSoftwarePathRenderer);
// Reports current cache usage. Each out-param appears to be written
// unconditionally here; NULL-checks, if any, are on lines elided in this view.
232 void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
234 *resourceCount = fResourceCache->getCachedResourceCount();
237 *resourceBytes = fResourceCache->getCachedResourceBytes();
// Picks the best text rendering strategy for the target: NVPR
// (stencil-and-cover) when path rendering is supported and the RT is MSAA
// with a stencil buffer; otherwise distance-field text.
241 GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
242 const SkDeviceProperties&
244 bool enableDistanceFieldFonts) {
245 if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
246 renderTarget->isMultisampled()) {
247 return GrStencilAndCoverTextContext::Create(this, leakyProperties);
250 return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
253 ////////////////////////////////////////////////////////////////////////////////
// Looks up a content-keyed texture in the resource cache. The key combines
// params/desc/cacheID so e.g. a NPOT-resized variant keys differently from
// the original. Returns a ref'ed texture or (presumably, on the elided miss
// path) NULL.
255 GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
256 const GrCacheID& cacheID,
257 const GrTextureParams* params) {
258 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
259 GrGpuResource* resource = fResourceCache->find(resourceKey);
262 return static_cast<GrSurface*>(resource)->asTexture();
// Cheap existence check: same key computation as findAndRefTexture but no ref.
268 bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
269 const GrCacheID& cacheID,
270 const GrTextureParams* params) const {
271 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
272 return fResourceCache->hasKey(resourceKey);
// Registers a stencil buffer in the cache under a key derived from its
// dimensions (further key inputs are on lines elided in this view).
275 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
276 ASSERT_OWNED_RESOURCE(sb);
278 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
281 fResourceCache->addResource(resourceKey, sb);
// Cache lookup counterpart of addStencilBuffer; returns NULL when find() misses.
284 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
286 GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
289 GrGpuResource* resource = fResourceCache->find(resourceKey);
290 return static_cast<GrStencilBuffer*>(resource);
// CPU nearest-neighbor stretch of src (srcW x srcH) into dst (dstW x dstH),
// bpp bytes per pixel. Uses 16.16 fixed-point source steps (dx, dy); no
// filtering is performed (see the TODO at the caller in createResizedTexture).
293 static void stretch_image(void* dst,
// dx/dy: fixed-point source advance per destination pixel/row.
300 SkFixed dx = (srcW << 16) / dstW;
301 SkFixed dy = (srcH << 16) / dstH;
305 size_t dstXLimit = dstW*bpp;
306 for (int j = 0; j < dstH; ++j) {
// y is the fixed-point source row cursor; its initialization/advance lines
// are elided in this view (x likewise in the inner loop).
308 const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
309 uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
310 for (size_t i = 0; i < dstXLimit; i += bpp) {
311 memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
// Vertex layout used by createResizedTexture's GPU stretch-blit:
// interleaved position (vec2f at offset 0) + local texture coordinate
// (vec2f at offset sizeof(SkPoint)); stride is 2 * sizeof(SkPoint).
320 // position + local coordinate
321 extern const GrVertexAttrib gVertexAttribs[] = {
322 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
323 {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
328 // The desired texture is NPOT and tiled but that isn't supported by
329 // the current hardware. Resize the texture to be a POT
// Two strategies: (1) GPU path — render the clamped source into a POT render
// target with a full-screen quad; (2) CPU fallback — nearest-neighbor
// stretch_image into malloc'd pixels and upload. The branch between them
// (texture creation failure) is on lines elided in this view.
330 GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
331 const GrCacheID& cacheID,
// First reuse (or create) the clamp-mode original that we stretch from.
335 SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
336 if (NULL == clampedTexture) {
337 clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
339 if (NULL == clampedTexture) {
// GPU path: a POT render target texture we can draw the stretch into.
344 GrSurfaceDesc rtDesc = desc;
345 rtDesc.fFlags = rtDesc.fFlags |
346 kRenderTarget_GrSurfaceFlag |
347 kNoStencil_GrSurfaceFlag;
348 rtDesc.fWidth = GrNextPow2(desc.fWidth);
349 rtDesc.fHeight = GrNextPow2(desc.fHeight);
351 GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
354 GrDrawTarget::AutoStateRestore asr(fDrawBuffer, GrDrawTarget::kReset_ASRInit);
355 GrDrawState* drawState = fDrawBuffer->drawState();
356 drawState->setRenderTarget(texture->asRenderTarget());
358 // if filtering is not desired then we want to ensure all
359 // texels in the resampled image are copies of texels from
361 GrTextureParams params(SkShader::kClamp_TileMode,
362 filter ? GrTextureParams::kBilerp_FilterMode :
363 GrTextureParams::kNone_FilterMode);
364 drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);
// Layout must match gVertexAttribs above: position + local coord, stride 2*SkPoint.
366 drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
367 2 * sizeof(SkPoint));
369 GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, 0);
371 if (arg.succeeded()) {
372 SkPoint* verts = (SkPoint*) arg.vertices();
// verts[0] starts the position fan; verts[1] starts the unit-square local
// coords; the stride arg interleaves them through the same buffer.
373 verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
374 verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
375 fDrawBuffer->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
378 // TODO: Our CPU stretch doesn't filter. But we create separate
379 // stretched textures when the texture params is either filtered or
380 // not. Either implement filtered stretch blit on CPU or just create
381 // one when FBO case fails.
// CPU fallback: plain (non-RT) POT texture populated from a CPU-side stretch.
383 rtDesc.fFlags = kNone_GrSurfaceFlags;
384 // no longer need to clamp at min RT size.
385 rtDesc.fWidth = GrNextPow2(desc.fWidth);
386 rtDesc.fHeight = GrNextPow2(desc.fHeight);
388 // We shouldn't be resizing a compressed texture.
389 SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
391 size_t bpp = GrBytesPerPixel(desc.fConfig);
// 128*128*4 inline storage avoids a heap allocation for small textures.
392 GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
393 stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
394 srcData, desc.fWidth, desc.fHeight, bpp);
396 size_t stretchedRowBytes = rtDesc.fWidth * bpp;
398 texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
// Creates a content-keyed texture and registers it in the resource cache.
// If the computed key says the texture needs resizing (NPOT + tiling on
// hardware without NPOT-tile support) the resized path is taken instead of a
// direct upload.
405 GrTexture* GrContext::createTexture(const GrTextureParams* params,
406 const GrSurfaceDesc& desc,
407 const GrCacheID& cacheID,
410 GrResourceKey* cacheKey) {
411 GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
414 if (GrTexturePriv::NeedsResizing(resourceKey)) {
415 // We do not know how to resize compressed textures.
416 SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
418 texture = this->createResizedTexture(desc, cacheID,
420 GrTexturePriv::NeedsBilerp(resourceKey));
422 texture = fGpu->createTexture(desc, srcData, rowBytes);
426 fResourceCache->addResource(resourceKey, texture);
// Optional out-param: hand the key back to the caller (NULL-check is on a
// line elided in this view).
429 *cacheKey = resourceKey;
// Creates an uninitialized texture and registers it under its *scratch* key
// (dimension/flag based), making it reusable by refScratchTexture.
436 GrTexture* GrContext::createNewScratchTexture(const GrSurfaceDesc& desc) {
437 GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
441 fResourceCache->addResource(texture->getScratchKey(), texture);
// Returns a ref'ed scratch texture compatible with inDesc, reusing a cached
// one when possible. kApprox match rounds dimensions up (pow2, min 16) to
// improve reuse; on a miss the flags are progressively relaxed (the retry
// loop structure is partially elided in this view) before finally creating
// a new scratch texture.
445 GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
446 bool calledDuringFlush) {
447 // kNoStencil has no meaning if kRT isn't set.
448 SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
449 !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));
451 // Make sure caller has checked for renderability if kRT is set.
452 SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
453 this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
// Copy-on-write so the common path never copies the descriptor.
455 SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);
// Only attempt cache reuse when the driver tolerates it (or the texture is
// an RT, which is always worth reusing).
457 if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
458 GrSurfaceFlags origFlags = desc->fFlags;
459 if (kApprox_ScratchTexMatch == match) {
460 // bin by pow2 with a reasonable min
461 static const int MIN_SIZE = 16;
462 GrSurfaceDesc* wdesc = desc.writable();
463 wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
464 wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
468 GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
// During a flush we cannot tolerate a texture with pending IO (flushing
// again to resolve it would recurse); otherwise merely prefer IO-free.
469 uint32_t scratchFlags = 0;
470 if (calledDuringFlush) {
471 scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
472 } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
473 // If it is not a render target then it will most likely be populated by
474 // writePixels() which will trigger a flush if the texture has pending IO.
475 scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
477 GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, scratchFlags);
// Hit: bump MRU position in the old cache and return the texture.
479 fResourceCache->makeResourceMRU(resource);
480 return static_cast<GrSurface*>(resource)->asTexture();
483 if (kExact_ScratchTexMatch == match) {
486 // We had a cache miss and we are in approx mode, relax the fit of the flags.
488 // We no longer try to reuse textures that were previously used as render targets in
489 // situations where no RT is needed; doing otherwise can confuse the video driver and
490 // cause significant performance problems in some cases.
491 if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
492 desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
// Restore the original flags before creating a fresh texture below.
499 desc.writable()->fFlags = origFlags;
502 GrTexture* texture = this->createNewScratchTexture(*desc);
503 SkASSERT(NULL == texture ||
504 texture->getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
// Resource-cache over-budget callback (registered in initCommon). Only sets a
// flag; the actual flush happens later via AutoCheckFlush/flush(), which also
// clears the flag.
508 bool GrContext::OverbudgetCB(void* data) {
511 GrContext* context = reinterpret_cast<GrContext*>(data);
513 // Flush the InOrderDrawBuffer to possibly free up some textures
514 context->fFlushToReduceCacheSize = true;
// Creates a texture that bypasses the resource cache entirely.
520 GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
523 GrSurfaceDesc descCopy = descIn;
524 return fGpu->createTexture(descCopy, srcData, rowBytes);
// Thin pass-throughs to the resource cache's budget limits.
527 void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
528 fResourceCache->getLimits(maxTextures, maxTextureBytes);
531 void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
532 fResourceCache->setLimits(maxTextures, maxTextureBytes);
// Caps queries; max texture size is additionally clamped by the test-only
// fMaxTextureSizeOverride (default 1<<20, i.e. effectively no clamp).
535 int GrContext::getMaxTextureSize() const {
536 return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
539 int GrContext::getMaxRenderTargetSize() const {
540 return fGpu->caps()->maxRenderTargetSize();
543 int GrContext::getMaxSampleCount() const {
544 return fGpu->caps()->maxSampleCount();
547 ///////////////////////////////////////////////////////////////////////////////
// Wrap externally-created backend objects; ownership semantics are defined by
// the descriptor and the GrGpu implementation.
549 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
550 return fGpu->wrapBackendTexture(desc);
553 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
554 return fGpu->wrapBackendRenderTarget(desc);
// Whether an Index8 texture of the given size/params can be used: requires
// Index8 texturability, and tiling of NPOT sizes requires NPOT-tile support.
// (Return statements fall on lines elided in this view.)
559 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
560 int width, int height) const {
561 const GrDrawTargetCaps* caps = fGpu->caps();
562 if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
566 bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
569 bool tiled = params && params->isTiled();
570 if (tiled && !caps->npotTextureTileSupport()) {
578 ////////////////////////////////////////////////////////////////////////////////
// Clears (part of) a render target. rect == NULL means the whole target;
// canIgnoreRect lets the backend clear everything when that is cheaper.
580 void GrContext::clear(const SkIRect* rect,
583 GrRenderTarget* renderTarget) {
584 ASSERT_OWNED_RESOURCE(renderTarget);
585 SkASSERT(renderTarget);
587 AutoRestoreEffects are;
588 AutoCheckFlush acf(this);
589 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
// NULL paint: a clear needs no color/coverage stages.
590 GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
591 if (NULL == target) {
594 target->clear(rect, color, canIgnoreRect, renderTarget);
// Fills the entire render target with the paint by drawing a big rect mapped
// through the inverse view matrix (or, failing that, an identity-matrix draw
// via AutoMatrixRestore — the 'am' declaration is on a line elided here).
597 void GrContext::drawPaint(const GrPaint& origPaint) {
598 // set rect to be big enough to fill the space, but not super-huge, so we
599 // don't overflow fixed-point implementations
602 SkIntToScalar(getRenderTarget()->width()),
603 SkIntToScalar(getRenderTarget()->height()));
605 SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
607 GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);
609 // We attempt to map r by the inverse matrix and draw that. mapRect will
610 // map the four corners and bound them with a new rect. This will not
611 // produce a correct result for some perspective matrices.
612 if (!this->getMatrix().hasPerspective()) {
613 if (!fViewMatrix.invert(&inverse)) {
614 SkDebugf("Could not invert matrix\n");
619 if (!am.setIdentity(this, paint.writable())) {
620 SkDebugf("Could not invert matrix\n");
624 // by definition this fills the entire clip, no need for AA
625 if (paint->isAntiAlias()) {
626 paint.writable()->setAntiAlias(false);
628 this->drawRect(*paint, r);
// Debug helper; body elided in this view.
632 void GrContext::dumpFontCache() const {
637 ////////////////////////////////////////////////////////////////////////////////
639 /* create a triangle strip that strokes the specified triangle. There are 8
640 unique vertices, but we repreat the last 2 to close up. Alternatively we
641 could use an indices array, and then only send 8 verts, but not sure that
// (Note: the comment above says "triangle" but the parameters show this
// strokes a *rect*: 8 unique vertices alternating outer/inner edge, offset
// by half the stroke width; verts[8]/verts[9] that close the strip are on
// lines elided in this view.)
644 static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
646 const SkScalar rad = SkScalarHalf(width);
649 verts[0].set(rect.fLeft + rad, rect.fTop + rad);
650 verts[1].set(rect.fLeft - rad, rect.fTop - rad);
651 verts[2].set(rect.fRight - rad, rect.fTop + rad);
652 verts[3].set(rect.fRight + rad, rect.fTop - rad);
653 verts[4].set(rect.fRight - rad, rect.fBottom - rad);
654 verts[5].set(rect.fRight + rad, rect.fBottom + rad);
655 verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
656 verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
// True when all four edges lie on integer coordinates (such rects need no AA
// when axis-aligned).
661 static inline bool is_irect(const SkRect& r) {
662 return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
663 SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
// Decides whether a rect draw should take the analytic coverage-AA path, and
// computes the device-space bounds. Bails (coverage AA off) when the blend
// can't accept coverage, when the RT is MSAA, or when the matrix doesn't
// preserve the required alignment for fill/stroke.
666 static bool apply_aa_to_rect(GrDrawTarget* target,
668 SkScalar strokeWidth,
669 const SkMatrix& combinedMatrix,
670 SkRect* devBoundRect) {
671 if (!target->getDrawState().canTweakAlphaForCoverage() &&
672 target->shouldDisableCoverageAAForBlend()) {
674 //SkDebugf("Turning off AA to correctly apply blend.\n");
// MSAA targets get hardware AA; the analytic path is unnecessary.
678 const GrDrawState& drawState = target->getDrawState();
679 if (drawState.getRenderTarget()->isMultisampled()) {
683 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
// Strokes require axis alignment; fills only require right angles.
684 if (strokeWidth >= 0) {
686 if (!combinedMatrix.preservesAxisAlignment()) {
690 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
692 if (!combinedMatrix.preservesRightAngles()) {
698 combinedMatrix.mapRect(devBoundRect, rect);
// Integer-aligned fills need no AA at all.
699 if (strokeWidth < 0) {
700 return !is_irect(*devBoundRect);
// Inclusive point-in-rect test (boundary counts as inside), used by drawRect's
// full-RT-clear optimization.
706 static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
707 return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
708 point.fY >= rect.fTop && point.fY <= rect.fBottom;
// Draws a rect, choosing among several strategies:
//   1. dashed stroke   -> convert to a path and re-dispatch to drawPath
//   2. opaque full-RT  -> replaced by a clear()
//   3. coverage AA     -> AARectRenderer (fill or stroke)
//   4. non-AA stroke   -> manual triangle-strip / line-strip geometry
//   5. non-AA fill     -> drawSimpleRect
// strokeInfo == NULL means fill (width stays -1).
711 void GrContext::drawRect(const GrPaint& paint,
713 const GrStrokeInfo* strokeInfo) {
714 if (strokeInfo && strokeInfo->isDashed()) {
717 this->drawPath(paint, path, *strokeInfo);
721 AutoRestoreEffects are;
722 AutoCheckFlush acf(this);
723 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
724 if (NULL == target) {
728 GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
// Stroke width convention: negative = fill, 0 = hairline, >0 = stroke.
729 SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
730 SkMatrix matrix = target->drawState()->getViewMatrix();
732 // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
733 // cases where the RT is fully inside a stroke.
736 target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
737 SkRect clipSpaceRTRect = rtRect;
738 bool checkClip = false;
739 if (this->getClip()) {
741 clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
742 SkIntToScalar(this->getClip()->fOrigin.fY));
744 // Does the clip contain the entire RT?
745 if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
747 if (!matrix.invert(&invM)) {
750 // Does the rect bound the RT?
751 SkPoint srcSpaceRTQuad[4];
752 invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
753 if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
754 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
755 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
756 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
// Only an opaque constant-color paint can be expressed as a clear.
759 if (paint.isOpaqueAndConstantColor(&clearColor)) {
760 target->clear(NULL, clearColor, true, fRenderTarget);
// Coverage AA only pays off on non-MSAA targets.
768 bool needAA = paint.isAntiAlias() &&
769 !target->getDrawState().getRenderTarget()->isMultisampled();
770 bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);
772 const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
// The AA rect renderers work in device space; move the view matrix's effect
// into the pre-computed devBoundRect and draw with identity.
775 GrDrawState::AutoViewMatrixRestore avmr;
776 if (!avmr.setIdentity(target->drawState())) {
780 fAARectRenderer->strokeAARect(target, rect,
781 matrix, devBoundRect,
785 fAARectRenderer->fillAARect(target,
786 rect, matrix, devBoundRect);
792 // TODO: consider making static vertex buffers for these cases.
793 // Hairline could be done by just adding closing vertex to
794 // unitSquareVertexBuffer()
// Non-AA stroke path: 10 verts covers the worst case (closed strip).
796 static const int worstCaseVertCount = 10;
797 target->drawState()->setDefaultVertexAttribs();
798 GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
800 if (!geo.succeeded()) {
801 SkDebugf("Failed to get space for vertices!\n");
805 GrPrimitiveType primType;
807 SkPoint* vertex = geo.positions();
// Wide stroke: triangle strip around the rect edge.
811 primType = kTriangleStrip_GrPrimitiveType;
812 setStrokeRectStrip(vertex, rect, width);
// Hairline: closed line strip around the rect.
816 primType = kLineStrip_GrPrimitiveType;
817 vertex[0].set(rect.fLeft, rect.fTop);
818 vertex[1].set(rect.fRight, rect.fTop);
819 vertex[2].set(rect.fRight, rect.fBottom);
820 vertex[3].set(rect.fLeft, rect.fBottom);
821 vertex[4].set(rect.fLeft, rect.fTop);
824 target->drawNonIndexed(primType, 0, vertCount);
// Non-AA fill falls through to the simplest path.
827 target->drawSimpleRect(rect);
// Draws dstRect textured/mapped from localRect (optionally through
// localMatrix). No AA variant — delegates directly to the draw target.
831 void GrContext::drawRectToRect(const GrPaint& paint,
832 const SkRect& dstRect,
833 const SkRect& localRect,
834 const SkMatrix* localMatrix) {
835 AutoRestoreEffects are;
836 AutoCheckFlush acf(this);
837 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
838 if (NULL == target) {
842 GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
844 target->drawRect(dstRect, &localRect, localMatrix);
// Vertex layouts for drawVertices. gPosUVColorAttribs covers
// pos / pos+uv / pos+uv+color by using a prefix of its entries;
// gPosColorAttribs covers pos / pos+color.
849 extern const GrVertexAttrib gPosUVColorAttribs[] = {
850 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding },
851 {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
852 {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
855 static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint);
856 static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor);
858 extern const GrVertexAttrib gPosColorAttribs[] = {
859 {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
860 {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
863 static const size_t kPosAttribsSize = sizeof(SkPoint);
864 static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor);
// Selects the attrib layout matching which optional arrays were supplied and
// reports the byte offsets of uv/color within a vertex (-1 = absent, left
// unchanged here — callers initialize the out-params to -1).
866 static void set_vertex_attributes(GrDrawState* drawState,
867 const SkPoint* texCoords,
868 const GrColor* colors,
874 if (texCoords && colors) {
875 *texOffset = sizeof(SkPoint);
876 *colorOffset = 2*sizeof(SkPoint);
877 drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize);
878 } else if (texCoords) {
879 *texOffset = sizeof(SkPoint);
880 drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize);
882 *colorOffset = sizeof(SkPoint);
883 drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize);
885 drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize);
// Immediate-mode style vertex draw: interleaves the caller's separate
// position/uv/color arrays into one vertex buffer, optionally copies indices,
// then issues an indexed or non-indexed draw.
891 void GrContext::drawVertices(const GrPaint& paint,
892 GrPrimitiveType primitiveType,
894 const SkPoint positions[],
895 const SkPoint texCoords[],
896 const GrColor colors[],
897 const uint16_t indices[],
899 AutoRestoreEffects are;
900 AutoCheckFlush acf(this);
901 GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
903 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
904 if (NULL == target) {
907 GrDrawState* drawState = target->drawState();
909 GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
// Offsets stay -1 when the corresponding array isn't supplied.
911 int colorOffset = -1, texOffset = -1;
912 set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
914 size_t VertexStride = drawState->getVertexStride();
915 if (!geo.set(target, vertexCount, indexCount)) {
916 SkDebugf("Failed to get space for vertices!\n");
// Interleave: position always first, then uv/color at their layout offsets.
919 void* curVertex = geo.vertices();
921 for (int i = 0; i < vertexCount; ++i) {
922 *((SkPoint*)curVertex) = positions[i];
924 if (texOffset >= 0) {
925 *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
927 if (colorOffset >= 0) {
928 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
930 curVertex = (void*)((intptr_t)curVertex + VertexStride);
933 // we don't currently apply offscreen AA to this path. Need improved
934 // management of GrDrawTarget's geometry to avoid copying points per-tile.
936 uint16_t* curIndex = (uint16_t*)geo.indices();
937 for (int i = 0; i < indexCount; ++i) {
938 curIndex[i] = indices[i];
940 target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
942 target->drawNonIndexed(primitiveType, 0, vertexCount);
946 ///////////////////////////////////////////////////////////////////////////////
// Draws a rounded rect: empty -> no-op (early return elided in this view);
// dashed stroke -> path fallback; otherwise try the specialized oval/rrect
// renderer and fall back to the generic path pipeline on failure.
948 void GrContext::drawRRect(const GrPaint& paint,
949 const SkRRect& rrect,
950 const GrStrokeInfo& strokeInfo) {
951 if (rrect.isEmpty()) {
955 if (strokeInfo.isDashed()) {
957 path.addRRect(rrect);
958 this->drawPath(paint, path, strokeInfo);
962 AutoRestoreEffects are;
963 AutoCheckFlush acf(this);
964 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
965 if (NULL == target) {
969 GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
971 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
973 if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
975 path.addRRect(rrect);
976 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
980 ///////////////////////////////////////////////////////////////////////////////
// Draws the region between two rounded rects (outer minus inner). Tries the
// specialized renderer; on failure builds an even-odd path of both rrects
// and fills it through the generic path pipeline.
982 void GrContext::drawDRRect(const GrPaint& paint,
983 const SkRRect& outer,
984 const SkRRect& inner) {
985 if (outer.isEmpty()) {
989 AutoRestoreEffects are;
990 AutoCheckFlush acf(this);
991 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
993 GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
995 if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
// Even-odd fill of (inner, outer) yields the ring between them.
997 path.addRRect(inner);
998 path.addRRect(outer);
999 path.setFillType(SkPath::kEvenOdd_FillType);
1001 GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
1002 this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
1006 ///////////////////////////////////////////////////////////////////////////////
// Draws an oval: empty -> no-op; dashed -> path fallback; otherwise the
// specialized oval renderer, falling back to the generic path pipeline.
1008 void GrContext::drawOval(const GrPaint& paint,
1010 const GrStrokeInfo& strokeInfo) {
1011 if (oval.isEmpty()) {
1015 if (strokeInfo.isDashed()) {
1018 this->drawPath(paint, path, strokeInfo);
1022 AutoRestoreEffects are;
1023 AutoCheckFlush acf(this);
1024 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
1025 if (NULL == target) {
1029 GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
1031 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1034 if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
1037 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
1041 // Can 'path' be drawn as a pair of filled nested rectangles?
// Requirements: fill style, non-inverse, axis-alignment-preserving view
// matrix, a blend compatible with coverage AA, opposite winding (for winding
// fill), and a margin between the rects that is either uniform or >= 1px on
// every side (so AARectRenderer's nested-rect fast path renders correctly).
1042 static bool is_nested_rects(GrDrawTarget* target,
1044 const SkStrokeRec& stroke,
1046 SkASSERT(stroke.isFillStyle());
1048 if (path.isInverseFillType()) {
1052 const GrDrawState& drawState = target->getDrawState();
1054 // TODO: this restriction could be lifted if we were willing to apply
1055 // the matrix to all the points individually rather than just to the rect
1056 if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1060 if (!target->getDrawState().canTweakAlphaForCoverage() &&
1061 target->shouldDisableCoverageAAForBlend()) {
1065 SkPath::Direction dirs[2];
1066 if (!path.isNestedRects(rects, dirs)) {
1070 if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1071 // The two rects need to be wound opposite to each other
1075 // Right now, nested rects where the margin is not the same width
1076 // all around do not render correctly
1077 const SkScalar* outer = rects[0].asScalars();
1078 const SkScalar* inner = rects[1].asScalars();
// Compare the per-edge margins; accept if all equal or all >= SK_Scalar1.
// (allEq bookkeeping lines are elided in this view.)
1082 SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1083 bool allGoE1 = margin >= SK_Scalar1;
1085 for (int i = 1; i < 4; ++i) {
1086 SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1087 if (temp < SK_Scalar1) {
1090 if (!SkScalarNearlyEqual(margin, temp)) {
1095 return allEq || allGoE1;
// Top-level path draw. Handles, in order:
//   - empty path (inverse fill -> drawPaint, else no-op)
//   - dashed strokes: special-cased dashed *lines* via GrDashingEffect,
//     otherwise the dash is flattened into a new path and re-dispatched
//   - concave coverage-AA fills that are really two nested rects
//   - ovals via the oval renderer
//   - everything else via internalDrawPath.
1098 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {
1100 if (path.isEmpty()) {
1101 if (path.isInverseFillType()) {
1102 this->drawPaint(paint);
1107 if (strokeInfo.isDashed()) {
// Fast path: a dashed straight line can be drawn directly by the dashing
// effect without building a dashed path.
1109 if (path.isLine(pts)) {
1110 AutoRestoreEffects are;
1111 AutoCheckFlush acf(this);
1112 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
1113 if (NULL == target) {
1116 GrDrawState* drawState = target->drawState();
1118 SkMatrix origViewMatrix = drawState->getViewMatrix();
1119 GrDrawState::AutoViewMatrixRestore avmr;
1120 if (avmr.setIdentity(target->drawState())) {
1121 if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
1128 // Filter dashed path into new path with the dashing applied
1129 const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
1130 SkTLazy<SkPath> effectPath;
// newStrokeInfo copies strokeInfo minus the dash (second arg false).
1131 GrStrokeInfo newStrokeInfo(strokeInfo, false);
1132 SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
1133 if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
1134 this->drawPath(paint, *effectPath.get(), newStrokeInfo);
// Dash filtering failed: draw the original geometry un-dashed.
1138 this->drawPath(paint, path, newStrokeInfo);
1142 // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
1143 // Scratch textures can be recycled after they are returned to the texture
1144 // cache. This presents a potential hazard for buffered drawing. However,
1145 // the writePixels that uploads to the scratch will perform a flush so we're
1147 AutoRestoreEffects are;
1148 AutoCheckFlush acf(this);
1149 GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
1150 if (NULL == target) {
1153 GrDrawState* drawState = target->drawState();
1155 GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());
1157 const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
1159 bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();
// width < 0 means fill (see drawRect's stroke-width convention).
1161 if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
1162 // Concave AA paths are expensive - try to avoid them for special cases
1165 if (is_nested_rects(target, path, strokeRec, rects)) {
1166 SkMatrix origViewMatrix = drawState->getViewMatrix();
1167 GrDrawState::AutoViewMatrixRestore avmr;
1168 if (!avmr.setIdentity(target->drawState())) {
1172 fAARectRenderer->fillAANestedRects(target, rects, origViewMatrix);
1178 bool isOval = path.isOval(&ovalRect);
1180 if (!isOval || path.isInverseFillType()
1181 || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
1182 this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
// Generic path rendering: select a GrPathRenderer from the chain, first
// attempting the path as-is without the software renderer, then (if needed)
// converting the stroke to a filled path and finally allowing SW rasterization.
// Gives up with a debug message if no renderer can handle the path.
1186 void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
1187 const GrStrokeInfo& strokeInfo) {
1188 SkASSERT(!path.isEmpty());
1190 GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);
1193 // An Assumption here is that path renderer would use some form of tweaking
1194 // the src color (either the input alpha or in the frag shader) to implement
1195 // aa. If we have some future driver-mojo path AA that can do the right
1196 // thing WRT to the blend then we'll need some query on the PR.
1197 bool useCoverageAA = useAA &&
1198 !target->getDrawState().getRenderTarget()->isMultisampled() &&
1199 !target->shouldDisableCoverageAAForBlend();
1202 GrPathRendererChain::DrawType type =
1203 useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1204 GrPathRendererChain::kColor_DrawType;
1206 const SkPath* pathPtr = &path;
1207 SkTLazy<SkPath> tmpPath;
// Copy-on-write: the stroke rec is only copied if we have to modify it below.
1208 SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());
1210 // Try a 1st time without stroking the path and without allowing the SW renderer
1211 GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);
// Hairline-equivalent strokes are left as-is; real strokes get baked into
// the path geometry so a fill-only renderer can draw them.
1214 if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
1215 // It didn't work the 1st time, so try again with the stroked path
1216 if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
1217 pathPtr = tmpPath.get();
1218 stroke.writable()->setFillStyle();
1219 if (pathPtr->isEmpty()) {
1225 // This time, allow SW renderer
1226 pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
1231 SkDebugf("Unable to find path renderer compatible with path.\n");
1236 pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
1239 ////////////////////////////////////////////////////////////////////////////////
// Flushes (or discards) all buffered drawing to the GPU and then lets the
// resource cache purge. kDiscard_FlushBit drops the queued commands instead
// of executing them.
1241 void GrContext::flush(int flagsBitfield) {
// No draw buffer means nothing has been recorded (early-out path elided here).
1242     if (NULL == fDrawBuffer) {
1246     if (kDiscard_FlushBit & flagsBitfield) {
1247         fDrawBuffer->reset();
1249         fDrawBuffer->flush();
1251     fResourceCache->purgeAsNeeded();
// A flush satisfies any pending request to flush for cache-size reasons.
1252     fFlushToReduceCacheSize = false;
// CPU fallback: converts a buffer of unpremultiplied pixels to premultiplied,
// in the same color type, using SkPixelInfo's conversion machinery.
// Returns false if |srcConfig| has no corresponding SkColorType (elided
// early return) or if the pixel conversion itself fails.
1255 bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
1256                           const void* inPixels, size_t outRowBytes, void* outPixels) {
1257     SkSrcPixelInfo srcPI;
1258     if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1261     srcPI.fAlphaType = kUnpremul_SkAlphaType;
1262     srcPI.fPixels = inPixels;
1263     srcPI.fRowBytes = inRowBytes;
// Destination differs only in alpha type (premul) and buffer/rowBytes.
1265     SkDstPixelInfo dstPI;
1266     dstPI.fColorType = srcPI.fColorType;
1267     dstPI.fAlphaType = kPremul_SkAlphaType;
1268     dstPI.fPixels = outPixels;
1269     dstPI.fRowBytes = outRowBytes;
1271     return srcPI.convertPixelsTo(&dstPI, width, height);
// Writes a rectangle of pixels from |buffer| into |surface|. Fast path: a
// direct texture upload when no unpremul conversion is needed and the GPU
// supports writing |srcConfig| to the texture. Slow path: upload into a
// scratch texture, then draw it into the surface's render target with an
// optional conversion effect (unpremul->premul and/or R/B swap).
// NOTE(review): numerous original lines are elided in this listing (early
// returns, closing braces, some argument lines); tokens below are verbatim.
1274 bool GrContext::writeSurfacePixels(GrSurface* surface,
1275                                    int left, int top, int width, int height,
1276                                    GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
1277                                    uint32_t pixelOpsFlags) {
1280     GrTexture* texture = NULL;
// Direct-write fast path: only valid when the data is already premultiplied.
1281     if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
1282         fGpu->canWriteTexturePixels(texture, srcConfig)) {
// Pending reads/writes on the surface must be flushed first unless the
// caller explicitly opted out (flush call itself elided here).
1284         if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
1285             surface->surfacePriv().hasPendingIO()) {
1288         return fGpu->writeTexturePixels(texture, left, top, width, height,
1289                                         srcConfig, buffer, rowBytes);
1290         // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
1291         // upload is already flushed.
1295     // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
// The draw path needs a render target; surfaces without one fail (elided return).
1296     GrRenderTarget* renderTarget = surface->asRenderTarget();
1297     if (NULL == renderTarget) {
1301     // We ignore the preferred config unless it is a R/B swap of the src config. In that case
1302     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1303     // config. This scratch will then have R and B swapped. We correct for this by swapping again
1304     // when drawing the scratch to the dst using a conversion effect.
1305     bool swapRAndB = false;
1306     GrPixelConfig writeConfig = srcConfig;
1307     if (GrPixelConfigSwapRAndB(srcConfig) ==
1308         fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
1309         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
// Scratch texture sized to the write rect (desc declaration elided above).
1314     desc.fWidth = width;
1315     desc.fHeight = height;
1316     desc.fConfig = writeConfig;
// Shadows the outer |texture|; now owns a scratch texture (NULL check elided).
1317     SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
1322     SkAutoTUnref<const GrFragmentProcessor> fp;
1323     SkMatrix textureMatrix;
// Normalize texel coords: divide by texture dimensions.
1324     textureMatrix.setIDiv(texture->width(), texture->height());
1326     // allocate a tmp buffer and sw convert the pixels to premul
1327     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1329     if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
// Unpremul input is only supported for 8888 configs (elided failure return).
1330         if (!GrPixelConfigIs8888(srcConfig)) {
// Prefer doing the unpremul->premul conversion on the GPU.
1333         fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1334         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1336             size_t tmpRowBytes = 4 * width;
1337             tmpPixels.reset(width * height);
1338             if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
// From here on, upload the CPU-converted copy instead of the caller's buffer.
1342             rowBytes = tmpRowBytes;
1343             buffer = tmpPixels.get();
// No unpremul needed: plain copy effect, possibly still swapping R/B.
1347         fp.reset(GrConfigConversionEffect::Create(texture,
1349                                                   GrConfigConversionEffect::kNone_PMConversion,
1353     // Even if the client told us not to flush, we still flush here. The client may have known that
1354     // writes to the original surface caused no data hazards, but they can't know that the scratch
1355     // we just got is safe.
1356     if (texture->surfacePriv().hasPendingIO()) {
1359     if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
1360                                   writeConfig, buffer, rowBytes)) {
// Position the scratch draw at the destination rect (matrix decl elided).
1365     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1367     // This function can be called in the midst of drawing another object (e.g., when uploading a
1368     // SW-rasterized clip while issuing a draw). So we push the current geometry state before
1369     // drawing a rect to the render target.
1370     // The bracket ensures we pop the stack if we wind up flushing below.
1372         GrDrawTarget* drawTarget = this->prepareToDraw(NULL, NULL, NULL);
1373         GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit,
1375         GrDrawState* drawState = drawTarget->drawState();
1376         drawState->addColorProcessor(fp);
1377         drawState->setRenderTarget(renderTarget);
// The write must not be clipped by any active clip.
1378         drawState->disableState(GrDrawState::kClip_StateBit);
1379         drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
1382     if (kFlushWrites_PixelOp & pixelOpsFlags) {
1383         this->flushSurfaceWrites(surface);
1389 // toggles between RGBA and BGRA
// Helper for the R/B-swap read-back path: maps RGBA_8888 <-> BGRA_8888.
// Asserts (debug) if given any other color type.
1390 static SkColorType toggle_colortype32(SkColorType ct) {
1391     if (kRGBA_8888_SkColorType == ct) {
1392         return kBGRA_8888_SkColorType;
1394         SkASSERT(kBGRA_8888_SkColorType == ct);
1395         return kRGBA_8888_SkColorType;
// Reads a rectangle of pixels from |target| into |buffer|. Up to three
// conversions may be required: y-flip, R/B swap, and premul->unpremul. When
// the source is a texture, conversions are preferentially performed by
// drawing into a scratch render target; whatever cannot be done on the GPU
// is finished on the CPU after the read-back.
// NOTE(review): this listing elides many original lines (early returns,
// closing braces, trailing arguments); code tokens below are verbatim.
1399 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1400                                        int left, int top, int width, int height,
1401                                        GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1403     ASSERT_OWNED_RESOURCE(target);
// Flush pending writes to the target first, unless the caller opted out
// (the flush call itself is elided in this listing).
1406     if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
1410     // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
1412     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1413     // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1414     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1415                                                  width, height, dstConfig,
1417     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1418     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1419     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1420     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1422     GrPixelConfig readConfig = dstConfig;
1423     bool swapRAndB = false;
1424     if (GrPixelConfigSwapRAndB(dstConfig) ==
1425         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1426         readConfig = GrPixelConfigSwapRAndB(readConfig);
1430     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
// Unpremul output only supported for 8888 configs (failure return elided).
1432     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1433         // The unpremul flag is only allowed for these two configs.
1437     // If the src is a texture and we would have to do conversions after read pixels, we instead
1438     // do the conversions by drawing the src to a scratch texture. If we handle any of the
1439     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1440     // on the read back pixels.
1441     GrTexture* src = target->asTexture();
1442     if (src && (swapRAndB || unpremul || flipY)) {
1443         // Make the scratch a render so we can read its pixels.
// desc declaration elided; top-left origin makes the scratch read-back flip-free.
1445         desc.fFlags = kRenderTarget_GrSurfaceFlag;
1446         desc.fWidth = width;
1447         desc.fHeight = height;
1448         desc.fConfig = readConfig;
1449         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1451         // When a full read back is faster than a partial we could always make the scratch exactly
1452         // match the passed rect. However, if we see many different size rectangles we will trash
1453         // our texture cache and pay the cost of creating and destroying many textures. So, we only
1454         // request an exact match when the caller is reading an entire RT.
1455         ScratchTexMatch match = kApprox_ScratchTexMatch;
// Exact-size scratch only for whole-RT reads (leading condition elided).
1458             target->width() == width &&
1459             target->height() == height &&
1460             fGpu->fullReadPixelsIsFasterThanPartial()) {
1461             match = kExact_ScratchTexMatch;
1463         SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, match));
1465         // compute a matrix to perform the draw
1466         SkMatrix textureMatrix;
1467         textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
1468         textureMatrix.postIDiv(src->width(), src->height());
1470         SkAutoTUnref<const GrFragmentProcessor> fp;
// Prefer GPU premul->unpremul conversion during the scratch draw.
1472             fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1474                 unpremul = false; // we no longer need to do this on CPU after the read back.
1477         // If we failed to create a PM->UPM effect and have no other conversions to perform then
1478         // there is no longer any point to using the scratch.
1479         if (fp || flipY || swapRAndB) {
1481                 fp.reset(GrConfigConversionEffect::Create(
1482                     src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
1485                 swapRAndB = false; // we will handle the swap in the draw.
1487             // We protect the existing geometry here since it may not be
1488             // clear to the caller that a draw operation (i.e., drawSimpleRect)
1489             // can be invoked in this method
1491             GrDrawTarget::AutoGeometryAndStatePush agasp(fDrawBuffer,
1492                                                          GrDrawTarget::kReset_ASRInit);
1493             GrDrawState* drawState = fDrawBuffer->drawState();
1495             drawState->addColorProcessor(fp);
1497             drawState->setRenderTarget(texture->asRenderTarget());
1498             SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1499             fDrawBuffer->drawSimpleRect(rect);
1500             // we want to read back from the scratch's origin
// Read from the scratch from here on; the scratch draw already applied the
// GPU-side conversions.
1503             target = texture->asRenderTarget();
1505             this->flushSurfaceWrites(target);
1510     if (!fGpu->readPixels(target,
1511                           left, top, width, height,
1512                           readConfig, buffer, rowBytes)) {
1515     // Perform any conversions we weren't able to perform using a scratch texture.
1516     if (unpremul || swapRAndB) {
1517         SkDstPixelInfo dstPI;
1518         if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1521         dstPI.fAlphaType = kUnpremul_SkAlphaType;
1522         dstPI.fPixels = buffer;
1523         dstPI.fRowBytes = rowBytes;
// In-place conversion: src and dst share |buffer|; only the interpretation
// (alpha type and, for R/B swap, color type) differs.
1525         SkSrcPixelInfo srcPI;
1526         srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1527         srcPI.fAlphaType = kPremul_SkAlphaType;
1528         srcPI.fPixels = buffer;
1529         srcPI.fRowBytes = rowBytes;
1531         return srcPI.convertPixelsTo(&dstPI, width, height);
// Makes |surface| safe for a non-Gr client to read: flushes pending IO
// (flush call elided in this listing) and resolves MSAA if the surface is a
// render target.
1536 void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
1538     ASSERT_OWNED_RESOURCE(surface);
1539     if (surface->surfacePriv().hasPendingIO()) {
1542     GrRenderTarget* rt = surface->asRenderTarget();
// NULL check on |rt| is elided here — presumably guarded; TODO confirm.
1544         fGpu->resolveRenderTarget(rt);
// Queues a discard of |renderTarget|'s contents on the draw target, telling
// the GPU the existing pixels need not be preserved.
1548 void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1549     SkASSERT(renderTarget);
1550     ASSERT_OWNED_RESOURCE(renderTarget);
1551     AutoRestoreEffects are;
1552     AutoCheckFlush acf(this);
// No paint: state is reset rather than configured from a paint.
1553     GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
1554     if (NULL == target) {
1557     target->discard(renderTarget);
// Queues a surface-to-surface copy of |srcRect| from |src| to |dst| at
// |dstPoint|. Honors kFlushWrites_PixelOp (flush call elided here).
1560 void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
1561                             const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
// Silently no-ops on NULL surfaces (early return elided).
1562     if (NULL == src || NULL == dst) {
1565     ASSERT_OWNED_RESOURCE(src);
1566     ASSERT_OWNED_RESOURCE(dst);
1568     // Since we're going to the draw target and not GPU, no need to check kNoFlush
1571     GrDrawTarget* target = this->prepareToDraw(NULL, NULL, NULL);
1572     if (NULL == target) {
1575     target->copySurface(dst, src, srcRect, dstPoint);
1577     if (kFlushWrites_PixelOp & pixelOpsFlags) {
// Flushes buffered drawing only if |surface| actually has a pending write;
// the flush call itself is elided in this listing.
1582 void GrContext::flushSurfaceWrites(GrSurface* surface) {
1583     if (surface->surfacePriv().hasPendingWrite()) {
1588 ////////////////////////////////////////////////////////////////////////////////
// Common setup for all draw entry points: configures fDrawState either from
// |paint| (via |are|/|acf|) or to a reset default state, attaches the render
// target and clip, and returns the draw target to record into (NULL-return
// paths are elided in this listing).
1590 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1591                                        AutoRestoreEffects* are,
1592                                        AutoCheckFlush* acf) {
1593     // All users of this draw state should be freeing up all effects when they're done.
1594     // Otherwise effects that own resources may keep those resources alive indefinitely.
1595     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
1596              !fDrawState->hasGeometryProcessor());
1602     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
// Paint path (guard elided): |are| restores effect state on scope exit.
1606         are->set(fDrawState);
1607         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1608 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1609         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1610             !fDrawState->couldApplyCoverage(fGpu->caps())) {
1611             SkDebugf("Partial pixel coverage will be incorrectly blended.\n");
1614         // Clear any vertex attributes configured for the previous use of the
1615         // GrDrawState which can effect which blend optimizations are in effect.
1616         fDrawState->setDefaultVertexAttribs();
// Paintless path (else branch elided): start from a fully reset state.
1618         fDrawState->reset(fViewMatrix);
1619         fDrawState->setRenderTarget(fRenderTarget.get());
// Clipping is enabled only when a clip exists and is not wide open.
1621     fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
1622                                                       !fClip->fClipStack->isWideOpen());
1623     fDrawBuffer->setClip(fClip);
1624     SkASSERT(fDrawState == fDrawBuffer->drawState());
1629  * This method finds a path renderer that can draw the specified path on
1630  * the provided target.
1631  * Due to its expense, the software path renderer has been split out so it
1632  * can be individually allowed/disallowed via the "allowSW" boolean.
1634 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1635                                            const SkStrokeRec& stroke,
1636                                            const GrDrawTarget* target,
1638                                            GrPathRendererChain::DrawType drawType,
1639                                            GrPathRendererChain::StencilSupport* stencilSupport) {
// Both renderer chains are created lazily on first use.
1641     if (NULL == fPathRendererChain) {
1642         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
// Remaining call arguments elided in this listing.
1645     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
// Software rasterization is the fallback of last resort, gated by allowSW.
1651     if (NULL == pr && allowSW) {
1652         if (NULL == fSoftwarePathRenderer) {
1653             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1655         pr = fSoftwarePathRenderer;
1661 ////////////////////////////////////////////////////////////////////////////////
// Thin capability query: delegates directly to the GPU's caps object.
1662 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1663     return fGpu->caps()->isConfigRenderable(config, withMSAA);
// Recommends an MSAA sample count for |config| at the given |dpi|. Returns 0
// (no MSAA) when the config is not MSAA-renderable, when the GPU lacks path
// rendering support, or when the chosen count exceeds the hardware maximum.
1666 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1667                                          SkScalar dpi) const {
1668     if (!this->isConfigRenderable(config, true)) {
1671     int chosenSampleCount = 0;
// Sample counts only recommended for the NVPR-style path rendering backend.
1672     if (fGpu->caps()->pathRenderingSupport()) {
// Higher-dpi output needs fewer samples to look smooth; low dpi gets 16
// (else branch marker elided in this listing).
1673         if (dpi >= 250.0f) {
1674             chosenSampleCount = 4;
1676             chosenSampleCount = 16;
1679     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1680         chosenSampleCount : 0;
// One-time construction of the in-order draw buffer and its vertex/index
// allocation pools, using the pool size/prealloc constants defined at the
// top of this file. Must only be called before any of the three are created.
1683 void GrContext::setupDrawBuffer() {
1684     SkASSERT(NULL == fDrawBuffer);
1685     SkASSERT(NULL == fDrawBufferVBAllocPool);
1686     SkASSERT(NULL == fDrawBufferIBAllocPool);
1688     fDrawBufferVBAllocPool =
1689         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1690                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1691                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1692     fDrawBufferIBAllocPool =
1693         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1694                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1695                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1697     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1698                                                    fDrawBufferVBAllocPool,
1699                                                    fDrawBufferIBAllocPool));
// The draw buffer records into the context's shared draw state.
1701     fDrawBuffer->setDrawState(fDrawState);
// Returns the draw target text rendering should record into; same setup as
// any other draw but with no paint/restore/flush helpers.
1704 GrDrawTarget* GrContext::getTextTarget() {
1705     return this->prepareToDraw(NULL, NULL, NULL);
// Accessor for the GPU's shared quad index buffer.
1708 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1709     return fGpu->getQuadIndexBuffer();
// Probes (once per context, see callers) which premul<->unpremul conversion
// variants round-trip losslessly on this GPU, returning the results as ints
// so GrContext can cache them without including the effect header.
1713 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1714     GrConfigConversionEffect::PMConversion pmToUPM;
1715     GrConfigConversionEffect::PMConversion upmToPM;
1716     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1717     *pmToUPMValue = pmToUPM;
1718     *upmToPMValue = upmToPM;
// Creates a fragment processor that converts premultiplied texel values to
// unpremultiplied (optionally swapping R/B). Returns NULL (elided here) when
// the probed conversion mode is kNone, i.e. no lossless GPU conversion exists.
1722 const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1724                                                           const SkMatrix& matrix) {
// Lazily run the round-trip probe once and cache both directions.
1725     if (!fDidTestPMConversions) {
1726         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1727         fDidTestPMConversions = true;
1729     GrConfigConversionEffect::PMConversion pmToUPM =
1730         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1731     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1732         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
// Mirror of createPMToUPMEffect: converts unpremultiplied texel values to
// premultiplied, or returns NULL (elided) when no lossless GPU mode exists.
1738 const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1740                                                           const SkMatrix& matrix) {
// Same lazy one-time probe as createPMToUPMEffect.
1741     if (!fDidTestPMConversions) {
1742         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1743         fDidTestPMConversions = true;
1745     GrConfigConversionEffect::PMConversion upmToPM =
1746         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1747     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1748         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
// Registers |resource| in the resource cache under |resourceKey|.
1754 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
1755     fResourceCache->addResource(resourceKey, resource);
// Looks up a cached resource by key; refs it for the caller if found.
// SkSafeRef tolerates a NULL miss (return statement elided in this listing).
1758 GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1759     GrGpuResource* resource = fResourceCache->find(resourceKey);
1760     SkSafeRef(resource);
// Activates a trace marker on both the GPU and (when present — guard elided)
// the buffered draw target, so it brackets deferred commands too.
1764 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1765     fGpu->addGpuTraceMarker(marker);
1767         fDrawBuffer->addGpuTraceMarker(marker);
// Deactivates a trace marker previously added with addGpuTraceMarker; the
// draw-buffer call is conditionally guarded (guard elided in this listing).
1771 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1772     fGpu->removeGpuTraceMarker(marker);
1774         fDrawBuffer->removeGpuTraceMarker(marker);
1778 ///////////////////////////////////////////////////////////////////////////////
// Debug aid: dumps resource-cache statistics to the debug output.
1780 void GrContext::printCacheStats() const {
1781     fResourceCache->printStats();
1786 const GrContext::GPUStats* GrContext::gpuStats() const {
1787 return fGpu->gpuStats();