3 * Copyright 2011 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
10 #include "GrContext.h"
12 #include "effects/GrSingleTextureEffect.h"
13 #include "effects/GrConfigConversionEffect.h"
15 #include "GrAARectRenderer.h"
16 #include "GrBufferAllocPool.h"
18 #include "GrDrawTargetCaps.h"
19 #include "GrIndexBuffer.h"
20 #include "GrInOrderDrawBuffer.h"
21 #include "GrLayerCache.h"
22 #include "GrOvalRenderer.h"
23 #include "GrPathRenderer.h"
24 #include "GrPathUtils.h"
25 #include "GrResourceCache.h"
26 #include "GrSoftwarePathRenderer.h"
27 #include "GrStencilBuffer.h"
28 #include "GrTextStrike.h"
29 #include "GrTracing.h"
33 #include "SkStrokeRec.h"
36 #include "SkTraceEvent.h"
// NOTE(review): this is a line-numbered listing with gaps; interleaved source lines are missing.
// Build-time config flag plus cache-size and draw-buffer pool-sizing constants.
38 // It can be useful to set this to false to test whether a bug is caused by using the
39 // InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
41 SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
42 "Defers rendering in GrContext via GrInOrderDrawBuffer.");
// Selects buffered vs. immediate drawing based on the c_Defer config flag.
44 #define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
47 // change this to a 1 to see notifications when partial coverage fails
48 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
// NOTE(review): duplicate #define below — presumably an #if/#else branch whose
// conditional lines are absent from this listing; confirm against the full file.
50 #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
// Default limits for the GPU resource cache (count and bytes).
53 static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
54 static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;
// Vertex/index buffer pool sizing for the deferred draw buffer.
56 static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
57 static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
59 static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
60 static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
// Debug check that a resource (if non-NULL) belongs to this context.
62 #define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
64 // Glorified typedef to avoid including GrDrawState.h in GrContext.h
65 class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
// RAII helper: on destruction, flushes the context if the cache asked for a
// flush to reduce its size (fFlushToReduceCacheSize). Body lines are missing
// from this listing — the destructor and remainder are not visible here.
67 class GrContext::AutoCheckFlush {
69     AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }
72         if (fContext->fFlushToReduceCacheSize) {
// Factory: allocates a GrContext and initializes it for the given backend.
// Lines handling init success/failure are missing from this listing.
81 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
82     GrContext* context = SkNEW(GrContext);
83     if (context->init(backend, backendContext)) {
// Constructor: NULLs out all lazily-created members; real setup happens in init().
// Some member initializations are missing from this listing (numbering gaps).
91 GrContext::GrContext() {
95     fPathRendererChain = NULL;
96     fSoftwarePathRenderer = NULL;
100     fDrawBufferVBAllocPool = NULL;
101     fDrawBufferIBAllocPool = NULL;
102     fFlushToReduceCacheSize = false;
103     fAARectRenderer = NULL;
104     fOvalRenderer = NULL;
// 1<<20 acts as "effectively unlimited" until a real override is set.
106     fMaxTextureSizeOverride = 1 << 20;
107     fGpuTracingEnabled = false;
// Creates the GrGpu for the backend and builds the caches/renderers owned by
// this context. Error-handling and return lines are missing from this listing.
110 bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
111     SkASSERT(NULL == fGpu);
113     fGpu = GrGpu::Create(backend, backendContext, this);
118     fDrawState = SkNEW(GrDrawState);
119     fGpu->setDrawState(fDrawState);
// Texture cache with default limits; OverbudgetCB requests a flush when over budget.
121     fTextureCache = SkNEW_ARGS(GrResourceCache,
122                                (MAX_RESOURCE_CACHE_COUNT,
123                                 MAX_RESOURCE_CACHE_BYTES));
124     fTextureCache->setOverbudgetCallback(OverbudgetCB, this);
126     fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
128     fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (fGpu)));
130     fLastDrawWasBuffered = kNo_BufferedDraw;
132     fAARectRenderer = SkNEW(GrAARectRenderer);
133     fOvalRenderer = SkNEW(GrOvalRenderer);
135     fDidTestPMConversions = false;
137     this->setupDrawBuffer();
// Destructor: runs registered cleanup callbacks, lets the GPU release scratch
// resources, then tears down caches, pools and renderers. Several lines
// (e.g. fGpu unref) are missing from this listing.
142 GrContext::~GrContext() {
// Invoke externally registered cleanup functions before destroying state.
149     for (int i = 0; i < fCleanUpData.count(); ++i) {
150         (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
153     // Since the gpu can hold scratch textures, give it a chance to let go
154     // of them before freeing the texture cache
155     fGpu->purgeResources();
157     delete fTextureCache;
158     fTextureCache = NULL;
161     delete fDrawBufferVBAllocPool;
162     delete fDrawBufferIBAllocPool;
164     fAARectRenderer->unref();
165     fOvalRenderer->unref();
168     SkSafeUnref(fPathRendererChain);
169     SkSafeUnref(fSoftwarePathRenderer);
// Handles a lost GPU context: destroy everything, then rebuild the draw buffer.
173 void GrContext::contextLost() {
174     this->contextDestroyed();
175     this->setupDrawBuffer();
// The underlying 3D API context is gone: abandon (don't free) GPU-side objects,
// then reset CPU-side caches and renderers. Some lines are missing from this listing.
178 void GrContext::contextDestroyed() {
179     // abandon first to so destructors
180     // don't try to free the resources in the API.
181     fGpu->abandonResources();
183     // a path renderer may be holding onto resources that
185     SkSafeSetNull(fPathRendererChain);
186     SkSafeSetNull(fSoftwarePathRenderer);
191     delete fDrawBufferVBAllocPool;
192     fDrawBufferVBAllocPool = NULL;
194     delete fDrawBufferIBAllocPool;
195     fDrawBufferIBAllocPool = NULL;
197     fAARectRenderer->reset();
198     fOvalRenderer->reset();
200     fTextureCache->purgeAllUnlocked();
202     fFontCache->freeAll();
203     fLayerCache->freeAll();
204     fGpu->markContextDirty();
// Marks GPU state dirty so cached state is re-sent; 'state' selects what to reset.
207 void GrContext::resetContext(uint32_t state) {
208     fGpu->markContextDirty(state);
// Frees as many GPU resources as possible without losing the context itself.
// Some lines (e.g. a preceding flush) are missing from this listing.
211 void GrContext::freeGpuResources() {
214     fGpu->purgeResources();
216     fAARectRenderer->reset();
217     fOvalRenderer->reset();
219     fTextureCache->purgeAllUnlocked();
220     fFontCache->freeAll();
221     fLayerCache->freeAll();
222     // a path renderer may be holding onto resources
223     SkSafeSetNull(fPathRendererChain);
224     SkSafeSetNull(fSoftwarePathRenderer);
// Accessors reporting current texture-cache usage (bytes / resource count).
227 size_t GrContext::getGpuTextureCacheBytes() const {
228     return fTextureCache->getCachedResourceBytes();
231 int GrContext::getGpuTextureCacheResourceCount() const {
232     return fTextureCache->getCachedResourceCount();
235 ////////////////////////////////////////////////////////////////////////////////
// Looks up a texture in the cache by its computed key; the ref step implied by
// the name is on lines missing from this listing.
237 GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
238                                         const GrCacheID& cacheID,
239                                         const GrTextureParams* params) {
240     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
241     GrCacheable* resource = fTextureCache->find(resourceKey);
243     return static_cast<GrTexture*>(resource);
// Returns true if a texture matching desc/cacheID/params is already cached.
246 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
247                                  const GrCacheID& cacheID,
248                                  const GrTextureParams* params) const {
249     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
250     return fTextureCache->hasKey(resourceKey);
// Stencil buffers share the texture cache, keyed by width/height/sample count.
// Additional key parameters are on lines missing from this listing.
253 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
254     ASSERT_OWNED_RESOURCE(sb);
256     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
259     fTextureCache->addResource(resourceKey, sb);
// Finds a previously added stencil buffer with matching dimensions.
262 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
264     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
267     GrCacheable* resource = fTextureCache->find(resourceKey);
268     return static_cast<GrStencilBuffer*>(resource);
// CPU nearest-neighbor stretch of src into dst using 16.16 fixed-point steps.
// Parameter lines and the per-row x-advance are missing from this listing.
271 static void stretchImage(void* dst,
// dx/dy: fixed-point source step per destination pixel.
278     SkFixed dx = (srcW << 16) / dstW;
279     SkFixed dy = (srcH << 16) / dstH;
283     size_t dstXLimit = dstW*bpp;
284     for (int j = 0; j < dstH; ++j) {
286         void* srcRow = (uint8_t*)src + (y>>16)*srcW*bpp;
287         void* dstRow = (uint8_t*)dst + j*dstW*bpp;
288         for (size_t i = 0; i < dstXLimit; i += bpp) {
289             memcpy((uint8_t*) dstRow + i,
290                    (uint8_t*) srcRow + (x>>16)*bpp,
300 // position + local coordinate
// Vertex layout for the resize blit: position at offset 0, local coord after it.
301 extern const GrVertexAttrib gVertexAttribs[] = {
302     {kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
303     {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
308 // The desired texture is NPOT and tiled but that isn't supported by
309 // the current hardware. Resize the texture to be a POT
// Two strategies: (1) GPU path — render the clamped texture into a POT render
// target; (2) CPU fallback — stretchImage into a malloc'd buffer and upload.
// Several lines (return paths, x/y loop state) are missing from this listing.
310 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
311                                            const GrCacheID& cacheID,
// Reuse or create the non-resized ("clamped") source texture first.
315     SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
316     if (NULL == clampedTexture) {
317         clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
319         if (NULL == clampedTexture) {
324     GrTextureDesc rtDesc = desc;
325     rtDesc.fFlags =  rtDesc.fFlags |
326                      kRenderTarget_GrTextureFlagBit |
327                      kNoStencil_GrTextureFlagBit;
328     rtDesc.fWidth  = GrNextPow2(desc.fWidth);
329     rtDesc.fHeight = GrNextPow2(desc.fHeight);
331     GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
// GPU path: draw a fullscreen fan sampling the clamped texture.
333     if (NULL != texture) {
334         GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
335         GrDrawState* drawState = fGpu->drawState();
336         drawState->setRenderTarget(texture->asRenderTarget());
338         // if filtering is not desired then we want to ensure all
339         // texels in the resampled image are copies of texels from
341         GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
342                                                                    GrTextureParams::kNone_FilterMode);
343         drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
345         drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
347         GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
349         if (arg.succeeded()) {
350             SkPoint* verts = (SkPoint*) arg.vertices();
351             verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
352             verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
353             fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
356         // TODO: Our CPU stretch doesn't filter. But we create separate
357         // stretched textures when the texture params is either filtered or
358         // not. Either implement filtered stretch blit on CPU or just create
359         // one when FBO case fails.
// CPU fallback: drop the RT flag, stretch on the CPU, then upload.
361         rtDesc.fFlags = kNone_GrTextureFlags;
362         // no longer need to clamp at min RT size.
363         rtDesc.fWidth  = GrNextPow2(desc.fWidth);
364         rtDesc.fHeight = GrNextPow2(desc.fHeight);
365         size_t bpp = GrBytesPerPixel(desc.fConfig);
366         SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
367         stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
368                      srcData, desc.fWidth, desc.fHeight, bpp);
370         size_t stretchedRowBytes = rtDesc.fWidth * bpp;
372         SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
374         SkASSERT(NULL != texture);
// Creates a texture, resizing first if the computed key says resizing is
// needed, and inserts it into the cache. srcData/rowBytes parameter lines and
// the return are missing from this listing.
380 GrTexture* GrContext::createTexture(const GrTextureParams* params,
381                                     const GrTextureDesc& desc,
382                                     const GrCacheID& cacheID,
385                                     GrResourceKey* cacheKey) {
386     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
389     if (GrTexture::NeedsResizing(resourceKey)) {
390         texture = this->createResizedTexture(desc, cacheID,
392                                              GrTexture::NeedsBilerp(resourceKey));
394         texture= fGpu->createTexture(desc, srcData, rowBytes);
397     if (NULL != texture) {
398         // Adding a resource could put us overbudget. Try to free up the
399         // necessary space before adding it.
400         fTextureCache->purgeAsNeeded(1, texture->gpuMemorySize());
401         fTextureCache->addResource(resourceKey, texture);
// Optionally report the key used so callers can re-find the texture later.
403         if (NULL != cacheKey) {
404             *cacheKey = resourceKey;
// File-local helper: creates a scratch texture and adds it to the cache as
// exclusive (hidden) so concurrent lookups can't grab it. The return line is
// missing from this listing.
411 static GrTexture* create_scratch_texture(GrGpu* gpu,
412                                          GrResourceCache* textureCache,
413                                          const GrTextureDesc& desc) {
414     GrTexture* texture = gpu->createTexture(desc, NULL, 0);
415     if (NULL != texture) {
416         GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
417         // Adding a resource could put us overbudget. Try to free up the
418         // necessary space before adding it.
419         textureCache->purgeAsNeeded(1, texture->gpuMemorySize());
420         // Make the resource exclusive so future 'find' calls don't return it
421         textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
// Returns a scratch texture matching inDesc, either exactly or approximately
// (pow2-binned, min 16). Found textures are made exclusive so no other caller
// gets them. The retry-loop structure and ref are on lines missing from this
// listing.
426 GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
428     SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
429              !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
431     // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
432     SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
433              !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
434              (inDesc.fConfig != kAlpha_8_GrPixelConfig));
436     if (!fGpu->caps()->reuseScratchTextures() &&
437         !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
438         // If we're never recycling this texture we can always make it the right size
439         return create_scratch_texture(fGpu, fTextureCache, inDesc);
442     GrTextureDesc desc = inDesc;
444     if (kApprox_ScratchTexMatch == match) {
445         // bin by pow2 with a reasonable min
446         static const int MIN_SIZE = 16;
447         desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
448         desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
451     GrCacheable* resource = NULL;
452     int origWidth = desc.fWidth;
453     int origHeight = desc.fHeight;
456         GrResourceKey key = GrTexture::ComputeScratchKey(desc);
457         // Ensure we have exclusive access to the texture so future 'find' calls don't return it
458         resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
459         if (NULL != resource) {
463         if (kExact_ScratchTexMatch == match) {
466         // We had a cache miss and we are in approx mode, relax the fit of the flags.
468         // We no longer try to reuse textures that were previously used as render targets in
469         // situations where no RT is needed; doing otherwise can confuse the video driver and
470         // cause significant performance problems in some cases.
471         if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
472             desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
// Cache miss after relaxation: create a fresh scratch texture at the original size.
479     if (NULL == resource) {
480         desc.fFlags = inDesc.fFlags;
481         desc.fWidth = origWidth;
482         desc.fHeight = origHeight;
483         resource = create_scratch_texture(fGpu, fTextureCache, desc);
486     return static_cast<GrTexture*>(resource);
// Returns a scratch texture (that was exclusive) to the cache once its
// creation ref is the only one remaining. Three cases: recycle, delete now,
// or defer deletion until the deferred ref count drains.
489 void GrContext::addExistingTextureToCache(GrTexture* texture) {
491     if (NULL == texture) {
495     // This texture should already have a cache entry since it was once
497     SkASSERT(NULL != texture->getCacheEntry());
499     // Conceptually, the cache entry is going to assume responsibility
500     // for the creation ref. Assert refcnt == 1.
501     SkASSERT(texture->unique());
503     if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
504         // Since this texture came from an AutoScratchTexture it should
505         // still be in the exclusive pile. Recycle it.
506         fTextureCache->makeNonExclusive(texture->getCacheEntry());
508     } else if (texture->getDeferredRefCount() <= 0) {
509         // When we aren't reusing textures we know this scratch texture
510         // will never be reused and would be just wasting time in the cache
511         fTextureCache->makeNonExclusive(texture->getCacheEntry());
512         fTextureCache->deleteResource(texture->getCacheEntry());
514         // In this case (fDeferredRefCount > 0) but the cache is the only
515         // one holding a real ref. Mark the object so when the deferred
516         // ref count goes to 0 the texture will be deleted (remember
517         // in this code path scratch textures aren't getting reused).
518         texture->setNeedsDeferredUnref();
// Counterpart of lockAndRefScratchTexture: re-admits a scratch texture to the
// cache (or schedules its deletion). Mirrors the case analysis in
// addExistingTextureToCache. Some lines (e.g. the cache unref) are missing
// from this listing.
523 void GrContext::unlockScratchTexture(GrTexture* texture) {
524     ASSERT_OWNED_RESOURCE(texture);
525     SkASSERT(NULL != texture->getCacheEntry());
527     // If this is a scratch texture we detached it from the cache
528     // while it was locked (to avoid two callers simultaneously getting
529     // the same texture).
530     if (texture->getCacheEntry()->key().isScratch()) {
531         if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
532             fTextureCache->makeNonExclusive(texture->getCacheEntry());
534         } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
535             // Only the cache now knows about this texture. Since we're never
536             // reusing scratch textures (in this code path) it would just be
537             // wasting time sitting in the cache.
538             fTextureCache->makeNonExclusive(texture->getCacheEntry());
539             fTextureCache->deleteResource(texture->getCacheEntry());
541             // In this case (fRefCnt > 1 || defRefCnt > 0) but we don't really
542             // want to readd it to the cache (since it will never be reused).
543             // Instead, give up the cache's ref and leave the decision up to
544             // addExistingTextureToCache once its ref count reaches 0. For
545             // this to work we need to leave it in the exclusive list.
546             texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
547             // Give up the cache's ref to the texture
// Trims the texture cache back under its budget, if the cache exists.
553 void GrContext::purgeCache() {
554     if (NULL != fTextureCache) {
555         fTextureCache->purgeAsNeeded();
// Resource-cache overbudget callback: request a flush of the draw buffer so
// pending draws release their texture refs. The return line is missing from
// this listing.
559 bool GrContext::OverbudgetCB(void* data) {
560     SkASSERT(NULL != data);
562     GrContext* context = reinterpret_cast<GrContext*>(data);
564     // Flush the InOrderDrawBuffer to possibly free up some textures
565     context->fFlushToReduceCacheSize = true;
// Creates a texture without entering it in the resource cache. Parameter
// lines (srcData/rowBytes) are missing from this listing.
571 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
574     GrTextureDesc descCopy = descIn;
575     return fGpu->createTexture(descCopy, srcData, rowBytes);
// Simple pass-throughs for cache limits and device maximums. The texture-size
// getter additionally honors fMaxTextureSizeOverride.
578 void GrContext::getTextureCacheLimits(int* maxTextures,
579                                       size_t* maxTextureBytes) const {
580     fTextureCache->getLimits(maxTextures, maxTextureBytes);
583 void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
584     fTextureCache->setLimits(maxTextures, maxTextureBytes);
587 int GrContext::getMaxTextureSize() const {
588     return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
591 int GrContext::getMaxRenderTargetSize() const {
592     return fGpu->caps()->maxRenderTargetSize();
595 int GrContext::getMaxSampleCount() const {
596     return fGpu->caps()->maxSampleCount();
599 ///////////////////////////////////////////////////////////////////////////////
// Wrap externally created (backend API) texture / render-target objects.
601 GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
602     return fGpu->wrapBackendTexture(desc);
605 GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
606     return fGpu->wrapBackendRenderTarget(desc);
609 ///////////////////////////////////////////////////////////////////////////////
// Index-8 textures need palette support, and tiled NPOT use needs NPOT-tile
// support. Early-return and final-return lines are missing from this listing.
611 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
612                                           int width, int height) const {
613     const GrDrawTargetCaps* caps = fGpu->caps();
614     if (!caps->eightBitPaletteSupport()) {
618     bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
621     bool tiled = NULL != params && params->isTiled();
622     if (tiled && !caps->npotTextureTileSupport()) {
630 ////////////////////////////////////////////////////////////////////////////////
// Clears 'rect' (or the whole target) via the prepared draw target. The
// color/canIgnoreRect parameter lines are missing from this listing.
632 void GrContext::clear(const SkIRect* rect,
635                       GrRenderTarget* target) {
636     AutoRestoreEffects are;
637     AutoCheckFlush acf(this);
638     this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
639                                                                 canIgnoreRect, target);
// Fills the entire render target with the paint by drawing an RT-sized rect
// mapped through the inverse view matrix (identity fallback for perspective).
// The rect declaration and some early returns are missing from this listing.
642 void GrContext::drawPaint(const GrPaint& origPaint) {
643     // set rect to be big enough to fill the space, but not super-huge, so we
644     // don't overflow fixed-point implementations
647                     SkIntToScalar(getRenderTarget()->width()),
648                     SkIntToScalar(getRenderTarget()->height()));
650     SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
653     // We attempt to map r by the inverse matrix and draw that. mapRect will
654     // map the four corners and bound them with a new rect. This will not
655     // produce a correct result for some perspective matrices.
656     if (!this->getMatrix().hasPerspective()) {
657         if (!fViewMatrix.invert(&inverse)) {
658             GrPrintf("Could not invert matrix\n");
663         if (!am.setIdentity(this, paint.writable())) {
664             GrPrintf("Could not invert matrix\n");
668     // by definition this fills the entire clip, no need for AA
669     if (paint->isAntiAlias()) {
670         paint.writable()->setAntiAlias(false);
672     this->drawRect(*paint, r);
// Debug dump of the font cache; the body is entirely absent from this listing
// (numbering gap) — presumably it delegates to fFontCache; confirm in full file.
676 void GrContext::dumpFontCache() const {
681 ////////////////////////////////////////////////////////////////////////////////
683 /* create a triangle strip that strokes the specified triangle. There are 8
684  unique vertices, but we repreat the last 2 to close up. Alternatively we
685  could use an indices array, and then only send 8 verts, but not sure that
// Builds a 10-vertex strip around 'rect' with half-stroke-width 'rad'. The
// width parameter line and the closing verts[8]/verts[9] are missing from
// this listing.
688 static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
690     const SkScalar rad = SkScalarHalf(width);
693     verts[0].set(rect.fLeft + rad, rect.fTop + rad);
694     verts[1].set(rect.fLeft - rad, rect.fTop - rad);
695     verts[2].set(rect.fRight - rad, rect.fTop + rad);
696     verts[3].set(rect.fRight + rad, rect.fTop - rad);
697     verts[4].set(rect.fRight - rad, rect.fBottom - rad);
698     verts[5].set(rect.fRight + rad, rect.fBottom + rad);
699     verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
700     verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
// True when every edge of 'r' lies exactly on integer coordinates.
705 static bool isIRect(const SkRect& r) {
706     return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
707            SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
// Decides whether a rect draw can/should use the coverage-ramp AA path and, if
// so, computes the device-space bounds. Returns via lines missing from this
// listing; 'rect' parameter line is also absent.
710 static bool apply_aa_to_rect(GrDrawTarget* target,
712                              SkScalar strokeWidth,
713                              const SkMatrix& combinedMatrix,
714                              SkRect* devBoundRect,
715                              bool* useVertexCoverage) {
716     // we use a simple coverage ramp to do aa on axis-aligned rects
717     // we check if the rect will be axis-aligned, and the rect won't land on
720     // we are keeping around the "tweak the alpha" trick because
721     // it is our only hope for the fixed-pipe implementation.
722     // In a shader implementation we can give a separate coverage input
723     // TODO: remove this ugliness when we drop the fixed-pipe impl
724     *useVertexCoverage = false;
725     if (!target->getDrawState().canTweakAlphaForCoverage()) {
726         if (target->shouldDisableCoverageAAForBlend()) {
728             //GrPrintf("Turning off AA to correctly apply blend.\n");
732             *useVertexCoverage = true;
735     const GrDrawState& drawState = target->getDrawState();
// MSAA targets get AA for free — skip the coverage path.
736     if (drawState.getRenderTarget()->isMultisampled()) {
740     if (0 == strokeWidth && target->willUseHWAALines()) {
744 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
745     if (strokeWidth >= 0) {
747         if (!combinedMatrix.preservesAxisAlignment()) {
751 #if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
753         if (!combinedMatrix.preservesRightAngles()) {
759     combinedMatrix.mapRect(devBoundRect, rect);
// For fills (negative stroke width), AA is only needed off the integer grid.
761     if (strokeWidth < 0) {
762         return !isIRect(*devBoundRect);
// Point-in-rect test that includes points exactly on the rect's edges.
768 static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
769     return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
770            point.fY >= rect.fTop && point.fY <= rect.fBottom;
// Draws (fills or strokes) a rect. Fast paths, in order: replace a
// full-target opaque draw with a clear; AA coverage-ramp rects; hairline /
// stroked strip geometry; plain drawSimpleRect. Numerous lines (the rect
// parameter, several returns/braces) are missing from this listing.
773 void GrContext::drawRect(const GrPaint& paint,
775                          const SkStrokeRec* stroke,
776                          const SkMatrix* matrix) {
777     AutoRestoreEffects are;
778     AutoCheckFlush acf(this);
779     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
781     GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
// Negative width encodes "fill" throughout this function.
783     SkScalar width = stroke == NULL ? -1 : stroke->getWidth();
784     SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
785     if (NULL != matrix) {
786         combinedMatrix.preConcat(*matrix);
789     // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
790     // cases where the RT is fully inside a stroke.
793         target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
794         SkRect clipSpaceRTRect = rtRect;
795         bool checkClip = false;
796         if (NULL != this->getClip()) {
798             clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
799                                    SkIntToScalar(this->getClip()->fOrigin.fY));
801         // Does the clip contain the entire RT?
802         if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
804             if (!combinedMatrix.invert(&invM)) {
807             // Does the rect bound the RT?
808             SkPoint srcSpaceRTQuad[4];
809             invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
810             if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
811                 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
812                 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
813                 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
816                 if (paint.isOpaqueAndConstantColor(&clearColor)) {
817                     target->clear(NULL, clearColor, true);
825     bool useVertexCoverage;
826     bool needAA = paint.isAntiAlias() &&
827                   !target->getDrawState().getRenderTarget()->isMultisampled();
828     bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
// AA path: identity view matrix, then delegate to the AA rect renderer.
831         GrDrawState::AutoViewMatrixRestore avmr;
832         if (!avmr.setIdentity(target->drawState())) {
836             fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
837                                           combinedMatrix, devBoundRect,
838                                           stroke, useVertexCoverage);
841             fAARectRenderer->fillAARect(this->getGpu(), target,
842                                         rect, combinedMatrix, devBoundRect,
849         // TODO: consider making static vertex buffers for these cases.
850         // Hairline could be done by just adding closing vertex to
851         // unitSquareVertexBuffer()
853         static const int worstCaseVertCount = 10;
854         target->drawState()->setDefaultVertexAttribs();
855         GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
857         if (!geo.succeeded()) {
858             GrPrintf("Failed to get space for vertices!\n");
862         GrPrimitiveType primType;
864         SkPoint* vertex = geo.positions();
// Strip for a stroked rect; line-strip (5 pts) for a hairline outline.
868             primType = kTriangleStrip_GrPrimitiveType;
869             setStrokeRectStrip(vertex, rect, width);
873             primType = kLineStrip_GrPrimitiveType;
874             vertex[0].set(rect.fLeft, rect.fTop);
875             vertex[1].set(rect.fRight, rect.fTop);
876             vertex[2].set(rect.fRight, rect.fBottom);
877             vertex[3].set(rect.fLeft, rect.fBottom);
878             vertex[4].set(rect.fLeft, rect.fTop);
881         GrDrawState::AutoViewMatrixRestore avmr;
882         if (NULL != matrix) {
883             GrDrawState* drawState = target->drawState();
884             avmr.set(drawState, *matrix);
887         target->drawNonIndexed(primType, 0, vertCount);
890         target->drawSimpleRect(rect, matrix);
// Draws dstRect with local (texture) coordinates taken from localRect,
// optionally transforming each by its own matrix.
894 void GrContext::drawRectToRect(const GrPaint& paint,
895                                const SkRect& dstRect,
896                                const SkRect& localRect,
897                                const SkMatrix* dstMatrix,
898                                const SkMatrix* localMatrix) {
899     AutoRestoreEffects are;
900     AutoCheckFlush acf(this);
901     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
903     GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
905     target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
// Vertex layouts for drawVertices: position [+ local coord] [+ color]. The
// closing "};" lines are absent from this listing (numbering gaps).
910 extern const GrVertexAttrib gPosUVColorAttribs[] = {
911     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
912     {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
913     {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
916 extern const GrVertexAttrib gPosColorAttribs[] = {
917     {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
918     {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
// Chooses one of the attrib arrays above based on which optional per-vertex
// data (texCoords/colors) is supplied, and reports the byte offsets. The
// colorOffset/texOffset parameter lines are missing from this listing.
921 static void set_vertex_attributes(GrDrawState* drawState,
922                                   const SkPoint* texCoords,
923                                   const GrColor* colors,
929     if (NULL != texCoords && NULL != colors) {
930         *texOffset = sizeof(SkPoint);
931         *colorOffset = 2*sizeof(SkPoint);
932         drawState->setVertexAttribs<gPosUVColorAttribs>(3);
933     } else if (NULL != texCoords) {
934         *texOffset = sizeof(SkPoint);
935         drawState->setVertexAttribs<gPosUVColorAttribs>(2);
936     } else if (NULL != colors) {
937         *colorOffset = sizeof(SkPoint);
938         drawState->setVertexAttribs<gPosColorAttribs>(2);
940         drawState->setVertexAttribs<gPosColorAttribs>(1);
// Draws caller-supplied geometry. If extra per-vertex data (tex coords or
// colors) is present, the arrays are interleaved into reserved geometry;
// otherwise positions are used directly. Indexed and non-indexed paths.
// Some lines (vertexCount param, braces) are missing from this listing.
946 void GrContext::drawVertices(const GrPaint& paint,
947                              GrPrimitiveType primitiveType,
949                              const SkPoint positions[],
950                              const SkPoint texCoords[],
951                              const GrColor colors[],
952                              const uint16_t indices[],
954     AutoRestoreEffects are;
955     AutoCheckFlush acf(this);
956     GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
958     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
960     GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
962     GrDrawState* drawState = target->drawState();
964     int colorOffset = -1, texOffset = -1;
965     set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
967     size_t vertexSize = drawState->getVertexSize();
// Interleave positions + optional tex coords/colors into one vertex stream.
968     if (sizeof(SkPoint) != vertexSize) {
969         if (!geo.set(target, vertexCount, 0)) {
970             GrPrintf("Failed to get space for vertices!\n");
973         void* curVertex = geo.vertices();
975         for (int i = 0; i < vertexCount; ++i) {
976             *((SkPoint*)curVertex) = positions[i];
978             if (texOffset >= 0) {
979                 *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
981             if (colorOffset >= 0) {
982                 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
984             curVertex = (void*)((intptr_t)curVertex + vertexSize);
987         target->setVertexSourceToArray(positions, vertexCount);
990     // we don't currently apply offscreen AA to this path. Need improved
991     // management of GrDrawTarget's geometry to avoid copying points per-tile.
993     if (NULL != indices) {
994         target->setIndexSourceToArray(indices, indexCount);
995         target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
996         target->resetIndexSource();
998         target->drawNonIndexed(primitiveType, 0, vertexCount);
1002 ///////////////////////////////////////////////////////////////////////////////
// Draws a rounded rect via the oval renderer, falling back to path rendering.
1004 void GrContext::drawRRect(const GrPaint& paint,
1005                           const SkRRect& rrect,
1006                           const SkStrokeRec& stroke) {
1007     if (rrect.isEmpty()) {
1011     AutoRestoreEffects are;
1012     AutoCheckFlush acf(this);
1013     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1015     GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
1017     if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, stroke)) {
1019         path.addRRect(rrect);
1020         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
1024 ///////////////////////////////////////////////////////////////////////////////
// Draws the region between two rounded rects; falls back to an even-odd path
// built from both rrects when the oval renderer can't handle it.
1026 void GrContext::drawDRRect(const GrPaint& paint,
1027                            const SkRRect& outer,
1028                            const SkRRect& inner) {
1029     if (outer.isEmpty()) {
1033     AutoRestoreEffects are;
1034     AutoCheckFlush acf(this);
1035     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1037     GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
1039     if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
1041         path.addRRect(inner);
1042         path.addRRect(outer);
1043         path.setFillType(SkPath::kEvenOdd_FillType);
1045         SkStrokeRec fillRec(SkStrokeRec::kFill_InitStyle);
1046         this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
1050 ///////////////////////////////////////////////////////////////////////////////
// Draws an oval via the oval renderer, falling back to path rendering. The
// 'oval' parameter line and the fallback path setup are missing from this listing.
1052 void GrContext::drawOval(const GrPaint& paint,
1054                          const SkStrokeRec& stroke) {
1055     if (oval.isEmpty()) {
1059     AutoRestoreEffects are;
1060     AutoCheckFlush acf(this);
1061     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1063     GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
1065     if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, stroke)) {
1068         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
1072 // Can 'path' be drawn as a pair of filled nested rectangles?
// Requirements visible here: non-inverse fill, axis-aligned view matrix,
// blend compatible with coverage AA, opposite winding, and uniform margin on
// all four sides. The 'rects' out-parameter line and final return are missing
// from this listing.
1073 static bool is_nested_rects(GrDrawTarget* target,
1075                             const SkStrokeRec& stroke,
1077                             bool* useVertexCoverage) {
1078     SkASSERT(stroke.isFillStyle());
1080     if (path.isInverseFillType()) {
1084     const GrDrawState& drawState = target->getDrawState();
1086     // TODO: this restriction could be lifted if we were willing to apply
1087     // the matrix to all the points individually rather than just to the rect
1088     if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1092     *useVertexCoverage = false;
1093     if (!target->getDrawState().canTweakAlphaForCoverage()) {
1094         if (target->shouldDisableCoverageAAForBlend()) {
1097             *useVertexCoverage = true;
1101     SkPath::Direction dirs[2];
1102     if (!path.isNestedRects(rects, dirs)) {
1106     if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1107         // The two rects need to be wound opposite to each other
1111     // Right now, nested rects where the margin is not the same width
1112     // all around do not render correctly
1113     const SkScalar* outer = rects[0].asScalars();
1114     const SkScalar* inner = rects[1].asScalars();
1116     SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1117     for (int i = 1; i < 4; ++i) {
1118         SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1119         if (!SkScalarNearlyEqual(margin, temp)) {
// Top-level path draw. Special cases: empty path (inverse fill -> drawPaint),
// concave-AA nested rects -> AA rect renderer, ovals -> oval renderer;
// everything else goes to internalDrawPath. Several lines (rects array,
// returns) are missing from this listing.
1127 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
1129     if (path.isEmpty()) {
1130         if (path.isInverseFillType()) {
1131             this->drawPaint(paint);
1136     // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
1137     // Scratch textures can be recycled after they are returned to the texture
1138     // cache. This presents a potential hazard for buffered drawing. However,
1139     // the writePixels that uploads to the scratch will perform a flush so we're
1141     AutoRestoreEffects are;
1142     AutoCheckFlush acf(this);
1143     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1144     GrDrawState* drawState = target->drawState();
1146     GR_CREATE_TRACE_MARKER("GrContext::drawPath", target);
1148     bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();
1150     if (useCoverageAA && stroke.getWidth() < 0 && !path.isConvex()) {
1151         // Concave AA paths are expensive - try to avoid them for special cases
1152         bool useVertexCoverage;
1155         if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
1156             SkMatrix origViewMatrix = drawState->getViewMatrix();
1157             GrDrawState::AutoViewMatrixRestore avmr;
1158             if (!avmr.setIdentity(target->drawState())) {
1162             fAARectRenderer->fillAANestedRects(this->getGpu(), target,
1171     bool isOval = path.isOval(&ovalRect);
1173     if (!isOval || path.isInverseFillType()
1174         || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, stroke)) {
1175         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
// General path drawing: selects a GrPathRenderer from the chain, first for
// the unstroked path (HW renderers only), then — if none is found and the
// stroke is not hairline-equivalent — applies the stroke to a temp path and
// retries with the software renderer allowed.
1179 void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
1180                                  const SkStrokeRec& origStroke) {
1181     SkASSERT(!path.isEmpty());
1183     GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);
1186     // An Assumption here is that path renderer would use some form of tweaking
1187     // the src color (either the input alpha or in the frag shader) to implement
1188     // aa. If we have some future driver-mojo path AA that can do the right
1189     // thing WRT to the blend then we'll need some query on the PR.
1190     bool useCoverageAA = useAA &&
1191         !target->getDrawState().getRenderTarget()->isMultisampled() &&
1192         !target->shouldDisableCoverageAAForBlend();
1195     GrPathRendererChain::DrawType type =
1196         useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1197                            GrPathRendererChain::kColor_DrawType;
// Copy-on-write: 'stroke' aliases origStroke until we need to mutate it.
1199     const SkPath* pathPtr = &path;
1200     SkTLazy<SkPath> tmpPath;
1201     SkTCopyOnFirstWrite<SkStrokeRec> stroke(origStroke);
1203     // Try a 1st time without stroking the path and without allowing the SW renderer
1204     GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);
1207     if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
1208         // It didn't work the 1st time, so try again with the stroked path
1209         if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
// From here on we draw the stroked outline as a fill.
1210             pathPtr = tmpPath.get();
1211             stroke.writable()->setFillStyle();
// Stroking can produce an empty path (e.g. degenerate geometry).
1212             if (pathPtr->isEmpty()) {
1218     // This time, allow SW renderer
1219     pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
// No renderer at all: log and give up (elided early return).
1224         GrPrintf("Unable to find path renderer compatible with path.\n");
1229     pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
1232 ////////////////////////////////////////////////////////////////////////////////
// Flush the buffered draw target. kDiscard_FlushBit drops pending draws
// instead of executing them.
1234 void GrContext::flush(int flagsBitfield) {
// No draw buffer means nothing was ever buffered; nothing to do.
1235     if (NULL == fDrawBuffer) {
1239     if (kDiscard_FlushBit & flagsBitfield) {
1240         fDrawBuffer->reset();
1242     fDrawBuffer->flush();
// A flush just happened, so the cache-pressure flush request is satisfied.
1244     fFlushToReduceCacheSize = false;
// Upload pixels into 'texture'. If the GPU can't write this config directly
// (or an unpremul conversion is requested), route through the render-target
// write path when the texture is also an RT; otherwise fails (elided).
// Returns true on success.
1247 bool GrContext::writeTexturePixels(GrTexture* texture,
1248                                    int left, int top, int width, int height,
1249                                    GrPixelConfig config, const void* buffer, size_t rowBytes,
1251     ASSERT_OWNED_RESOURCE(texture);
1253     if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
1254         if (NULL != texture->asRenderTarget()) {
1255             return this->writeRenderTargetPixels(texture->asRenderTarget(),
1256                                                  left, top, width, height,
1257                                                  config, buffer, rowBytes, flags);
// Flush pending draws that might read this texture, unless caller opts out.
1263     if (!(kDontFlush_PixelOpsFlag & flags)) {
1267     return fGpu->writeTexturePixels(texture, left, top, width, height,
1268                                     config, buffer, rowBytes);
// Read pixels out of 'texture'. If the texture is itself a render target we
// delegate to readRenderTargetPixels; otherwise we copy it into a scratch
// render target first and read from that.
1271 bool GrContext::readTexturePixels(GrTexture* texture,
1272                                   int left, int top, int width, int height,
1273                                   GrPixelConfig config, void* buffer, size_t rowBytes,
1275     ASSERT_OWNED_RESOURCE(texture);
1277     GrRenderTarget* target = texture->asRenderTarget();
1278     if (NULL != target) {
1279         return this->readRenderTargetPixels(target,
1280                                             left, top, width, height,
1281                                             config, buffer, rowBytes,
1284     // TODO: make this more efficient for cases where we're reading the entire
1285     // texture, i.e., use GetTexImage() instead
1287     // create scratch rendertarget and read from that
1288     GrAutoScratchTexture ast;
// Scratch descriptor: renderable, exactly the requested read size/config,
// top-left origin so no y-flip is needed on readback.
1290     desc.fFlags = kRenderTarget_GrTextureFlagBit;
1291     desc.fWidth = width;
1292     desc.fHeight = height;
1293     desc.fConfig = config;
1294     desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1295     ast.set(this, desc, kExact_ScratchTexMatch);
1296     GrTexture* dst = ast.texture();
// Only proceed if we got a scratch texture that is render-target backed.
1297     if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
1298         this->copyTexture(texture, target, NULL);
1299         return this->readRenderTargetPixels(target,
1300                                             left, top, width, height,
1301                                             config, buffer, rowBytes,
1309 #include "SkConfig8888.h"
1311 // toggles between RGBA and BGRA
// Maps kRGBA_8888 -> kBGRA_8888 and vice versa; asserts on any other
// 32-bit color type. Used to describe R/B-swapped readback data.
1312 static SkColorType toggle_colortype32(SkColorType ct) {
1313     if (kRGBA_8888_SkColorType == ct) {
1314         return kBGRA_8888_SkColorType;
1316     SkASSERT(kBGRA_8888_SkColorType == ct);
1317     return kRGBA_8888_SkColorType;
// Read back pixels from 'target' (or the context's current RT when NULL).
// Handles three possible conversions — y-flip, R/B swap, and premul->unpremul
// — preferring to perform them on the GPU by drawing the source into a
// scratch render target with a conversion effect; whatever cannot be done on
// the GPU is finished on the CPU after fGpu->readPixels. Returns true on
// success.
1321 bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
1322                                        int left, int top, int width, int height,
1323                                        GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
1325     ASSERT_OWNED_RESOURCE(target);
// Fall back to the context's bound render target when none is given.
1327     if (NULL == target) {
1328         target = fRenderTarget.get();
1329         if (NULL == target) {
// Make sure pending draws to this RT land before we read.
1334     if (!(kDontFlush_PixelOpsFlag & flags)) {
1338     // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
1340     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1341     // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1342     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1343                                                  width, height, dstConfig,
1345     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1346     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1347     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1348     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1350     GrPixelConfig readConfig = dstConfig;
1351     bool swapRAndB = false;
1352     if (GrPixelConfigSwapRAndB(dstConfig) ==
1353         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1354         readConfig = GrPixelConfigSwapRAndB(readConfig);
1358     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
// Unpremul output is only supported for 8888 configs (elided early return).
1360     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1361         // The unpremul flag is only allowed for these two configs.
1365     // If the src is a texture and we would have to do conversions after read pixels, we instead
1366     // do the conversions by drawing the src to a scratch texture. If we handle any of the
1367     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1368     // on the read back pixels.
1369     GrTexture* src = target->asTexture();
1370     GrAutoScratchTexture ast;
1371     if (NULL != src && (swapRAndB || unpremul || flipY)) {
1372         // Make the scratch a render target because we don't have a robust readTexturePixels as of
1373         // yet. It calls this function.
1375         desc.fFlags = kRenderTarget_GrTextureFlagBit;
1376         desc.fWidth = width;
1377         desc.fHeight = height;
1378         desc.fConfig = readConfig;
1379         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1381         // When a full read back is faster than a partial we could always make the scratch exactly
1382         // match the passed rect. However, if we see many different size rectangles we will trash
1383         // our texture cache and pay the cost of creating and destroying many textures. So, we only
1384         // request an exact match when the caller is reading an entire RT.
1385         ScratchTexMatch match = kApprox_ScratchTexMatch;
1388             target->width() == width &&
1389             target->height() == height &&
1390             fGpu->fullReadPixelsIsFasterThanPartial()) {
1391             match = kExact_ScratchTexMatch;
1393         ast.set(this, desc, match);
1394         GrTexture* texture = ast.texture();
1396         // compute a matrix to perform the draw
// Texture matrix selects the (left, top, width, height) sub-rect of src
// in normalized texture coordinates.
1397         SkMatrix textureMatrix;
1398         textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
1399         textureMatrix.postIDiv(src->width(), src->height());
1401         SkAutoTUnref<const GrEffectRef> effect;
1403         effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1404         if (NULL != effect) {
1405             unpremul = false; // we no longer need to do this on CPU after the read back.
1408         // If we failed to create a PM->UPM effect and have no other conversions to perform then
1409         // there is no longer any point to using the scratch.
1410         if (NULL != effect || flipY || swapRAndB) {
// No PM conversion needed here; the effect only does the R/B swap.
1412             effect.reset(GrConfigConversionEffect::Create(
1415                                     GrConfigConversionEffect::kNone_PMConversion,
1418             swapRAndB = false; // we will handle the swap in the draw.
1420             // We protect the existing geometry here since it may not be
1421             // clear to the caller that a draw operation (i.e., drawSimpleRect)
1422             // can be invoked in this method
1423             GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
1424             GrDrawState* drawState = fGpu->drawState();
1426             drawState->addColorEffect(effect);
1428             drawState->setRenderTarget(texture->asRenderTarget());
1429             SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1430             fGpu->drawSimpleRect(rect, NULL);
1431             // we want to read back from the scratch's origin
// Redirect the actual readback to the scratch RT we just drew into.
1434             target = texture->asRenderTarget();
1438     if (!fGpu->readPixels(target,
1439                           left, top, width, height,
1440                           readConfig, buffer, rowBytes)) {
1443     // Perform any conversions we weren't able to perform using a scratch texture.
1444     if (unpremul || swapRAndB) {
1445         SkDstPixelInfo dstPI;
1446         if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1449         dstPI.fAlphaType = kUnpremul_SkAlphaType;
1450         dstPI.fPixels = buffer;
1451         dstPI.fRowBytes = rowBytes;
// In-place CPU conversion: src and dst both point at 'buffer'; only the
// interpretation (color/alpha type) differs.
1453         SkSrcPixelInfo srcPI;
1454         srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1455         srcPI.fAlphaType = kPremul_SkAlphaType;
1456         srcPI.fPixels = buffer;
1457         srcPI.fRowBytes = rowBytes;
1459         return srcPI.convertPixelsTo(&dstPI, width, height);
// Resolve MSAA (or similar) contents of 'target' so they are readable.
1464 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1466     ASSERT_OWNED_RESOURCE(target);
1467     // In the future we may track whether there are any pending draws to this
1468     // target. We don't today so we always perform a flush. We don't promise
1469     // this to our clients, though.
1471     fGpu->resolveRenderTarget(target);
// Record a discard of 'target's contents through the (possibly buffered)
// draw target so it is ordered correctly with other draws.
1474 void GrContext::discardRenderTarget(GrRenderTarget* target) {
1476     ASSERT_OWNED_RESOURCE(target);
1477     AutoRestoreEffects are;
1478     AutoCheckFlush acf(this);
1479     this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->discard(target);
// Copy 'src' into 'dst' by drawing a textured rect directly on the GPU,
// bypassing the buffered draw target. 'topLeft' (optional) offsets the
// source rect; the copy is clipped to src's bounds.
1482 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1483     if (NULL == src || NULL == dst) {
1486     ASSERT_OWNED_RESOURCE(src);
1488     // Writes pending to the source texture are not tracked, so a flush
1489     // is required to ensure that the copy captures the most recent contents
1490     // of the source texture. See similar behavior in
1491     // GrContext::resolveRenderTarget.
1494     GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1495     GrDrawState* drawState = fGpu->drawState();
1496     drawState->setRenderTarget(dst);
// Normalize src texel coords, then translate to the requested source rect.
1498     sampleM.setIDiv(src->width(), src->height());
1499     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1500     if (NULL != topLeft) {
1501         srcRect.offset(*topLeft);
1503     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
// Nothing to copy if the source rect lies entirely outside src.
1504     if (!srcRect.intersect(srcBounds)) {
1507     sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
1508     drawState->addColorTextureEffect(src, sampleM);
1509     SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1510     fGpu->drawSimpleRect(dstR, NULL);
// Write pixels into 'target' (or the context's current RT when NULL).
// Fast path: if the RT is a texture, no unpremul is requested, and the GPU
// can write the config directly, delegate to writeTexturePixels. Otherwise
// upload into a scratch texture (converting unpremul->premul on the CPU if
// no GPU effect is available) and draw the scratch onto the target with a
// conversion effect handling any R/B swap. Returns true on success.
1513 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1514                                         int left, int top, int width, int height,
1515                                         GrPixelConfig srcConfig,
1519     ASSERT_OWNED_RESOURCE(target);
1521     if (NULL == target) {
1522         target = fRenderTarget.get();
1523         if (NULL == target) {
1528     // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
1531     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1532     // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
1535     // If the RT is also a texture and we don't have to premultiply then take the texture path.
1536     // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
1539 #if !defined(SK_BUILD_FOR_MAC)
1540     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1541     // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
1543     if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1544         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1545         return this->writeTexturePixels(target->asTexture(),
1546                                         left, top, width, height,
1547                                         srcConfig, buffer, rowBytes, flags);
1551     // We ignore the preferred config unless it is a R/B swap of the src config. In that case
1552     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1553     // config. This scratch will then have R and B swapped. We correct for this by swapping again
1554     // when drawing the scratch to the dst using a conversion effect.
1555     bool swapRAndB = false;
1556     GrPixelConfig writeConfig = srcConfig;
1557     if (GrPixelConfigSwapRAndB(srcConfig) ==
1558         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1559         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
// Scratch texture sized to the write rect; holds the uploaded data.
1564     desc.fWidth = width;
1565     desc.fHeight = height;
1566     desc.fConfig = writeConfig;
1567     GrAutoScratchTexture ast(this, desc);
1568     GrTexture* texture = ast.texture();
1569     if (NULL == texture) {
1573     SkAutoTUnref<const GrEffectRef> effect;
1574     SkMatrix textureMatrix;
1575     textureMatrix.setIDiv(texture->width(), texture->height());
1577     // allocate a tmp buffer and sw convert the pixels to premul
// Starts with zero capacity; only allocated if the CPU fallback is taken.
1578     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1580     if (kUnpremul_PixelOpsFlag & flags) {
// Unpremul input is only supported for 8888 configs (elided early return).
1581         if (!GrPixelConfigIs8888(srcConfig)) {
1584         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1585         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1586         if (NULL == effect) {
1587             SkSrcPixelInfo srcPI;
1588             if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1591             srcPI.fAlphaType = kUnpremul_SkAlphaType;
1592             srcPI.fPixels = buffer;
1593             srcPI.fRowBytes = rowBytes;
1595             tmpPixels.reset(width * height);
1597             SkDstPixelInfo dstPI;
1598             dstPI.fColorType = srcPI.fColorType;
1599             dstPI.fAlphaType = kPremul_SkAlphaType;
1600             dstPI.fPixels = tmpPixels.get();
// Tightly packed rows (4 bytes/pixel) in the temp buffer.
1601             dstPI.fRowBytes = 4 * width;
1603             if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
// Upload the converted copy instead of the caller's buffer.
1607             buffer = tmpPixels.get();
1608             rowBytes = 4 * width;
1611     if (NULL == effect) {
// No PM conversion needed; the effect only undoes the R/B spoofing.
1612         effect.reset(GrConfigConversionEffect::Create(texture,
1614                                                       GrConfigConversionEffect::kNone_PMConversion,
// Upload into the scratch; unpremul was already handled above, so strip the flag.
1618     if (!this->writeTexturePixels(texture,
1619                                   0, 0, width, height,
1620                                   writeConfig, buffer, rowBytes,
1621                                   flags & ~kUnpremul_PixelOpsFlag)) {
1625     // writeRenderTargetPixels can be called in the midst of drawing another
1626     // object (e.g., when uploading a SW path rendering to the gpu while
1627     // drawing a rect) so preserve the current geometry.
1629     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1630     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1631     GrDrawState* drawState = fGpu->drawState();
1633     drawState->addColorEffect(effect);
1635     drawState->setRenderTarget(target);
1637     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
1640 ////////////////////////////////////////////////////////////////////////////////
// Set up fDrawState from 'paint' (or reset it when paint is NULL) and return
// the draw target to use: the buffered GrInOrderDrawBuffer or the GPU
// directly, per 'buffered'. Flushes the buffer when switching from buffered
// to unbuffered so draw order is preserved.
1642 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1643                                        BufferedDraw buffered,
1644                                        AutoRestoreEffects* are,
1645                                        AutoCheckFlush* acf) {
1646     // All users of this draw state should be freeing up all effects when they're done.
1647     // Otherwise effects that own resources may keep those resources alive indefinitely.
1648     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
1650     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1651         fDrawBuffer->flush();
1652         fLastDrawWasBuffered = kNo_BufferedDraw;
1654     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1655     if (NULL != paint) {
1656         SkASSERT(NULL != are);
1657         SkASSERT(NULL != acf);
// 'are' restores the effect stages installed by setFromPaint on scope exit.
1658         are->set(fDrawState);
1659         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1660 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1661         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1662             !fGpu->canApplyCoverage()) {
1663             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
// No paint: reset state to just the view matrix + current render target.
1667         fDrawState->reset(fViewMatrix);
1668         fDrawState->setRenderTarget(fRenderTarget.get());
1670     GrDrawTarget* target;
1671     if (kYes_BufferedDraw == buffered) {
1672         fLastDrawWasBuffered = kYes_BufferedDraw;
1673         target = fDrawBuffer;
1675         SkASSERT(kNo_BufferedDraw == buffered);
1676         fLastDrawWasBuffered = kNo_BufferedDraw;
// Enable clipping only when a clip is set and isn't wide open.
1679     fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
1680                                                       !fClip->fClipStack->isWideOpen());
1681     target->setClip(fClip);
1682     SkASSERT(fDrawState == target->drawState());
1687  * This method finds a path renderer that can draw the specified path on
1688  * the provided target.
1689  * Due to its expense, the software path renderer has split out so it can
1690  * can be individually allowed/disallowed via the "allowSW" boolean.
// Lazily creates the renderer chain (and the SW renderer) on first use.
// Returns NULL when no renderer can handle the path and allowSW is false.
1692 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1693                                            const SkStrokeRec& stroke,
1694                                            const GrDrawTarget* target,
1696                                            GrPathRendererChain::DrawType drawType,
1697                                            GrPathRendererChain::StencilSupport* stencilSupport) {
1699     if (NULL == fPathRendererChain) {
1700         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1703     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
// Fall back to the (expensive) software renderer only when permitted.
1709     if (NULL == pr && allowSW) {
1710         if (NULL == fSoftwarePathRenderer) {
1711             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1713         pr = fSoftwarePathRenderer;
1719 ////////////////////////////////////////////////////////////////////////////////
// Query GPU caps: can 'config' be used as a render target (with MSAA if
// 'withMSAA' is true)?
1720 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1721     return fGpu->caps()->isConfigRenderable(config, withMSAA);
// Suggest an MSAA sample count for 'config' at the given dpi. Returns 0 when
// the config isn't MSAA-renderable or the chosen count exceeds the GPU max.
// Only GPUs with NVPR-style path rendering support get a nonzero suggestion.
1724 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1725                                          SkScalar dpi) const {
1726     if (!this->isConfigRenderable(config, true)) {
1729     int chosenSampleCount = 0;
1730     if (fGpu->caps()->pathRenderingSupport()) {
// Higher dpi needs fewer samples for acceptable quality; low dpi gets 16x.
1731         if (dpi >= 250.0f) {
1732             chosenSampleCount = 4;
1734             chosenSampleCount = 16;
1737     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1738         chosenSampleCount : 0;
// One-time creation of the in-order draw buffer and its vertex/index
// allocation pools, wired to share the context's draw state.
1741 void GrContext::setupDrawBuffer() {
1742     SkASSERT(NULL == fDrawBuffer);
1743     SkASSERT(NULL == fDrawBufferVBAllocPool);
1744     SkASSERT(NULL == fDrawBufferIBAllocPool);
// Pool sizes/prealloc counts come from the constants at the top of the file.
1746     fDrawBufferVBAllocPool =
1747         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1748                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1749                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1750     fDrawBufferIBAllocPool =
1751         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1752                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1753                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1755     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1756                                                    fDrawBufferVBAllocPool,
1757                                                    fDrawBufferIBAllocPool));
// The buffer and the context share fDrawState (see prepareToDraw's assert).
1759     fDrawBuffer->setDrawState(fDrawState);
// Draw target for text rendering: prepared with no paint (state reset).
1762 GrDrawTarget* GrContext::getTextTarget() {
1763     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
// Shared quad index buffer owned by the GPU object.
1766 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1767     return fGpu->getQuadIndexBuffer();
// Probe which premul<->unpremul conversion formulas round-trip losslessly on
// this GPU and report them as ints (GrConfigConversionEffect::PMConversion
// values) via the out-params.
1771 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1772     GrConfigConversionEffect::PMConversion pmToUPM;
1773     GrConfigConversionEffect::PMConversion upmToPM;
1774     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1775     *pmToUPMValue = pmToUPM;
1776     *upmToPMValue = upmToPM;
// Create an effect that converts premul texture data to unpremul while
// sampling (optionally R/B-swapped). Lazily runs the PM-conversion probe
// once per context; returns NULL (elided) when no lossless conversion exists,
// signalling the caller to do the conversion on the CPU instead.
1780 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1782                                                   const SkMatrix& matrix) {
1783     if (!fDidTestPMConversions) {
1784         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1785         fDidTestPMConversions = true;
1787     GrConfigConversionEffect::PMConversion pmToUPM =
1788         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1789     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1790         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
// Mirror of createPMToUPMEffect for the unpremul -> premul direction; same
// lazy probe and NULL-on-unsupported contract.
1796 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1798                                                   const SkMatrix& matrix) {
1799     if (!fDidTestPMConversions) {
1800         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1801         fDidTestPMConversions = true;
1803     GrConfigConversionEffect::PMConversion upmToPM =
1804         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1805     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1806         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
// Get or create a GPU path object for (inPath, stroke), keyed into the
// resource cache. Cache hit returns the existing path (elided); miss creates
// one via fGpu and inserts it after making room.
1812 GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
1813     SkASSERT(fGpu->caps()->pathRenderingSupport());
1815     // TODO: now we add to fTextureCache. This should change to fResourceCache.
1816     GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
1817     GrPath* path = static_cast<GrPath*>(fTextureCache->find(resourceKey));
// Key collision guard: verify the cached path really matches the inputs.
1818     if (NULL != path && path->isEqualTo(inPath, stroke)) {
1821     path = fGpu->createPath(inPath, stroke);
// Evict as needed before inserting so the cache limits are respected.
1822     fTextureCache->purgeAsNeeded(1, path->gpuMemorySize());
1823     fTextureCache->addResource(resourceKey, path);
// Insert 'resource' under 'resourceKey', evicting first to stay within the
// cache's count/byte budget.
1828 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrCacheable* resource) {
1829     fTextureCache->purgeAsNeeded(1, resource->gpuMemorySize());
1830     fTextureCache->addResource(resourceKey, resource);
// Look up a cached resource by key; the caller receives a ref (SkSafeRef is
// a no-op on NULL, so a miss safely returns NULL via the elided return).
1833 GrCacheable* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1834     GrCacheable* resource = fTextureCache->find(resourceKey);
1835     SkSafeRef(resource);
1839 ///////////////////////////////////////////////////////////////////////////////
1841 void GrContext::printCacheStats() const {
1842 fTextureCache->printStats();