* found in the LICENSE file.
*/
-
#include "GrContext.h"
#include "effects/GrConfigConversionEffect.h"
#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
+#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
+#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
+#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
+#include "GrSurfacePriv.h"
#include "GrTextStrike.h"
+#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
+#include "SkConfig8888.h"
#include "SkGr.h"
-#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"
-// It can be useful to set this to false to test whether a bug is caused by using the
-// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
-// debugging simpler.
-SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
- "Defers rendering in GrContext via GrInOrderDrawBuffer.");
-
-#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)
-
#ifdef SK_DEBUG
// change this to a 1 to see notifications when partial coverage fails
#define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
class GrContext::AutoCheckFlush {
public:
- AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }
+ AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }
~AutoCheckFlush() {
if (fContext->fFlushToReduceCacheSize) {
GrContext* fContext;
};
-GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
- GrContext* context = SkNEW(GrContext);
+GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
+ const Options* opts) {
+ GrContext* context;
+ if (NULL == opts) {
+ context = SkNEW_ARGS(GrContext, (Options()));
+ } else {
+ context = SkNEW_ARGS(GrContext, (*opts));
+ }
+
if (context->init(backend, backendContext)) {
return context;
} else {
}
}
-GrContext::GrContext() {
+GrContext::GrContext(const Options& opts) : fOptions(opts) {
fDrawState = NULL;
fGpu = NULL;
fClip = NULL;
fPathRendererChain = NULL;
fSoftwarePathRenderer = NULL;
fResourceCache = NULL;
+ fResourceCache2 = NULL;
fFontCache = NULL;
fDrawBuffer = NULL;
fDrawBufferVBAllocPool = NULL;
fOvalRenderer = NULL;
fViewMatrix.reset();
fMaxTextureSizeOverride = 1 << 20;
- fGpuTracingEnabled = false;
}
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
if (NULL == fGpu) {
return false;
}
+ this->initCommon();
+ return true;
+}
+void GrContext::initCommon() {
fDrawState = SkNEW(GrDrawState);
fGpu->setDrawState(fDrawState);
- fResourceCache = SkNEW_ARGS(GrResourceCache, (MAX_RESOURCE_CACHE_COUNT,
+ fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
+ MAX_RESOURCE_CACHE_COUNT,
MAX_RESOURCE_CACHE_BYTES));
fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
+ fResourceCache2 = SkNEW(GrResourceCache2);
fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));
- fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (fGpu)));
+ fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));
- fLastDrawWasBuffered = kNo_BufferedDraw;
-
- fAARectRenderer = SkNEW(GrAARectRenderer);
+ fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
fOvalRenderer = SkNEW(GrOvalRenderer);
fDidTestPMConversions = false;
this->setupDrawBuffer();
-
- return true;
}
GrContext::~GrContext() {
(*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
}
- // Since the gpu can hold scratch textures, give it a chance to let go
- // of them before freeing the texture cache
- fGpu->purgeResources();
-
- delete fResourceCache;
+ SkDELETE(fResourceCache2);
+ fResourceCache2 = NULL;
+ SkDELETE(fResourceCache);
fResourceCache = NULL;
- delete fFontCache;
- delete fDrawBuffer;
- delete fDrawBufferVBAllocPool;
- delete fDrawBufferIBAllocPool;
+ SkDELETE(fFontCache);
+ SkDELETE(fDrawBuffer);
+ SkDELETE(fDrawBufferVBAllocPool);
+ SkDELETE(fDrawBufferIBAllocPool);
fAARectRenderer->unref();
fOvalRenderer->unref();
fDrawState->unref();
}
-void GrContext::contextLost() {
- this->contextDestroyed();
- this->setupDrawBuffer();
-}
-
-void GrContext::contextDestroyed() {
+void GrContext::abandonContext() {
// abandon first to so destructors
// don't try to free the resources in the API.
- fGpu->abandonResources();
+ fResourceCache2->abandonAll();
+
+ fGpu->contextAbandoned();
// a path renderer may be holding onto resources that
// are now unusable
fFontCache->freeAll();
fLayerCache->freeAll();
- fGpu->markContextDirty();
}
void GrContext::resetContext(uint32_t state) {
this->flush();
fGpu->purgeResources();
+ if (fDrawBuffer) {
+ fDrawBuffer->purgeResources();
+ }
fAARectRenderer->reset();
fOvalRenderer->reset();
}
void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
- if (NULL != resourceCount) {
+ if (resourceCount) {
*resourceCount = fResourceCache->getCachedResourceCount();
}
- if (NULL != resourceBytes) {
+ if (resourceBytes) {
*resourceBytes = fResourceCache->getCachedResourceBytes();
}
}
+GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
+ const SkDeviceProperties&
+ leakyProperties,
+ bool enableDistanceFieldFonts) {
+ if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
+ renderTarget->isMultisampled()) {
+ return GrStencilAndCoverTextContext::Create(this, leakyProperties);
+ }
+
+ return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
+}
+
////////////////////////////////////////////////////////////////////////////////
-GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
+GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params) {
- GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
- GrCacheable* resource = fResourceCache->find(resourceKey);
- SkSafeRef(resource);
- return static_cast<GrTexture*>(resource);
+ GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
+ GrGpuResource* resource = fResourceCache->find(resourceKey);
+ if (resource) {
+ resource->ref();
+ return static_cast<GrSurface*>(resource)->asTexture();
+ } else {
+ return NULL;
+ }
}
-bool GrContext::isTextureInCache(const GrTextureDesc& desc,
+bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const GrTextureParams* params) const {
- GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
+ GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
return fResourceCache->hasKey(resourceKey);
}
GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
height,
sampleCnt);
- GrCacheable* resource = fResourceCache->find(resourceKey);
+ GrGpuResource* resource = fResourceCache->find(resourceKey);
return static_cast<GrStencilBuffer*>(resource);
}
// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
-GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
+GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
}
}
- GrTextureDesc rtDesc = desc;
+ GrSurfaceDesc rtDesc = desc;
rtDesc.fFlags = rtDesc.fFlags |
- kRenderTarget_GrTextureFlagBit |
- kNoStencil_GrTextureFlagBit;
+ kRenderTarget_GrSurfaceFlag |
+ kNoStencil_GrSurfaceFlag;
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
- if (NULL != texture) {
- GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
- GrDrawState* drawState = fGpu->drawState();
+ if (texture) {
+ GrDrawTarget::AutoStateRestore asr(fDrawBuffer, GrDrawTarget::kReset_ASRInit);
+ GrDrawState* drawState = fDrawBuffer->drawState();
drawState->setRenderTarget(texture->asRenderTarget());
// if filtering is not desired then we want to ensure all
// texels in the resampled image are copies of texels from
// the original.
- GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
- GrTextureParams::kNone_FilterMode);
- drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
+ GrTextureParams params(SkShader::kClamp_TileMode,
+ filter ? GrTextureParams::kBilerp_FilterMode :
+ GrTextureParams::kNone_FilterMode);
+ drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);
- drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
+ drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
+ 2 * sizeof(SkPoint));
- GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
+ GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, 0);
if (arg.succeeded()) {
SkPoint* verts = (SkPoint*) arg.vertices();
verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
- fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
+ fDrawBuffer->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
}
} else {
// TODO: Our CPU stretch doesn't filter. But we create separate
// not. Either implement filtered stretch blit on CPU or just create
// one when FBO case fails.
- rtDesc.fFlags = kNone_GrTextureFlags;
+ rtDesc.fFlags = kNone_GrSurfaceFlags;
// no longer need to clamp at min RT size.
rtDesc.fWidth = GrNextPow2(desc.fWidth);
rtDesc.fHeight = GrNextPow2(desc.fHeight);
SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
size_t bpp = GrBytesPerPixel(desc.fConfig);
- SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
+ GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
srcData, desc.fWidth, desc.fHeight, bpp);
size_t stretchedRowBytes = rtDesc.fWidth * bpp;
texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
- SkASSERT(NULL != texture);
+ SkASSERT(texture);
}
return texture;
}
GrTexture* GrContext::createTexture(const GrTextureParams* params,
- const GrTextureDesc& desc,
+ const GrSurfaceDesc& desc,
const GrCacheID& cacheID,
const void* srcData,
size_t rowBytes,
GrResourceKey* cacheKey) {
- GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
+ GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
GrTexture* texture;
- if (GrTextureImpl::NeedsResizing(resourceKey)) {
+ if (GrTexturePriv::NeedsResizing(resourceKey)) {
// We do not know how to resize compressed textures.
SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));
texture = this->createResizedTexture(desc, cacheID,
srcData, rowBytes,
- GrTextureImpl::NeedsBilerp(resourceKey));
+ GrTexturePriv::NeedsBilerp(resourceKey));
} else {
texture = fGpu->createTexture(desc, srcData, rowBytes);
}
- if (NULL != texture) {
- // Adding a resource could put us overbudget. Try to free up the
- // necessary space before adding it.
- fResourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
+ if (texture) {
fResourceCache->addResource(resourceKey, texture);
- if (NULL != cacheKey) {
+ if (cacheKey) {
*cacheKey = resourceKey;
}
}
return texture;
}
-static GrTexture* create_scratch_texture(GrGpu* gpu,
- GrResourceCache* resourceCache,
- const GrTextureDesc& desc) {
- GrTexture* texture = gpu->createTexture(desc, NULL, 0);
- if (NULL != texture) {
- GrResourceKey key = GrTextureImpl::ComputeScratchKey(texture->desc());
- // Adding a resource could put us overbudget. Try to free up the
- // necessary space before adding it.
- resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
- // Make the resource exclusive so future 'find' calls don't return it
- resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
+GrTexture* GrContext::createNewScratchTexture(const GrSurfaceDesc& desc) {
+ GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
+ if (!texture) {
+ return NULL;
}
+ fResourceCache->addResource(texture->getScratchKey(), texture);
return texture;
}
-GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {
-
- SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
- !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));
-
- // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
- SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
- !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
- (inDesc.fConfig != kAlpha_8_GrPixelConfig));
-
- if (!fGpu->caps()->reuseScratchTextures() &&
- !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
- // If we're never recycling this texture we can always make it the right size
- return create_scratch_texture(fGpu, fResourceCache, inDesc);
- }
-
- GrTextureDesc desc = inDesc;
+GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
+ bool calledDuringFlush) {
+ // kNoStencil has no meaning if kRT isn't set.
+ SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
+ !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));
- if (kApprox_ScratchTexMatch == match) {
- // bin by pow2 with a reasonable min
- static const int MIN_SIZE = 16;
- desc.fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
- desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
- }
+ // Make sure caller has checked for renderability if kRT is set.
+ SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
+ this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));
- GrCacheable* resource = NULL;
- int origWidth = desc.fWidth;
- int origHeight = desc.fHeight;
+ SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);
- do {
- GrResourceKey key = GrTextureImpl::ComputeScratchKey(desc);
- // Ensure we have exclusive access to the texture so future 'find' calls don't return it
- resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
- if (NULL != resource) {
- resource->ref();
- break;
- }
- if (kExact_ScratchTexMatch == match) {
- break;
- }
- // We had a cache miss and we are in approx mode, relax the fit of the flags.
-
- // We no longer try to reuse textures that were previously used as render targets in
- // situations where no RT is needed; doing otherwise can confuse the video driver and
- // cause significant performance problems in some cases.
- if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
- desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
- } else {
- break;
+ if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
+ GrSurfaceFlags origFlags = desc->fFlags;
+ if (kApprox_ScratchTexMatch == match) {
+ // bin by pow2 with a reasonable min
+ static const int MIN_SIZE = 16;
+ GrSurfaceDesc* wdesc = desc.writable();
+ wdesc->fWidth = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
+ wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
}
- } while (true);
-
- if (NULL == resource) {
- desc.fFlags = inDesc.fFlags;
- desc.fWidth = origWidth;
- desc.fHeight = origHeight;
- resource = create_scratch_texture(fGpu, fResourceCache, desc);
- }
+ do {
+ GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
+ uint32_t scratchFlags = 0;
+ if (calledDuringFlush) {
+ scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
+ } else if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
+ // If it is not a render target then it will most likely be populated by
+ // writePixels() which will trigger a flush if the texture has pending IO.
+ scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
+ }
+ GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, scratchFlags);
+ if (resource) {
+ fResourceCache->makeResourceMRU(resource);
+ return static_cast<GrSurface*>(resource)->asTexture();
+ }
- return static_cast<GrTexture*>(resource);
-}
+ if (kExact_ScratchTexMatch == match) {
+ break;
+ }
+ // We had a cache miss and we are in approx mode, relax the fit of the flags.
+
+ // We no longer try to reuse textures that were previously used as render targets in
+ // situations where no RT is needed; doing otherwise can confuse the video driver and
+ // cause significant performance problems in some cases.
+ if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
+ desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
+ } else {
+ break;
+ }
-void GrContext::addExistingTextureToCache(GrTexture* texture) {
+ } while (true);
- if (NULL == texture) {
- return;
+ desc.writable()->fFlags = origFlags;
}
- // This texture should already have a cache entry since it was once
- // attached
- SkASSERT(NULL != texture->getCacheEntry());
-
- // Conceptually, the cache entry is going to assume responsibility
- // for the creation ref. Assert refcnt == 1.
- SkASSERT(texture->unique());
-
- if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
- // Since this texture came from an AutoScratchTexture it should
- // still be in the exclusive pile. Recycle it.
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- this->purgeCache();
- } else if (texture->getDeferredRefCount() <= 0) {
- // When we aren't reusing textures we know this scratch texture
- // will never be reused and would be just wasting time in the cache
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- fResourceCache->deleteResource(texture->getCacheEntry());
- } else {
- // In this case (fDeferredRefCount > 0) but the cache is the only
- // one holding a real ref. Mark the object so when the deferred
- // ref count goes to 0 the texture will be deleted (remember
- // in this code path scratch textures aren't getting reused).
- texture->setNeedsDeferredUnref();
- }
-}
-
-
-void GrContext::unlockScratchTexture(GrTexture* texture) {
- ASSERT_OWNED_RESOURCE(texture);
- SkASSERT(NULL != texture->getCacheEntry());
-
- // If this is a scratch texture we detached it from the cache
- // while it was locked (to avoid two callers simultaneously getting
- // the same texture).
- if (texture->getCacheEntry()->key().isScratch()) {
- if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- this->purgeCache();
- } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
- // Only the cache now knows about this texture. Since we're never
- // reusing scratch textures (in this code path) it would just be
- // wasting time sitting in the cache.
- fResourceCache->makeNonExclusive(texture->getCacheEntry());
- fResourceCache->deleteResource(texture->getCacheEntry());
- } else {
- // In this case (fRefCnt > 1 || defRefCnt > 0) but we don't really
- // want to readd it to the cache (since it will never be reused).
- // Instead, give up the cache's ref and leave the decision up to
- // addExistingTextureToCache once its ref count reaches 0. For
- // this to work we need to leave it in the exclusive list.
- texture->impl()->setFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit);
- // Give up the cache's ref to the texture
- texture->unref();
- }
- }
-}
-
-void GrContext::purgeCache() {
- if (NULL != fResourceCache) {
- fResourceCache->purgeAsNeeded();
- }
+ GrTexture* texture = this->createNewScratchTexture(*desc);
+ SkASSERT(NULL == texture ||
+ texture->getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
+ return texture;
}
bool GrContext::OverbudgetCB(void* data) {
- SkASSERT(NULL != data);
+ SkASSERT(data);
GrContext* context = reinterpret_cast<GrContext*>(data);
}
-GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
+GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
void* srcData,
size_t rowBytes) {
- GrTextureDesc descCopy = descIn;
+ GrSurfaceDesc descCopy = descIn;
return fGpu->createTexture(descCopy, srcData, rowBytes);
}
bool isPow2 = SkIsPow2(width) && SkIsPow2(height);
if (!isPow2) {
- bool tiled = NULL != params && params->isTiled();
+ bool tiled = params && params->isTiled();
if (tiled && !caps->npotTextureTileSupport()) {
return false;
}
void GrContext::clear(const SkIRect* rect,
const GrColor color,
bool canIgnoreRect,
- GrRenderTarget* target) {
+ GrRenderTarget* renderTarget) {
+ ASSERT_OWNED_RESOURCE(renderTarget);
+ SkASSERT(renderTarget);
+
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
- canIgnoreRect, target);
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
+ GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
+ target->clear(rect, color, canIgnoreRect, renderTarget);
}
void GrContext::drawPaint(const GrPaint& origPaint) {
SkMatrix inverse;
SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
AutoMatrix am;
+ GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);
// We attempt to map r by the inverse matrix and draw that. mapRect will
// map the four corners and bound them with a new rect. This will not
// produce a correct result for some perspective matrices.
if (!this->getMatrix().hasPerspective()) {
if (!fViewMatrix.invert(&inverse)) {
- GrPrintf("Could not invert matrix\n");
+ SkDebugf("Could not invert matrix\n");
return;
}
inverse.mapRect(&r);
} else {
if (!am.setIdentity(this, paint.writable())) {
- GrPrintf("Could not invert matrix\n");
+ SkDebugf("Could not invert matrix\n");
return;
}
}
verts[9] = verts[1];
}
-static bool isIRect(const SkRect& r) {
- return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
- SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
+static inline bool is_irect(const SkRect& r) {
+ return SkScalarIsInt(r.fLeft) && SkScalarIsInt(r.fTop) &&
+ SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}
static bool apply_aa_to_rect(GrDrawTarget* target,
const SkRect& rect,
SkScalar strokeWidth,
const SkMatrix& combinedMatrix,
- SkRect* devBoundRect,
- bool* useVertexCoverage) {
- // we use a simple coverage ramp to do aa on axis-aligned rects
- // we check if the rect will be axis-aligned, and the rect won't land on
- // integer coords.
-
- // we are keeping around the "tweak the alpha" trick because
- // it is our only hope for the fixed-pipe implementation.
- // In a shader implementation we can give a separate coverage input
- // TODO: remove this ugliness when we drop the fixed-pipe impl
- *useVertexCoverage = false;
- if (!target->getDrawState().canTweakAlphaForCoverage()) {
- if (target->shouldDisableCoverageAAForBlend()) {
+ SkRect* devBoundRect) {
+ if (!target->getDrawState().canTweakAlphaForCoverage() &&
+ target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
- //GrPrintf("Turning off AA to correctly apply blend.\n");
+ //SkDebugf("Turning off AA to correctly apply blend.\n");
#endif
- return false;
- } else {
- *useVertexCoverage = true;
- }
+ return false;
}
const GrDrawState& drawState = target->getDrawState();
if (drawState.getRenderTarget()->isMultisampled()) {
return false;
}
- if (0 == strokeWidth && target->willUseHWAALines()) {
- return false;
- }
-
#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
if (strokeWidth >= 0) {
#endif
#endif
combinedMatrix.mapRect(devBoundRect, rect);
-
if (strokeWidth < 0) {
- return !isIRect(*devBoundRect);
- } else {
- return true;
+ return !is_irect(*devBoundRect);
}
+
+ return true;
}
static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
void GrContext::drawRect(const GrPaint& paint,
const SkRect& rect,
- const GrStrokeInfo* strokeInfo,
- const SkMatrix* matrix) {
- if (NULL != strokeInfo && strokeInfo->isDashed()) {
+ const GrStrokeInfo* strokeInfo) {
+ if (strokeInfo && strokeInfo->isDashed()) {
SkPath path;
path.addRect(rect);
this->drawPath(paint, path, *strokeInfo);
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
-
SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
- SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
- if (NULL != matrix) {
- combinedMatrix.preConcat(*matrix);
- }
+ SkMatrix matrix = target->drawState()->getViewMatrix();
// Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
// cases where the RT is fully inside a stroke.
target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
SkRect clipSpaceRTRect = rtRect;
bool checkClip = false;
- if (NULL != this->getClip()) {
+ if (this->getClip()) {
checkClip = true;
clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
SkIntToScalar(this->getClip()->fOrigin.fY));
// Does the clip contain the entire RT?
if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
SkMatrix invM;
- if (!combinedMatrix.invert(&invM)) {
+ if (!matrix.invert(&invM)) {
return;
}
// Does the rect bound the RT?
// Will it blend?
GrColor clearColor;
if (paint.isOpaqueAndConstantColor(&clearColor)) {
- target->clear(NULL, clearColor, true);
+ target->clear(NULL, clearColor, true, fRenderTarget);
return;
}
}
}
SkRect devBoundRect;
- bool useVertexCoverage;
bool needAA = paint.isAntiAlias() &&
!target->getDrawState().getRenderTarget()->isMultisampled();
- bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
- &useVertexCoverage);
+ bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);
const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();
return;
}
if (width >= 0) {
- fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
- combinedMatrix, devBoundRect,
- strokeRec, useVertexCoverage);
+ fAARectRenderer->strokeAARect(target, rect,
+ matrix, devBoundRect,
+ strokeRec);
} else {
// filled AA rect
- fAARectRenderer->fillAARect(this->getGpu(), target,
- rect, combinedMatrix, devBoundRect,
- useVertexCoverage);
+ fAARectRenderer->fillAARect(target,
+ rect, matrix, devBoundRect);
}
return;
}
GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
if (!geo.succeeded()) {
- GrPrintf("Failed to get space for vertices!\n");
+ SkDebugf("Failed to get space for vertices!\n");
return;
}
vertex[4].set(rect.fLeft, rect.fTop);
}
- GrDrawState::AutoViewMatrixRestore avmr;
- if (NULL != matrix) {
- GrDrawState* drawState = target->drawState();
- avmr.set(drawState, *matrix);
- }
-
target->drawNonIndexed(primType, 0, vertCount);
} else {
// filled BW rect
- target->drawSimpleRect(rect, matrix);
+ target->drawSimpleRect(rect);
}
}
void GrContext::drawRectToRect(const GrPaint& paint,
const SkRect& dstRect,
const SkRect& localRect,
- const SkMatrix* dstMatrix,
const SkMatrix* localMatrix) {
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
- target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
+ target->drawRect(dstRect, &localRect, localMatrix);
}
namespace {
{kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};
+static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint);
+static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor);
+
extern const GrVertexAttrib gPosColorAttribs[] = {
{kVec2f_GrVertexAttribType, 0, kPosition_GrVertexAttribBinding},
{kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};
+static const size_t kPosAttribsSize = sizeof(SkPoint);
+static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor);
+
static void set_vertex_attributes(GrDrawState* drawState,
const SkPoint* texCoords,
const GrColor* colors,
*texOffset = -1;
*colorOffset = -1;
- if (NULL != texCoords && NULL != colors) {
+ if (texCoords && colors) {
*texOffset = sizeof(SkPoint);
*colorOffset = 2*sizeof(SkPoint);
- drawState->setVertexAttribs<gPosUVColorAttribs>(3);
- } else if (NULL != texCoords) {
+ drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize);
+ } else if (texCoords) {
*texOffset = sizeof(SkPoint);
- drawState->setVertexAttribs<gPosUVColorAttribs>(2);
- } else if (NULL != colors) {
+ drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize);
+ } else if (colors) {
*colorOffset = sizeof(SkPoint);
- drawState->setVertexAttribs<gPosColorAttribs>(2);
+ drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize);
} else {
- drawState->setVertexAttribs<gPosColorAttribs>(1);
+ drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize);
}
}
AutoCheckFlush acf(this);
GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GrDrawState* drawState = target->drawState();
GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
int colorOffset = -1, texOffset = -1;
set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
- size_t vertexSize = drawState->getVertexSize();
- if (sizeof(SkPoint) != vertexSize) {
- if (!geo.set(target, vertexCount, 0)) {
- GrPrintf("Failed to get space for vertices!\n");
- return;
- }
- void* curVertex = geo.vertices();
+ size_t VertexStride = drawState->getVertexStride();
+ if (!geo.set(target, vertexCount, indexCount)) {
+ SkDebugf("Failed to get space for vertices!\n");
+ return;
+ }
+ void* curVertex = geo.vertices();
- for (int i = 0; i < vertexCount; ++i) {
- *((SkPoint*)curVertex) = positions[i];
+ for (int i = 0; i < vertexCount; ++i) {
+ *((SkPoint*)curVertex) = positions[i];
- if (texOffset >= 0) {
- *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
- }
- if (colorOffset >= 0) {
- *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
- }
- curVertex = (void*)((intptr_t)curVertex + vertexSize);
+ if (texOffset >= 0) {
+ *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
}
- } else {
- target->setVertexSourceToArray(positions, vertexCount);
+ if (colorOffset >= 0) {
+ *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
+ }
+ curVertex = (void*)((intptr_t)curVertex + VertexStride);
}
// we don't currently apply offscreen AA to this path. Need improved
// management of GrDrawTarget's geometry to avoid copying points per-tile.
-
- if (NULL != indices) {
- target->setIndexSourceToArray(indices, indexCount);
+ if (indices) {
+ uint16_t* curIndex = (uint16_t*)geo.indices();
+ for (int i = 0; i < indexCount; ++i) {
+ curIndex[i] = indices[i];
+ }
target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
- target->resetIndexSource();
} else {
target->drawNonIndexed(primitiveType, 0, vertexCount);
}
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
static bool is_nested_rects(GrDrawTarget* target,
const SkPath& path,
const SkStrokeRec& stroke,
- SkRect rects[2],
- bool* useVertexCoverage) {
+ SkRect rects[2]) {
SkASSERT(stroke.isFillStyle());
if (path.isInverseFillType()) {
return false;
}
- *useVertexCoverage = false;
- if (!target->getDrawState().canTweakAlphaForCoverage()) {
- if (target->shouldDisableCoverageAAForBlend()) {
- return false;
- } else {
- *useVertexCoverage = true;
- }
+ if (!target->getDrawState().canTweakAlphaForCoverage() &&
+ target->shouldDisableCoverageAAForBlend()) {
+ return false;
}
SkPath::Direction dirs[2];
const SkScalar* outer = rects[0].asScalars();
const SkScalar* inner = rects[1].asScalars();
+ bool allEq = true;
+
SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
+ bool allGoE1 = margin >= SK_Scalar1;
+
for (int i = 1; i < 4; ++i) {
SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
+ if (temp < SK_Scalar1) {
+ allGoE1 = false;
+ }
if (!SkScalarNearlyEqual(margin, temp)) {
- return false;
+ allEq = false;
}
}
- return true;
+ return allEq || allGoE1;
}
void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {
if (path.isLine(pts)) {
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GrDrawState* drawState = target->drawState();
SkMatrix origViewMatrix = drawState->getViewMatrix();
// OK.
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
+ GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
GrDrawState* drawState = target->drawState();
- GR_CREATE_TRACE_MARKER("GrContext::drawPath", target);
+ GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());
const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();
if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
// Concave AA paths are expensive - try to avoid them for special cases
- bool useVertexCoverage;
SkRect rects[2];
- if (is_nested_rects(target, path, strokeRec, rects, &useVertexCoverage)) {
+ if (is_nested_rects(target, path, strokeRec, rects)) {
SkMatrix origViewMatrix = drawState->getViewMatrix();
GrDrawState::AutoViewMatrixRestore avmr;
if (!avmr.setIdentity(target->drawState())) {
return;
}
- fAARectRenderer->fillAANestedRects(this->getGpu(), target,
- rects,
- origViewMatrix,
- useVertexCoverage);
+ fAARectRenderer->fillAANestedRects(target, rects, origViewMatrix);
return;
}
}
if (NULL == pr) {
#ifdef SK_DEBUG
- GrPrintf("Unable to find path renderer compatible with path.\n");
+ SkDebugf("Unable to find path renderer compatible with path.\n");
#endif
return;
}
} else {
fDrawBuffer->flush();
}
+ fResourceCache->purgeAsNeeded();
fFlushToReduceCacheSize = false;
}
-bool GrContext::writeTexturePixels(GrTexture* texture,
+bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
+ const void* inPixels, size_t outRowBytes, void* outPixels) {
+ SkSrcPixelInfo srcPI;
+ if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
+ return false;
+ }
+ srcPI.fAlphaType = kUnpremul_SkAlphaType;
+ srcPI.fPixels = inPixels;
+ srcPI.fRowBytes = inRowBytes;
+
+ SkDstPixelInfo dstPI;
+ dstPI.fColorType = srcPI.fColorType;
+ dstPI.fAlphaType = kPremul_SkAlphaType;
+ dstPI.fPixels = outPixels;
+ dstPI.fRowBytes = outRowBytes;
+
+ return srcPI.convertPixelsTo(&dstPI, width, height);
+}
+
+bool GrContext::writeSurfacePixels(GrSurface* surface,
int left, int top, int width, int height,
- GrPixelConfig config, const void* buffer, size_t rowBytes,
- uint32_t flags) {
- ASSERT_OWNED_RESOURCE(texture);
-
- if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
- if (NULL != texture->asRenderTarget()) {
- return this->writeRenderTargetPixels(texture->asRenderTarget(),
- left, top, width, height,
- config, buffer, rowBytes, flags);
- } else {
- return false;
+ GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
+ uint32_t pixelOpsFlags) {
+
+ {
+ GrTexture* texture = NULL;
+ if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
+ fGpu->canWriteTexturePixels(texture, srcConfig)) {
+
+ if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
+ surface->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+ return fGpu->writeTexturePixels(texture, left, top, width, height,
+ srcConfig, buffer, rowBytes);
+ // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
+ // upload is already flushed.
}
}
- if (!(kDontFlush_PixelOpsFlag & flags)) {
- this->flush();
+ // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
+ GrRenderTarget* renderTarget = surface->asRenderTarget();
+ if (NULL == renderTarget) {
+ return false;
}
- return fGpu->writeTexturePixels(texture, left, top, width, height,
- config, buffer, rowBytes);
-}
+ // We ignore the preferred config unless it is a R/B swap of the src config. In that case
+ // we will upload the original src data to a scratch texture but we will spoof it as the swapped
+ // config. This scratch will then have R and B swapped. We correct for this by swapping again
+ // when drawing the scratch to the dst using a conversion effect.
+ bool swapRAndB = false;
+ GrPixelConfig writeConfig = srcConfig;
+ if (GrPixelConfigSwapRAndB(srcConfig) ==
+ fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
+ writeConfig = GrPixelConfigSwapRAndB(srcConfig);
+ swapRAndB = true;
+ }
-bool GrContext::readTexturePixels(GrTexture* texture,
- int left, int top, int width, int height,
- GrPixelConfig config, void* buffer, size_t rowBytes,
- uint32_t flags) {
- ASSERT_OWNED_RESOURCE(texture);
+ GrSurfaceDesc desc;
+ desc.fWidth = width;
+ desc.fHeight = height;
+ desc.fConfig = writeConfig;
+ SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
+ if (!texture) {
+ return false;
+ }
- GrRenderTarget* target = texture->asRenderTarget();
- if (NULL != target) {
- return this->readRenderTargetPixels(target,
- left, top, width, height,
- config, buffer, rowBytes,
- flags);
- } else {
- // TODO: make this more efficient for cases where we're reading the entire
- // texture, i.e., use GetTexImage() instead
+ SkAutoTUnref<const GrFragmentProcessor> fp;
+ SkMatrix textureMatrix;
+ textureMatrix.setIDiv(texture->width(), texture->height());
- // create scratch rendertarget and read from that
- GrAutoScratchTexture ast;
- GrTextureDesc desc;
- desc.fFlags = kRenderTarget_GrTextureFlagBit;
- desc.fWidth = width;
- desc.fHeight = height;
- desc.fConfig = config;
- desc.fOrigin = kTopLeft_GrSurfaceOrigin;
- ast.set(this, desc, kExact_ScratchTexMatch);
- GrTexture* dst = ast.texture();
- if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
- this->copyTexture(texture, target, NULL);
- return this->readRenderTargetPixels(target,
- left, top, width, height,
- config, buffer, rowBytes,
- flags);
+ // allocate a tmp buffer and sw convert the pixels to premul
+ SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
+
+ if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
+ if (!GrPixelConfigIs8888(srcConfig)) {
+ return false;
}
+ fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
+ // handle the unpremul step on the CPU if we couldn't create an effect to do it.
+ if (NULL == fp) {
+ size_t tmpRowBytes = 4 * width;
+ tmpPixels.reset(width * height);
+ if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
+ tmpPixels.get())) {
+ return false;
+ }
+ rowBytes = tmpRowBytes;
+ buffer = tmpPixels.get();
+ }
+ }
+ if (NULL == fp) {
+ fp.reset(GrConfigConversionEffect::Create(texture,
+ swapRAndB,
+ GrConfigConversionEffect::kNone_PMConversion,
+ textureMatrix));
+ }
+ // Even if the client told us not to flush, we still flush here. The client may have known that
+ // writes to the original surface caused no data hazards, but they can't know that the scratch
+ // we just got is safe.
+ if (texture->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+ if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
+ writeConfig, buffer, rowBytes)) {
return false;
}
-}
-#include "SkConfig8888.h"
+ SkMatrix matrix;
+ matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
+
+ // This function can be called in the midst of drawing another object (e.g., when uploading a
+ // SW-rasterized clip while issuing a draw). So we push the current geometry state before
+ // drawing a rect to the render target.
+ // The bracket ensures we pop the stack if we wind up flushing below.
+ {
+ GrDrawTarget* drawTarget = this->prepareToDraw(NULL, NULL, NULL);
+ GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit,
+ &matrix);
+ GrDrawState* drawState = drawTarget->drawState();
+ drawState->addColorProcessor(fp);
+ drawState->setRenderTarget(renderTarget);
+ drawState->disableState(GrDrawState::kClip_StateBit);
+ drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
+ }
+
+ if (kFlushWrites_PixelOp & pixelOpsFlags) {
+ this->flushSurfaceWrites(surface);
+ }
+
+ return true;
+}
// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
uint32_t flags) {
ASSERT_OWNED_RESOURCE(target);
+ SkASSERT(target);
- if (NULL == target) {
- target = fRenderTarget.get();
- if (NULL == target) {
- return false;
- }
- }
-
- if (!(kDontFlush_PixelOpsFlag & flags)) {
+ if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
this->flush();
}
// conversions in the draw we set the corresponding bool to false so that we don't reapply it
// on the read back pixels.
GrTexture* src = target->asTexture();
- GrAutoScratchTexture ast;
- if (NULL != src && (swapRAndB || unpremul || flipY)) {
- // Make the scratch a render target because we don't have a robust readTexturePixels as of
- // yet. It calls this function.
- GrTextureDesc desc;
- desc.fFlags = kRenderTarget_GrTextureFlagBit;
+ if (src && (swapRAndB || unpremul || flipY)) {
+ // Make the scratch a render target so we can read its pixels.
+ GrSurfaceDesc desc;
+ desc.fFlags = kRenderTarget_GrSurfaceFlag;
desc.fWidth = width;
desc.fHeight = height;
desc.fConfig = readConfig;
fGpu->fullReadPixelsIsFasterThanPartial()) {
match = kExact_ScratchTexMatch;
}
- ast.set(this, desc, match);
- GrTexture* texture = ast.texture();
+ SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, match));
if (texture) {
// compute a matrix to perform the draw
SkMatrix textureMatrix;
textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
textureMatrix.postIDiv(src->width(), src->height());
- SkAutoTUnref<const GrEffectRef> effect;
+ SkAutoTUnref<const GrFragmentProcessor> fp;
if (unpremul) {
- effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
- if (NULL != effect) {
+ fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
+ if (fp) {
unpremul = false; // we no longer need to do this on CPU after the read back.
}
}
// If we failed to create a PM->UPM effect and have no other conversions to perform then
// there is no longer any point to using the scratch.
- if (NULL != effect || flipY || swapRAndB) {
- if (!effect) {
- effect.reset(GrConfigConversionEffect::Create(
- src,
- swapRAndB,
- GrConfigConversionEffect::kNone_PMConversion,
- textureMatrix));
+ if (fp || flipY || swapRAndB) {
+ if (!fp) {
+ fp.reset(GrConfigConversionEffect::Create(
+ src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
+ textureMatrix));
}
swapRAndB = false; // we will handle the swap in the draw.
// We protect the existing geometry here since it may not be
// clear to the caller that a draw operation (i.e., drawSimpleRect)
// can be invoked in this method
- GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
- GrDrawState* drawState = fGpu->drawState();
- SkASSERT(effect);
- drawState->addColorEffect(effect);
-
- drawState->setRenderTarget(texture->asRenderTarget());
- SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
- fGpu->drawSimpleRect(rect, NULL);
- // we want to read back from the scratch's origin
- left = 0;
- top = 0;
- target = texture->asRenderTarget();
+ {
+ GrDrawTarget::AutoGeometryAndStatePush agasp(fDrawBuffer,
+ GrDrawTarget::kReset_ASRInit);
+ GrDrawState* drawState = fDrawBuffer->drawState();
+ SkASSERT(fp);
+ drawState->addColorProcessor(fp);
+
+ drawState->setRenderTarget(texture->asRenderTarget());
+ SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
+ fDrawBuffer->drawSimpleRect(rect);
+ // we want to read back from the scratch's origin
+ left = 0;
+ top = 0;
+ target = texture->asRenderTarget();
+ }
+ this->flushSurfaceWrites(target);
}
}
}
+
if (!fGpu->readPixels(target,
left, top, width, height,
readConfig, buffer, rowBytes)) {
return true;
}
-void GrContext::resolveRenderTarget(GrRenderTarget* target) {
- SkASSERT(target);
- ASSERT_OWNED_RESOURCE(target);
- // In the future we may track whether there are any pending draws to this
- // target. We don't today so we always perform a flush. We don't promise
- // this to our clients, though.
- this->flush();
- fGpu->resolveRenderTarget(target);
+void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
+ SkASSERT(surface);
+ ASSERT_OWNED_RESOURCE(surface);
+ if (surface->surfacePriv().hasPendingIO()) {
+ this->flush();
+ }
+ GrRenderTarget* rt = surface->asRenderTarget();
+ if (fGpu && rt) {
+ fGpu->resolveRenderTarget(rt);
+ }
}
-void GrContext::discardRenderTarget(GrRenderTarget* target) {
- SkASSERT(target);
- ASSERT_OWNED_RESOURCE(target);
+void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
+ SkASSERT(renderTarget);
+ ASSERT_OWNED_RESOURCE(renderTarget);
AutoRestoreEffects are;
AutoCheckFlush acf(this);
- this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->discard(target);
+ GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
+ if (NULL == target) {
+ return;
+ }
+ target->discard(renderTarget);
}
-void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
+void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
+ const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
if (NULL == src || NULL == dst) {
return;
}
ASSERT_OWNED_RESOURCE(src);
+ ASSERT_OWNED_RESOURCE(dst);
- // Writes pending to the source texture are not tracked, so a flush
- // is required to ensure that the copy captures the most recent contents
- // of the source texture. See similar behavior in
- // GrContext::resolveRenderTarget.
- this->flush();
-
- GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
- GrDrawState* drawState = fGpu->drawState();
- drawState->setRenderTarget(dst);
- SkMatrix sampleM;
- sampleM.setIDiv(src->width(), src->height());
- SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
- if (NULL != topLeft) {
- srcRect.offset(*topLeft);
- }
- SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
- if (!srcRect.intersect(srcBounds)) {
- return;
- }
- sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
- drawState->addColorTextureEffect(src, sampleM);
- SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
- fGpu->drawSimpleRect(dstR, NULL);
-}
-
-bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
- int left, int top, int width, int height,
- GrPixelConfig srcConfig,
- const void* buffer,
- size_t rowBytes,
- uint32_t flags) {
- ASSERT_OWNED_RESOURCE(target);
+ // Since we're going to the draw target and not the GPU, there is no need to
+ // check kDontFlush here.
+ GrDrawTarget* target = this->prepareToDraw(NULL, NULL, NULL);
if (NULL == target) {
- target = fRenderTarget.get();
- if (NULL == target) {
- return false;
- }
- }
-
- // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
- // desktop GL).
-
- // We will always call some form of writeTexturePixels and we will pass our flags on to it.
- // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
- // set.)
-
- // If the RT is also a texture and we don't have to premultiply then take the texture path.
- // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
- // we do below.
-
-#if !defined(SK_BUILD_FOR_MAC)
- // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
- // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
- // HW is affected.
- if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
- fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
- return this->writeTexturePixels(target->asTexture(),
- left, top, width, height,
- srcConfig, buffer, rowBytes, flags);
- }
-#endif
-
- // We ignore the preferred config unless it is a R/B swap of the src config. In that case
- // we will upload the original src data to a scratch texture but we will spoof it as the swapped
- // config. This scratch will then have R and B swapped. We correct for this by swapping again
- // when drawing the scratch to the dst using a conversion effect.
- bool swapRAndB = false;
- GrPixelConfig writeConfig = srcConfig;
- if (GrPixelConfigSwapRAndB(srcConfig) ==
- fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
- writeConfig = GrPixelConfigSwapRAndB(srcConfig);
- swapRAndB = true;
- }
-
- GrTextureDesc desc;
- desc.fWidth = width;
- desc.fHeight = height;
- desc.fConfig = writeConfig;
- GrAutoScratchTexture ast(this, desc);
- GrTexture* texture = ast.texture();
- if (NULL == texture) {
- return false;
+ return;
}
+ target->copySurface(dst, src, srcRect, dstPoint);
- SkAutoTUnref<const GrEffectRef> effect;
- SkMatrix textureMatrix;
- textureMatrix.setIDiv(texture->width(), texture->height());
-
- // allocate a tmp buffer and sw convert the pixels to premul
- SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
-
- if (kUnpremul_PixelOpsFlag & flags) {
- if (!GrPixelConfigIs8888(srcConfig)) {
- return false;
- }
- effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
- // handle the unpremul step on the CPU if we couldn't create an effect to do it.
- if (NULL == effect) {
- SkSrcPixelInfo srcPI;
- if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
- return false;
- }
- srcPI.fAlphaType = kUnpremul_SkAlphaType;
- srcPI.fPixels = buffer;
- srcPI.fRowBytes = rowBytes;
-
- tmpPixels.reset(width * height);
-
- SkDstPixelInfo dstPI;
- dstPI.fColorType = srcPI.fColorType;
- dstPI.fAlphaType = kPremul_SkAlphaType;
- dstPI.fPixels = tmpPixels.get();
- dstPI.fRowBytes = 4 * width;
-
- if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
- return false;
- }
-
- buffer = tmpPixels.get();
- rowBytes = 4 * width;
- }
- }
- if (NULL == effect) {
- effect.reset(GrConfigConversionEffect::Create(texture,
- swapRAndB,
- GrConfigConversionEffect::kNone_PMConversion,
- textureMatrix));
+ if (kFlushWrites_PixelOp & pixelOpsFlags) {
+ this->flush();
}
+}
- if (!this->writeTexturePixels(texture,
- 0, 0, width, height,
- writeConfig, buffer, rowBytes,
- flags & ~kUnpremul_PixelOpsFlag)) {
- return false;
+void GrContext::flushSurfaceWrites(GrSurface* surface) {
+ if (surface->surfacePriv().hasPendingWrite()) {
+ this->flush();
}
-
- // writeRenderTargetPixels can be called in the midst of drawing another
- // object (e.g., when uploading a SW path rendering to the gpu while
- // drawing a rect) so preserve the current geometry.
- SkMatrix matrix;
- matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
- GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
- GrDrawState* drawState = fGpu->drawState();
- SkASSERT(effect);
- drawState->addColorEffect(effect);
-
- drawState->setRenderTarget(target);
-
- fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
- return true;
}
+
////////////////////////////////////////////////////////////////////////////////
GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
- BufferedDraw buffered,
AutoRestoreEffects* are,
AutoCheckFlush* acf) {
// All users of this draw state should be freeing up all effects when they're done.
// Otherwise effects that own resources may keep those resources alive indefinitely.
- SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());
+ SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
+ !fDrawState->hasGeometryProcessor());
- if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
- fDrawBuffer->flush();
- fLastDrawWasBuffered = kNo_BufferedDraw;
+ if (NULL == fGpu) {
+ return NULL;
}
+
ASSERT_OWNED_RESOURCE(fRenderTarget.get());
- if (NULL != paint) {
- SkASSERT(NULL != are);
- SkASSERT(NULL != acf);
+ if (paint) {
+ SkASSERT(are);
+ SkASSERT(acf);
are->set(fDrawState);
fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
if ((paint->hasMask() || 0xff != paint->fCoverage) &&
- !fGpu->canApplyCoverage()) {
- GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
+ !fDrawState->couldApplyCoverage(fGpu->caps())) {
+ SkDebugf("Partial pixel coverage will be incorrectly blended.\n");
}
#endif
+ // Clear any vertex attributes configured for the previous use of the
+ // GrDrawState, which can affect which blend optimizations are in effect.
+ fDrawState->setDefaultVertexAttribs();
} else {
fDrawState->reset(fViewMatrix);
fDrawState->setRenderTarget(fRenderTarget.get());
}
- GrDrawTarget* target;
- if (kYes_BufferedDraw == buffered) {
- fLastDrawWasBuffered = kYes_BufferedDraw;
- target = fDrawBuffer;
- } else {
- SkASSERT(kNo_BufferedDraw == buffered);
- fLastDrawWasBuffered = kNo_BufferedDraw;
- target = fGpu;
- }
- fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
+ fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
!fClip->fClipStack->isWideOpen());
- target->setClip(fClip);
- SkASSERT(fDrawState == target->drawState());
- return target;
+ fDrawBuffer->setClip(fClip);
+ SkASSERT(fDrawState == fDrawBuffer->drawState());
+ return fDrawBuffer;
}
/*
}
GrDrawTarget* GrContext::getTextTarget() {
- return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
+ return this->prepareToDraw(NULL, NULL, NULL);
}
const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
}
}
-const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
- bool swapRAndB,
- const SkMatrix& matrix) {
+const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
+ bool swapRAndB,
+ const SkMatrix& matrix) {
if (!fDidTestPMConversions) {
test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
fDidTestPMConversions = true;
}
}
-const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
- bool swapRAndB,
- const SkMatrix& matrix) {
+const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
+ bool swapRAndB,
+ const SkMatrix& matrix) {
if (!fDidTestPMConversions) {
test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
fDidTestPMConversions = true;
}
}
-GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
- SkASSERT(fGpu->caps()->pathRenderingSupport());
-
- // TODO: now we add to fResourceCache. This should change to fResourceCache.
- GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
- GrPath* path = static_cast<GrPath*>(fResourceCache->find(resourceKey));
- if (NULL != path && path->isEqualTo(inPath, stroke)) {
- path->ref();
- } else {
- path = fGpu->createPath(inPath, stroke);
- fResourceCache->purgeAsNeeded(1, path->gpuMemorySize());
- fResourceCache->addResource(resourceKey, path);
- }
- return path;
-}
-
-void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrCacheable* resource) {
- fResourceCache->purgeAsNeeded(1, resource->gpuMemorySize());
+void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
fResourceCache->addResource(resourceKey, resource);
}
-GrCacheable* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
- GrCacheable* resource = fResourceCache->find(resourceKey);
+GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
+ GrGpuResource* resource = fResourceCache->find(resourceKey);
SkSafeRef(resource);
return resource;
}
void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
fGpu->addGpuTraceMarker(marker);
- if (NULL != fDrawBuffer) {
+ if (fDrawBuffer) {
fDrawBuffer->addGpuTraceMarker(marker);
}
}
void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
fGpu->removeGpuTraceMarker(marker);
- if (NULL != fDrawBuffer) {
+ if (fDrawBuffer) {
fDrawBuffer->removeGpuTraceMarker(marker);
}
}
fResourceCache->printStats();
}
#endif
+
+#if GR_GPU_STATS
+const GrContext::GPUStats* GrContext::gpuStats() const {
+ return fGpu->gpuStats();
+}
+#endif
+