Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src / third_party / skia / src / gpu / GrContext.cpp
1
2 /*
3  * Copyright 2011 Google Inc.
4  *
5  * Use of this source code is governed by a BSD-style license that can be
6  * found in the LICENSE file.
7  */
8
9
10 #include "GrContext.h"
11
12 #include "effects/GrSingleTextureEffect.h"
13 #include "effects/GrConfigConversionEffect.h"
14
15 #include "GrAARectRenderer.h"
16 #include "GrBufferAllocPool.h"
17 #include "GrGpu.h"
18 #include "GrDrawTargetCaps.h"
19 #include "GrIndexBuffer.h"
20 #include "GrInOrderDrawBuffer.h"
21 #include "GrLayerCache.h"
22 #include "GrOvalRenderer.h"
23 #include "GrPathRenderer.h"
24 #include "GrPathUtils.h"
25 #include "GrResourceCache.h"
26 #include "GrSoftwarePathRenderer.h"
27 #include "GrStencilBuffer.h"
28 #include "GrTextStrike.h"
29 #include "GrTracing.h"
30 #include "SkGr.h"
31 #include "SkRTConf.h"
32 #include "SkRRect.h"
33 #include "SkStrokeRec.h"
34 #include "SkTLazy.h"
35 #include "SkTLS.h"
36 #include "SkTraceEvent.h"
37
// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

// Maps the runtime "defer" config flag onto the BufferedDraw enum passed to prepareToDraw().
#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

// Texture/resource cache budget: max cached object count and max total bytes.
static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

// Sizing for the vertex buffer pool backing the deferred draw buffer.
static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

// Sizing for the index buffer pool backing the deferred draw buffer.
static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

// Asserts that a resource, when non-NULL, is owned by this GrContext.
#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)
63
// Glorified typedef to avoid including GrDrawState.h in GrContext.h;
// forwards entirely to GrDrawState::AutoRestoreEffects.
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};
66
67 class GrContext::AutoCheckFlush {
68 public:
69     AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(NULL != context); }
70
71     ~AutoCheckFlush() {
72         if (fContext->fFlushToReduceCacheSize) {
73             fContext->flush();
74         }
75     }
76
77 private:
78     GrContext* fContext;
79 };
80
81 GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext) {
82     GrContext* context = SkNEW(GrContext);
83     if (context->init(backend, backendContext)) {
84         return context;
85     } else {
86         context->unref();
87         return NULL;
88     }
89 }
90
// Constructor only NULLs/zeroes members; all real setup happens in init(),
// so a failed init() leaves the object safely destructible.
GrContext::GrContext() {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fTextureCache = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    // Effectively "no override" until changed: getMaxTextureSize() takes the
    // min of this value and the hardware cap.
    fMaxTextureSizeOverride = 1 << 20;
    fGpuTracingEnabled = false;
}
109
// Second-stage construction: creates the backend GrGpu and all context-owned
// subsystems. Returns false (leaving the context destructible) if the GPU
// object cannot be created for this backend.
bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    // Budgeted resource cache; OverbudgetCB schedules a flush (via
    // AutoCheckFlush) when the budget is exceeded.
    fTextureCache = SkNEW_ARGS(GrResourceCache,
                               (MAX_RESOURCE_CACHE_COUNT,
                                MAX_RESOURCE_CACHE_BYTES));
    fTextureCache->setOverbudgetCallback(OverbudgetCB, this);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (fGpu)));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    // Creates fDrawBuffer and its vertex/index alloc pools.
    this->setupDrawBuffer();

    return true;
}
141
// Tears down everything init() created. A NULL fGpu means init() failed (or
// never ran), in which case there is nothing to release.
GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    // Submit any deferred work before the draw buffer is destroyed.
    this->flush();

    // Run client-registered cleanup callbacks while the context is still alive.
    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    // Since the gpu can hold scratch textures, give it a chance to let go
    // of them before freeing the texture cache
    fGpu->purgeResources();

    delete fTextureCache;
    fTextureCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}
172
// Called when the underlying 3D API context was lost but this GrContext will
// keep being used: drop everything tied to the dead API context, then rebuild
// the draw buffer so rendering can resume.
void GrContext::contextLost() {
    this->contextDestroyed();
    this->setupDrawBuffer();
}

// Releases all GPU-backed state. The backend objects are abandoned (not
// deleted through the API) because the API context is already gone.
void GrContext::contextDestroyed() {
    // abandon first so destructors don't try to free the resources in the API.
    fGpu->abandonResources();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fLayerCache->freeAll();
    fGpu->markContextDirty();
}
206
// Marks backend 3D API state dirty so it is re-sent before the next draw.
// 'state' is a bitfield selecting which state categories to invalidate.
void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

// Frees GPU resources that can be recreated on demand (caches, rect/oval
// renderers, path renderers), e.g. under memory pressure. The context stays
// fully usable afterwards.
void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fTextureCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}
226
227 size_t GrContext::getGpuTextureCacheBytes() const {
228   return fTextureCache->getCachedResourceBytes();
229 }
230
231 int GrContext::getGpuTextureCacheResourceCount() const {
232   return fTextureCache->getCachedResourceCount();
233 }
234
235 ////////////////////////////////////////////////////////////////////////////////
236
237 GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
238                                         const GrCacheID& cacheID,
239                                         const GrTextureParams* params) {
240     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
241     GrCacheable* resource = fTextureCache->find(resourceKey);
242     SkSafeRef(resource);
243     return static_cast<GrTexture*>(resource);
244 }
245
246 bool GrContext::isTextureInCache(const GrTextureDesc& desc,
247                                  const GrCacheID& cacheID,
248                                  const GrTextureParams* params) const {
249     GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);
250     return fTextureCache->hasKey(resourceKey);
251 }
252
253 void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
254     ASSERT_OWNED_RESOURCE(sb);
255
256     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
257                                                             sb->height(),
258                                                             sb->numSamples());
259     fTextureCache->addResource(resourceKey, sb);
260 }
261
262 GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
263                                               int sampleCnt) {
264     GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
265                                                             height,
266                                                             sampleCnt);
267     GrCacheable* resource = fTextureCache->find(resourceKey);
268     return static_cast<GrStencilBuffer*>(resource);
269 }
270
// Nearest-neighbor stretch of a srcW x srcH pixel block into dstW x dstH.
// Steps through the source in 16.16 fixed point (SkFixed, i.e. int32_t),
// starting half a step in so samples land on pixel centers. No filtering.
// 'bpp' is the byte size of one pixel; both surfaces are assumed tightly
// packed (rowBytes == width * bpp). 'src' is read-only, hence const.
static void stretchImage(void* dst,
                         int dstW,
                         int dstH,
                         const void* src,
                         int srcW,
                         int srcH,
                         size_t bpp) {
    // Compute the steps with a 64-bit intermediate: 'srcW << 16' is signed
    // shift overflow (UB) once srcW >= 32768, which 16K+ textures can reach.
    const int32_t dx = (int32_t)(((int64_t)srcW << 16) / dstW);
    const int32_t dy = (int32_t)(((int64_t)srcH << 16) / dstH);

    int32_t y = dy >> 1;  // start at the center of the first source row

    const size_t dstXLimit = dstW * bpp;
    for (int j = 0; j < dstH; ++j) {
        int32_t x = dx >> 1;  // center of the first source column
        const uint8_t* srcRow = (const uint8_t*)src + (y >> 16) * srcW * bpp;
        uint8_t* dstRow = (uint8_t*)dst + j * dstW * bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x >> 16) * bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}
297
namespace {

// Vertex layout for createResizedTexture()'s GPU blit: a 2-float position
// followed by a 2-float local (texture) coordinate.
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

};
307
308 // The desired texture is NPOT and tiled but that isn't supported by
309 // the current hardware. Resize the texture to be a POT
310 GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
311                                            const GrCacheID& cacheID,
312                                            void* srcData,
313                                            size_t rowBytes,
314                                            bool filter) {
315     SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
316     if (NULL == clampedTexture) {
317         clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));
318
319         if (NULL == clampedTexture) {
320             return NULL;
321         }
322     }
323
324     GrTextureDesc rtDesc = desc;
325     rtDesc.fFlags =  rtDesc.fFlags |
326                      kRenderTarget_GrTextureFlagBit |
327                      kNoStencil_GrTextureFlagBit;
328     rtDesc.fWidth  = GrNextPow2(desc.fWidth);
329     rtDesc.fHeight = GrNextPow2(desc.fHeight);
330
331     GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);
332
333     if (NULL != texture) {
334         GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
335         GrDrawState* drawState = fGpu->drawState();
336         drawState->setRenderTarget(texture->asRenderTarget());
337
338         // if filtering is not desired then we want to ensure all
339         // texels in the resampled image are copies of texels from
340         // the original.
341         GrTextureParams params(SkShader::kClamp_TileMode, filter ? GrTextureParams::kBilerp_FilterMode :
342                                                                    GrTextureParams::kNone_FilterMode);
343         drawState->addColorTextureEffect(clampedTexture, SkMatrix::I(), params);
344
345         drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs));
346
347         GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);
348
349         if (arg.succeeded()) {
350             SkPoint* verts = (SkPoint*) arg.vertices();
351             verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
352             verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
353             fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
354         }
355     } else {
356         // TODO: Our CPU stretch doesn't filter. But we create separate
357         // stretched textures when the texture params is either filtered or
358         // not. Either implement filtered stretch blit on CPU or just create
359         // one when FBO case fails.
360
361         rtDesc.fFlags = kNone_GrTextureFlags;
362         // no longer need to clamp at min RT size.
363         rtDesc.fWidth  = GrNextPow2(desc.fWidth);
364         rtDesc.fHeight = GrNextPow2(desc.fHeight);
365         size_t bpp = GrBytesPerPixel(desc.fConfig);
366         SkAutoSMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
367         stretchImage(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
368                      srcData, desc.fWidth, desc.fHeight, bpp);
369
370         size_t stretchedRowBytes = rtDesc.fWidth * bpp;
371
372         SkDEBUGCODE(GrTexture* texture = )fGpu->createTexture(rtDesc, stretchedPixels.get(),
373                                                               stretchedRowBytes);
374         SkASSERT(NULL != texture);
375     }
376
377     return texture;
378 }
379
// Creates a texture with the given contents and enters it into the resource
// cache under a key derived from (params, desc, cacheID). If the computed key
// says the texture needs resizing (e.g. NPOT + tiling on hardware without
// NPOT-tile support), a resized POT texture is built instead. On success the
// texture has been added to the cache and, if 'cacheKey' is non-NULL, the key
// is returned through it.
GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTexture::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexture::NeedsResizing(resourceKey)) {
        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexture::NeedsBilerp(resourceKey));
    } else {
        texture= fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (NULL != texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fTextureCache->purgeAsNeeded(1, texture->gpuMemorySize());
        fTextureCache->addResource(resourceKey, texture);

        if (NULL != cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}
410
411 static GrTexture* create_scratch_texture(GrGpu* gpu,
412                                          GrResourceCache* textureCache,
413                                          const GrTextureDesc& desc) {
414     GrTexture* texture = gpu->createTexture(desc, NULL, 0);
415     if (NULL != texture) {
416         GrResourceKey key = GrTexture::ComputeScratchKey(texture->desc());
417         // Adding a resource could put us overbudget. Try to free up the
418         // necessary space before adding it.
419         textureCache->purgeAsNeeded(1, texture->gpuMemorySize());
420         // Make the resource exclusive so future 'find' calls don't return it
421         textureCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
422     }
423     return texture;
424 }
425
// Returns a ref'ed scratch texture compatible with 'inDesc'. The texture is
// held exclusively (hidden from future cache 'find' calls) until returned via
// unlockScratchTexture()/addExistingTextureToCache(). With kApprox match the
// result may be larger than requested (pow2-binned with a minimum size).
GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    // kNoStencil may only be requested together with kRenderTarget.
    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fTextureCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrCacheable* resource = NULL;
    // Remember the (possibly binned) lookup dimensions so a failed search can
    // recreate with them below.
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTexture::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fTextureCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (NULL != resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode, relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        // Nothing cached matched: restore the caller's flags and the binned
        // dimensions used for the first lookup, then create a new texture.
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fTextureCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}
488
// Returns a texture that was handed out as an exclusive scratch texture
// back to the cache. Depending on whether scratch reuse is worthwhile the
// texture is recycled, deleted immediately, or flagged for deferred deletion.
void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(NULL != texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else if (texture->getDeferredRefCount() <= 0) {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fTextureCache->makeNonExclusive(texture->getCacheEntry());
        fTextureCache->deleteResource(texture->getCacheEntry());
    } else {
        // In this case (fDeferredRefCount > 0) but the cache is the only
        // one holding a real ref. Mark the object so when the deferred
        // ref count goes to 0 the texture will be deleted (remember
        // in this code path scratch textures aren't getting reused).
        texture->setNeedsDeferredUnref();
    }
}
521
522
// Gives up the caller's lock on a scratch texture obtained from
// lockAndRefScratchTexture(). Non-scratch textures are ignored.
void GrContext::unlockScratchTexture(GrTexture* texture) {
    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(NULL != texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || NULL != texture->asRenderTarget()) {
            // Recycle: make it findable again and trim the cache if needed.
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique() && texture->getDeferredRefCount() <= 0) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fTextureCache->makeNonExclusive(texture->getCacheEntry());
            fTextureCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case (fRefCnt > 1 || defRefCnt > 0) but we don't really
            // want to readd it to the cache (since it will never be reused).
            // Instead, give up the cache's ref and leave the decision up to
            // addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->setFlag((GrTextureFlags) GrTexture::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}
552
553 void GrContext::purgeCache() {
554     if (NULL != fTextureCache) {
555         fTextureCache->purgeAsNeeded();
556     }
557 }
558
559 bool GrContext::OverbudgetCB(void* data) {
560     SkASSERT(NULL != data);
561
562     GrContext* context = reinterpret_cast<GrContext*>(data);
563
564     // Flush the InOrderDrawBuffer to possibly free up some textures
565     context->fFlushToReduceCacheSize = true;
566
567     return true;
568 }
569
570
571 GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
572                                             void* srcData,
573                                             size_t rowBytes) {
574     GrTextureDesc descCopy = descIn;
575     return fGpu->createTexture(descCopy, srcData, rowBytes);
576 }
577
// Reports the texture cache's current limits (object count and total bytes).
void GrContext::getTextureCacheLimits(int* maxTextures,
                                      size_t* maxTextureBytes) const {
    fTextureCache->getLimits(maxTextures, maxTextureBytes);
}

// Updates the texture cache limits. NOTE(review): whether this purges
// immediately is decided inside GrResourceCache::setLimits.
void GrContext::setTextureCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fTextureCache->setLimits(maxTextures, maxTextureBytes);
}

// Maximum texture dimension: the hardware cap clamped by the test-only
// override set in the constructor (large enough to be a no-op by default).
int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

// Maximum render target dimension reported by the hardware caps.
int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

// Maximum MSAA sample count reported by the hardware caps.
int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}
598
599 ///////////////////////////////////////////////////////////////////////////////
600
// Wraps a client-created backend API texture in a GrTexture; thin
// pass-through to GrGpu. NOTE(review): ownership of the underlying API
// object is governed by GrGpu/desc flags -- confirm in GrGpu.
GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

// Wraps a client-created backend render target; thin pass-through to GrGpu.
GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}
608
609 ///////////////////////////////////////////////////////////////////////////////
610
611 bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
612                                           int width, int height) const {
613     const GrDrawTargetCaps* caps = fGpu->caps();
614     if (!caps->eightBitPaletteSupport()) {
615         return false;
616     }
617
618     bool isPow2 = GrIsPow2(width) && GrIsPow2(height);
619
620     if (!isPow2) {
621         bool tiled = NULL != params && params->isTiled();
622         if (tiled && !caps->npotTextureTileSupport()) {
623             return false;
624         }
625     }
626     return true;
627 }
628
629
630 ////////////////////////////////////////////////////////////////////////////////
631
// Issues a clear of 'rect' in 'target' to 'color', routed through the
// (possibly deferred) draw target. 'canIgnoreRect' lets the implementation
// clear the whole target when that is cheaper. NOTE(review): NULL rect /
// NULL target semantics are handled inside GrDrawTarget::clear -- confirm.
void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* target) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->clear(rect, color,
                                                                canIgnoreRect, target);
}
641
// Fills the entire current render target with 'origPaint' by drawing a rect
// that covers the target after mapping it through the inverse view matrix
// (or by temporarily resetting the matrix when it has perspective).
void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        // Perspective: draw with an identity view matrix instead; 'am'
        // restores the caller's matrix when it goes out of scope.
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}
674
#ifdef SK_DEVELOPER
// Debug aid (developer builds only): dumps the GPU font cache contents.
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif
680
681 ////////////////////////////////////////////////////////////////////////////////
682
/*  create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
                               SkScalar width) {
    // Each corner contributes an inner vertex (inset by half the stroke
    // width) and an outer vertex (outset by the same amount); the strip
    // alternates inner/outer around the rect.
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

// Returns true if all four edges of r lie exactly on integer coordinates.
static bool isIRect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}
709
// Decides whether a rect draw can use the coverage-ramp AA rect path.
// Returns true and writes the device-space bounds into 'devBoundRect' when
// AA should be applied; '*useVertexCoverage' is set when coverage must ride
// on vertex attributes because the blend can't be tweaked via alpha.
// strokeWidth < 0 means a filled rect (see caller GrContext::drawRect).
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect,
                             bool* useVertexCoverage) {
    // we use a simple coverage ramp to do aa on axis-aligned rects
    // we check if the rect will be axis-aligned, and the rect won't land on
    // integer coords.

    // we are keeping around the "tweak the alpha" trick because
    // it is our only hope for the fixed-pipe implementation.
    // In a shader implementation we can give a separate coverage input
    // TODO: remove this ugliness when we drop the fixed-pipe impl
    *useVertexCoverage = false;
    if (!target->getDrawState().canTweakAlphaForCoverage()) {
        if (target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
            //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
            return false;
        } else {
            *useVertexCoverage = true;
        }
    }
    const GrDrawState& drawState = target->getDrawState();
    // MSAA targets get AA from the hardware; skip the coverage path.
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

    // Hairline (width == 0) handled by HW AA lines when available.
    if (0 == strokeWidth && target->willUseHWAALines()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        // Stroked (or all rects when the rotated-fill optimization is off):
        // the matrix must keep the rect axis-aligned.
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        // Filled rects may be rotated as long as corners stay right angles.
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);

    if (strokeWidth < 0) {
        // Fill: AA is pointless when the device rect is pixel-aligned.
        return !isIRect(*devBoundRect);
    } else {
        return true;
    }
}
767
768 static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
769     return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
770            point.fY >= rect.fTop && point.fY <= rect.fBottom;
771 }
772
773 void GrContext::drawRect(const GrPaint& paint,
774                          const SkRect& rect,
775                          const SkStrokeRec* stroke,
776                          const SkMatrix* matrix) {
777     AutoRestoreEffects are;
778     AutoCheckFlush acf(this);
779     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
780
781     GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
782
783     SkScalar width = stroke == NULL ? -1 : stroke->getWidth();
784     SkMatrix combinedMatrix = target->drawState()->getViewMatrix();
785     if (NULL != matrix) {
786         combinedMatrix.preConcat(*matrix);
787     }
788
789     // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
790     // cases where the RT is fully inside a stroke.
791     if (width < 0) {
792         SkRect rtRect;
793         target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
794         SkRect clipSpaceRTRect = rtRect;
795         bool checkClip = false;
796         if (NULL != this->getClip()) {
797             checkClip = true;
798             clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
799                                    SkIntToScalar(this->getClip()->fOrigin.fY));
800         }
801         // Does the clip contain the entire RT?
802         if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
803             SkMatrix invM;
804             if (!combinedMatrix.invert(&invM)) {
805                 return;
806             }
807             // Does the rect bound the RT?
808             SkPoint srcSpaceRTQuad[4];
809             invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
810             if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
811                 rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
812                 rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
813                 rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
814                 // Will it blend?
815                 GrColor clearColor;
816                 if (paint.isOpaqueAndConstantColor(&clearColor)) {
817                     target->clear(NULL, clearColor, true);
818                     return;
819                 }
820             }
821         }
822     }
823
824     SkRect devBoundRect;
825     bool useVertexCoverage;
826     bool needAA = paint.isAntiAlias() &&
827                   !target->getDrawState().getRenderTarget()->isMultisampled();
828     bool doAA = needAA && apply_aa_to_rect(target, rect, width, combinedMatrix, &devBoundRect,
829                                            &useVertexCoverage);
830     if (doAA) {
831         GrDrawState::AutoViewMatrixRestore avmr;
832         if (!avmr.setIdentity(target->drawState())) {
833             return;
834         }
835         if (width >= 0) {
836             fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
837                                           combinedMatrix, devBoundRect,
838                                           stroke, useVertexCoverage);
839         } else {
840             // filled AA rect
841             fAARectRenderer->fillAARect(this->getGpu(), target,
842                                         rect, combinedMatrix, devBoundRect,
843                                         useVertexCoverage);
844         }
845         return;
846     }
847
848     if (width >= 0) {
849         // TODO: consider making static vertex buffers for these cases.
850         // Hairline could be done by just adding closing vertex to
851         // unitSquareVertexBuffer()
852
853         static const int worstCaseVertCount = 10;
854         target->drawState()->setDefaultVertexAttribs();
855         GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);
856
857         if (!geo.succeeded()) {
858             GrPrintf("Failed to get space for vertices!\n");
859             return;
860         }
861
862         GrPrimitiveType primType;
863         int vertCount;
864         SkPoint* vertex = geo.positions();
865
866         if (width > 0) {
867             vertCount = 10;
868             primType = kTriangleStrip_GrPrimitiveType;
869             setStrokeRectStrip(vertex, rect, width);
870         } else {
871             // hairline
872             vertCount = 5;
873             primType = kLineStrip_GrPrimitiveType;
874             vertex[0].set(rect.fLeft, rect.fTop);
875             vertex[1].set(rect.fRight, rect.fTop);
876             vertex[2].set(rect.fRight, rect.fBottom);
877             vertex[3].set(rect.fLeft, rect.fBottom);
878             vertex[4].set(rect.fLeft, rect.fTop);
879         }
880
881         GrDrawState::AutoViewMatrixRestore avmr;
882         if (NULL != matrix) {
883             GrDrawState* drawState = target->drawState();
884             avmr.set(drawState, *matrix);
885         }
886
887         target->drawNonIndexed(primType, 0, vertCount);
888     } else {
889         // filled BW rect
890         target->drawSimpleRect(rect, matrix);
891     }
892 }
893
894 void GrContext::drawRectToRect(const GrPaint& paint,
895                                const SkRect& dstRect,
896                                const SkRect& localRect,
897                                const SkMatrix* dstMatrix,
898                                const SkMatrix* localMatrix) {
899     AutoRestoreEffects are;
900     AutoCheckFlush acf(this);
901     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
902
903     GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);
904
905     target->drawRect(dstRect, dstMatrix, &localRect, localMatrix);
906 }
907
namespace {

// Interleaved vertex layout: position, then local (texture) coordinate, then
// color. These arrays have external linkage because they are used as non-type
// template arguments to GrDrawState::setVertexAttribs below.
extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};

// Interleaved vertex layout: position followed by color (no local coords).
extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};

// Installs the vertex attribute layout matching the optional per-vertex data
// passed to GrContext::drawVertices. On return, *colorOffset and *texOffset
// hold the byte offset of the color / local-coord component within a vertex,
// or -1 when that component is absent from the layout.
static void set_vertex_attributes(GrDrawState* drawState,
                                  const SkPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (NULL != texCoords && NULL != colors) {
        // Position + local coord + color.
        *texOffset = sizeof(SkPoint);
        *colorOffset = 2*sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3);
    } else if (NULL != texCoords) {
        // Use just the first two entries (position + local coord) of the
        // three-entry array.
        *texOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2);
    } else if (NULL != colors) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2);
    } else {
        // Position only.
        drawState->setVertexAttribs<gPosColorAttribs>(1);
    }
}

};
945
946 void GrContext::drawVertices(const GrPaint& paint,
947                              GrPrimitiveType primitiveType,
948                              int vertexCount,
949                              const SkPoint positions[],
950                              const SkPoint texCoords[],
951                              const GrColor colors[],
952                              const uint16_t indices[],
953                              int indexCount) {
954     AutoRestoreEffects are;
955     AutoCheckFlush acf(this);
956     GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope
957
958     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
959
960     GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);
961
962     GrDrawState* drawState = target->drawState();
963
964     int colorOffset = -1, texOffset = -1;
965     set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);
966
967     size_t vertexSize = drawState->getVertexSize();
968     if (sizeof(SkPoint) != vertexSize) {
969         if (!geo.set(target, vertexCount, 0)) {
970             GrPrintf("Failed to get space for vertices!\n");
971             return;
972         }
973         void* curVertex = geo.vertices();
974
975         for (int i = 0; i < vertexCount; ++i) {
976             *((SkPoint*)curVertex) = positions[i];
977
978             if (texOffset >= 0) {
979                 *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
980             }
981             if (colorOffset >= 0) {
982                 *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
983             }
984             curVertex = (void*)((intptr_t)curVertex + vertexSize);
985         }
986     } else {
987         target->setVertexSourceToArray(positions, vertexCount);
988     }
989
990     // we don't currently apply offscreen AA to this path. Need improved
991     // management of GrDrawTarget's geometry to avoid copying points per-tile.
992
993     if (NULL != indices) {
994         target->setIndexSourceToArray(indices, indexCount);
995         target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
996         target->resetIndexSource();
997     } else {
998         target->drawNonIndexed(primitiveType, 0, vertexCount);
999     }
1000 }
1001
1002 ///////////////////////////////////////////////////////////////////////////////
1003
1004 void GrContext::drawRRect(const GrPaint& paint,
1005                           const SkRRect& rrect,
1006                           const SkStrokeRec& stroke) {
1007     if (rrect.isEmpty()) {
1008        return;
1009     }
1010
1011     AutoRestoreEffects are;
1012     AutoCheckFlush acf(this);
1013     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1014
1015     GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);
1016
1017     if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, stroke)) {
1018         SkPath path;
1019         path.addRRect(rrect);
1020         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
1021     }
1022 }
1023
1024 ///////////////////////////////////////////////////////////////////////////////
1025
1026 void GrContext::drawDRRect(const GrPaint& paint,
1027                            const SkRRect& outer,
1028                            const SkRRect& inner) {
1029     if (outer.isEmpty()) {
1030        return;
1031     }
1032
1033     AutoRestoreEffects are;
1034     AutoCheckFlush acf(this);
1035     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1036
1037     GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);
1038
1039     if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
1040         SkPath path;
1041         path.addRRect(inner);
1042         path.addRRect(outer);
1043         path.setFillType(SkPath::kEvenOdd_FillType);
1044
1045         SkStrokeRec fillRec(SkStrokeRec::kFill_InitStyle);
1046         this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
1047     }
1048 }
1049
1050 ///////////////////////////////////////////////////////////////////////////////
1051
1052 void GrContext::drawOval(const GrPaint& paint,
1053                          const SkRect& oval,
1054                          const SkStrokeRec& stroke) {
1055     if (oval.isEmpty()) {
1056        return;
1057     }
1058
1059     AutoRestoreEffects are;
1060     AutoCheckFlush acf(this);
1061     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1062
1063     GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);
1064
1065     if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, stroke)) {
1066         SkPath path;
1067         path.addOval(oval);
1068         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
1069     }
1070 }
1071
1072 // Can 'path' be drawn as a pair of filled nested rectangles?
1073 static bool is_nested_rects(GrDrawTarget* target,
1074                             const SkPath& path,
1075                             const SkStrokeRec& stroke,
1076                             SkRect rects[2],
1077                             bool* useVertexCoverage) {
1078     SkASSERT(stroke.isFillStyle());
1079
1080     if (path.isInverseFillType()) {
1081         return false;
1082     }
1083
1084     const GrDrawState& drawState = target->getDrawState();
1085
1086     // TODO: this restriction could be lifted if we were willing to apply
1087     // the matrix to all the points individually rather than just to the rect
1088     if (!drawState.getViewMatrix().preservesAxisAlignment()) {
1089         return false;
1090     }
1091
1092     *useVertexCoverage = false;
1093     if (!target->getDrawState().canTweakAlphaForCoverage()) {
1094         if (target->shouldDisableCoverageAAForBlend()) {
1095             return false;
1096         } else {
1097             *useVertexCoverage = true;
1098         }
1099     }
1100
1101     SkPath::Direction dirs[2];
1102     if (!path.isNestedRects(rects, dirs)) {
1103         return false;
1104     }
1105
1106     if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
1107         // The two rects need to be wound opposite to each other
1108         return false;
1109     }
1110
1111     // Right now, nested rects where the margin is not the same width
1112     // all around do not render correctly
1113     const SkScalar* outer = rects[0].asScalars();
1114     const SkScalar* inner = rects[1].asScalars();
1115
1116     SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
1117     for (int i = 1; i < 4; ++i) {
1118         SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
1119         if (!SkScalarNearlyEqual(margin, temp)) {
1120             return false;
1121         }
1122     }
1123
1124     return true;
1125 }
1126
1127 void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const SkStrokeRec& stroke) {
1128
1129     if (path.isEmpty()) {
1130        if (path.isInverseFillType()) {
1131            this->drawPaint(paint);
1132        }
1133        return;
1134     }
1135
1136     // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
1137     // Scratch textures can be recycled after they are returned to the texture
1138     // cache. This presents a potential hazard for buffered drawing. However,
1139     // the writePixels that uploads to the scratch will perform a flush so we're
1140     // OK.
1141     AutoRestoreEffects are;
1142     AutoCheckFlush acf(this);
1143     GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
1144     GrDrawState* drawState = target->drawState();
1145
1146     GR_CREATE_TRACE_MARKER("GrContext::drawPath", target);
1147
1148     bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();
1149
1150     if (useCoverageAA && stroke.getWidth() < 0 && !path.isConvex()) {
1151         // Concave AA paths are expensive - try to avoid them for special cases
1152         bool useVertexCoverage;
1153         SkRect rects[2];
1154
1155         if (is_nested_rects(target, path, stroke, rects, &useVertexCoverage)) {
1156             SkMatrix origViewMatrix = drawState->getViewMatrix();
1157             GrDrawState::AutoViewMatrixRestore avmr;
1158             if (!avmr.setIdentity(target->drawState())) {
1159                 return;
1160             }
1161
1162             fAARectRenderer->fillAANestedRects(this->getGpu(), target,
1163                                                rects,
1164                                                origViewMatrix,
1165                                                useVertexCoverage);
1166             return;
1167         }
1168     }
1169
1170     SkRect ovalRect;
1171     bool isOval = path.isOval(&ovalRect);
1172
1173     if (!isOval || path.isInverseFillType()
1174         || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, stroke)) {
1175         this->internalDrawPath(target, paint.isAntiAlias(), path, stroke);
1176     }
1177 }
1178
1179 void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
1180                                  const SkStrokeRec& origStroke) {
1181     SkASSERT(!path.isEmpty());
1182
1183     GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);
1184
1185
1186     // An Assumption here is that path renderer would use some form of tweaking
1187     // the src color (either the input alpha or in the frag shader) to implement
1188     // aa. If we have some future driver-mojo path AA that can do the right
1189     // thing WRT to the blend then we'll need some query on the PR.
1190     bool useCoverageAA = useAA &&
1191         !target->getDrawState().getRenderTarget()->isMultisampled() &&
1192         !target->shouldDisableCoverageAAForBlend();
1193
1194
1195     GrPathRendererChain::DrawType type =
1196         useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
1197                            GrPathRendererChain::kColor_DrawType;
1198
1199     const SkPath* pathPtr = &path;
1200     SkTLazy<SkPath> tmpPath;
1201     SkTCopyOnFirstWrite<SkStrokeRec> stroke(origStroke);
1202
1203     // Try a 1st time without stroking the path and without allowing the SW renderer
1204     GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);
1205
1206     if (NULL == pr) {
1207         if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
1208             // It didn't work the 1st time, so try again with the stroked path
1209             if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
1210                 pathPtr = tmpPath.get();
1211                 stroke.writable()->setFillStyle();
1212                 if (pathPtr->isEmpty()) {
1213                     return;
1214                 }
1215             }
1216         }
1217
1218         // This time, allow SW renderer
1219         pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
1220     }
1221
1222     if (NULL == pr) {
1223 #ifdef SK_DEBUG
1224         GrPrintf("Unable to find path renderer compatible with path.\n");
1225 #endif
1226         return;
1227     }
1228
1229     pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
1230 }
1231
1232 ////////////////////////////////////////////////////////////////////////////////
1233
1234 void GrContext::flush(int flagsBitfield) {
1235     if (NULL == fDrawBuffer) {
1236         return;
1237     }
1238
1239     if (kDiscard_FlushBit & flagsBitfield) {
1240         fDrawBuffer->reset();
1241     } else {
1242         fDrawBuffer->flush();
1243     }
1244     fFlushToReduceCacheSize = false;
1245 }
1246
1247 bool GrContext::writeTexturePixels(GrTexture* texture,
1248                                    int left, int top, int width, int height,
1249                                    GrPixelConfig config, const void* buffer, size_t rowBytes,
1250                                    uint32_t flags) {
1251     ASSERT_OWNED_RESOURCE(texture);
1252
1253     if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
1254         if (NULL != texture->asRenderTarget()) {
1255             return this->writeRenderTargetPixels(texture->asRenderTarget(),
1256                                                  left, top, width, height,
1257                                                  config, buffer, rowBytes, flags);
1258         } else {
1259             return false;
1260         }
1261     }
1262
1263     if (!(kDontFlush_PixelOpsFlag & flags)) {
1264         this->flush();
1265     }
1266
1267     return fGpu->writeTexturePixels(texture, left, top, width, height,
1268                                     config, buffer, rowBytes);
1269 }
1270
1271 bool GrContext::readTexturePixels(GrTexture* texture,
1272                                   int left, int top, int width, int height,
1273                                   GrPixelConfig config, void* buffer, size_t rowBytes,
1274                                   uint32_t flags) {
1275     ASSERT_OWNED_RESOURCE(texture);
1276
1277     GrRenderTarget* target = texture->asRenderTarget();
1278     if (NULL != target) {
1279         return this->readRenderTargetPixels(target,
1280                                             left, top, width, height,
1281                                             config, buffer, rowBytes,
1282                                             flags);
1283     } else {
1284         // TODO: make this more efficient for cases where we're reading the entire
1285         //       texture, i.e., use GetTexImage() instead
1286
1287         // create scratch rendertarget and read from that
1288         GrAutoScratchTexture ast;
1289         GrTextureDesc desc;
1290         desc.fFlags = kRenderTarget_GrTextureFlagBit;
1291         desc.fWidth = width;
1292         desc.fHeight = height;
1293         desc.fConfig = config;
1294         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1295         ast.set(this, desc, kExact_ScratchTexMatch);
1296         GrTexture* dst = ast.texture();
1297         if (NULL != dst && NULL != (target = dst->asRenderTarget())) {
1298             this->copyTexture(texture, target, NULL);
1299             return this->readRenderTargetPixels(target,
1300                                                 left, top, width, height,
1301                                                 config, buffer, rowBytes,
1302                                                 flags);
1303         }
1304
1305         return false;
1306     }
1307 }
1308
1309 #include "SkConfig8888.h"
1310
1311 // toggles between RGBA and BGRA
1312 static SkColorType toggle_colortype32(SkColorType ct) {
1313     if (kRGBA_8888_SkColorType == ct) {
1314         return kBGRA_8888_SkColorType;
1315     } else {
1316         SkASSERT(kBGRA_8888_SkColorType == ct);
1317         return kRGBA_8888_SkColorType;
1318     }
1319 }
1320
bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    // Reads a rect of pixels from 'target' (or the context's current render
    // target when NULL) into 'buffer' as dstConfig. Conversions (y-flip, R/B
    // swap, unpremul) are performed on the GPU via a scratch texture when
    // possible, otherwise on the CPU after the read. Returns false on failure.
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        // Fall back to the context's current render target.
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags)) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    GrAutoScratchTexture ast;
    if (NULL != src && (swapRAndB || unpremul || flipY)) {
        // Make the scratch a render target because we don't have a robust readTexturePixels as of
        // yet. It calls this function.
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = readConfig;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;

        // When a full read back is faster than a partial we could always make the scratch exactly
        // match the passed rect. However, if we see many different size rectangles we will trash
        // our texture cache and pay the cost of creating and destroying many textures. So, we only
        // request an exact match when the caller is reading an entire RT.
        ScratchTexMatch match = kApprox_ScratchTexMatch;
        if (0 == left &&
            0 == top &&
            target->width() == width &&
            target->height() == height &&
            fGpu->fullReadPixelsIsFasterThanPartial()) {
            match = kExact_ScratchTexMatch;
        }
        ast.set(this, desc, match);
        GrTexture* texture = ast.texture();
        if (texture) {
            // compute a matrix to perform the draw
            SkMatrix textureMatrix;
            textureMatrix.setTranslate(SK_Scalar1 *left, SK_Scalar1 *top);
            textureMatrix.postIDiv(src->width(), src->height());

            SkAutoTUnref<const GrEffectRef> effect;
            if (unpremul) {
                effect.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
                if (NULL != effect) {
                    unpremul = false; // we no longer need to do this on CPU after the read back.
                }
            }
            // If we failed to create a PM->UPM effect and have no other conversions to perform then
            // there is no longer any point to using the scratch.
            if (NULL != effect || flipY || swapRAndB) {
                if (!effect) {
                    effect.reset(GrConfigConversionEffect::Create(
                                                    src,
                                                    swapRAndB,
                                                    GrConfigConversionEffect::kNone_PMConversion,
                                                    textureMatrix));
                }
                swapRAndB = false; // we will handle the swap in the draw.

                // We protect the existing geometry here since it may not be
                // clear to the caller that a draw operation (i.e., drawSimpleRect)
                // can be invoked in this method
                GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
                GrDrawState* drawState = fGpu->drawState();
                SkASSERT(effect);
                drawState->addColorEffect(effect);

                drawState->setRenderTarget(texture->asRenderTarget());
                SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
                fGpu->drawSimpleRect(rect, NULL);
                // we want to read back from the scratch's origin
                left = 0;
                top = 0;
                target = texture->asRenderTarget();
            }
        }
    }
    // NOTE(review): if flipY is still pending here (no scratch draw happened),
    // it appears to be left to fGpu->readPixels / the caller — confirm.
    if (!fGpu->readPixels(target,
                          left, top, width, height,
                          readConfig, buffer, rowBytes)) {
        return false;
    }
    // Perform any conversions we weren't able to perform using a scratch texture.
    if (unpremul || swapRAndB) {
        SkDstPixelInfo dstPI;
        if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
            return false;
        }
        dstPI.fAlphaType = kUnpremul_SkAlphaType;
        dstPI.fPixels = buffer;
        dstPI.fRowBytes = rowBytes;

        // Convert in place: src and dst share 'buffer'; only the color type
        // and alpha type interpretation differ.
        SkSrcPixelInfo srcPI;
        srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
        srcPI.fAlphaType = kPremul_SkAlphaType;
        srcPI.fPixels = buffer;
        srcPI.fRowBytes = rowBytes;

        return srcPI.convertPixelsTo(&dstPI, width, height);
    }
    return true;
}
1463
void GrContext::resolveRenderTarget(GrRenderTarget* target) {
    // Resolves a multisampled render target's MSAA buffer, flushing pending
    // draws first so the resolve sees the latest rendering.
    SkASSERT(target);
    ASSERT_OWNED_RESOURCE(target);
    // In the future we may track whether there are any pending draws to this
    // target. We don't today so we always perform a flush. We don't promise
    // this to our clients, though.
    this->flush();
    fGpu->resolveRenderTarget(target);
}
1473
1474 void GrContext::discardRenderTarget(GrRenderTarget* target) {
1475     SkASSERT(target);
1476     ASSERT_OWNED_RESOURCE(target);
1477     AutoRestoreEffects are;
1478     AutoCheckFlush acf(this);
1479     this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf)->discard(target);
1480 }
1481
1482 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1483     if (NULL == src || NULL == dst) {
1484         return;
1485     }
1486     ASSERT_OWNED_RESOURCE(src);
1487
1488     // Writes pending to the source texture are not tracked, so a flush
1489     // is required to ensure that the copy captures the most recent contents
1490     // of the source texture. See similar behavior in
1491     // GrContext::resolveRenderTarget.
1492     this->flush();
1493
1494     GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
1495     GrDrawState* drawState = fGpu->drawState();
1496     drawState->setRenderTarget(dst);
1497     SkMatrix sampleM;
1498     sampleM.setIDiv(src->width(), src->height());
1499     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1500     if (NULL != topLeft) {
1501         srcRect.offset(*topLeft);
1502     }
1503     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1504     if (!srcRect.intersect(srcBounds)) {
1505         return;
1506     }
1507     sampleM.preTranslate(SkIntToScalar(srcRect.fLeft), SkIntToScalar(srcRect.fTop));
1508     drawState->addColorTextureEffect(src, sampleM);
1509     SkRect dstR = SkRect::MakeWH(SkIntToScalar(srcRect.width()), SkIntToScalar(srcRect.height()));
1510     fGpu->drawSimpleRect(dstR, NULL);
1511 }
1512
1513 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1514                                         int left, int top, int width, int height,
1515                                         GrPixelConfig srcConfig,
1516                                         const void* buffer,
1517                                         size_t rowBytes,
1518                                         uint32_t flags) {
1519     ASSERT_OWNED_RESOURCE(target);
1520
1521     if (NULL == target) {
1522         target = fRenderTarget.get();
1523         if (NULL == target) {
1524             return false;
1525         }
1526     }
1527
1528     // TODO: when underlying api has a direct way to do this we should use it (e.g. glDrawPixels on
1529     // desktop GL).
1530
1531     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1532     // Thus, we don't perform a flush here since that call will do it (if the kNoFlush flag isn't
1533     // set.)
1534
1535     // If the RT is also a texture and we don't have to premultiply then take the texture path.
1536     // We expect to be at least as fast or faster since it doesn't use an intermediate texture as
1537     // we do below.
1538
1539 #if !defined(SK_BUILD_FOR_MAC)
1540     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1541     // attached to an FBO. The FBO still sees the old image. TODO: determine what OS versions and/or
1542     // HW is affected.
1543     if (NULL != target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1544         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1545         return this->writeTexturePixels(target->asTexture(),
1546                                         left, top, width, height,
1547                                         srcConfig, buffer, rowBytes, flags);
1548     }
1549 #endif
1550
1551     // We ignore the preferred config unless it is a R/B swap of the src config. In that case
1552     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1553     // config. This scratch will then have R and B swapped. We correct for this by swapping again
1554     // when drawing the scratch to the dst using a conversion effect.
1555     bool swapRAndB = false;
1556     GrPixelConfig writeConfig = srcConfig;
1557     if (GrPixelConfigSwapRAndB(srcConfig) ==
1558         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1559         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1560         swapRAndB = true;
1561     }
1562
1563     GrTextureDesc desc;
1564     desc.fWidth = width;
1565     desc.fHeight = height;
1566     desc.fConfig = writeConfig;
1567     GrAutoScratchTexture ast(this, desc);
1568     GrTexture* texture = ast.texture();
1569     if (NULL == texture) {
1570         return false;
1571     }
1572
1573     SkAutoTUnref<const GrEffectRef> effect;
1574     SkMatrix textureMatrix;
1575     textureMatrix.setIDiv(texture->width(), texture->height());
1576
1577     // allocate a tmp buffer and sw convert the pixels to premul
1578     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1579
1580     if (kUnpremul_PixelOpsFlag & flags) {
1581         if (!GrPixelConfigIs8888(srcConfig)) {
1582             return false;
1583         }
1584         effect.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1585         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1586         if (NULL == effect) {
1587             SkSrcPixelInfo srcPI;
1588             if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1589                 return false;
1590             }
1591             srcPI.fAlphaType = kUnpremul_SkAlphaType;
1592             srcPI.fPixels = buffer;
1593             srcPI.fRowBytes = rowBytes;
1594
1595             tmpPixels.reset(width * height);
1596
1597             SkDstPixelInfo dstPI;
1598             dstPI.fColorType = srcPI.fColorType;
1599             dstPI.fAlphaType = kPremul_SkAlphaType;
1600             dstPI.fPixels = tmpPixels.get();
1601             dstPI.fRowBytes = 4 * width;
1602
1603             if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
1604                 return false;
1605             }
1606
1607             buffer = tmpPixels.get();
1608             rowBytes = 4 * width;
1609         }
1610     }
1611     if (NULL == effect) {
1612         effect.reset(GrConfigConversionEffect::Create(texture,
1613                                                       swapRAndB,
1614                                                       GrConfigConversionEffect::kNone_PMConversion,
1615                                                       textureMatrix));
1616     }
1617
1618     if (!this->writeTexturePixels(texture,
1619                                   0, 0, width, height,
1620                                   writeConfig, buffer, rowBytes,
1621                                   flags & ~kUnpremul_PixelOpsFlag)) {
1622         return false;
1623     }
1624
1625     // writeRenderTargetPixels can be called in the midst of drawing another
1626     // object (e.g., when uploading a SW path rendering to the gpu while
1627     // drawing a rect) so preserve the current geometry.
1628     SkMatrix matrix;
1629     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1630     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1631     GrDrawState* drawState = fGpu->drawState();
1632     SkASSERT(effect);
1633     drawState->addColorEffect(effect);
1634
1635     drawState->setRenderTarget(target);
1636
1637     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)), NULL);
1638     return true;
1639 }
1640 ////////////////////////////////////////////////////////////////////////////////
1641
// Configures the context's shared GrDrawState and returns the draw target
// (deferred buffer or the GPU directly) that the caller should draw into.
//
// paint    - if non-NULL, initializes the draw state from it; if NULL, the
//            draw state is reset to a default targeting fRenderTarget.
// buffered - selects the in-order draw buffer vs. immediate GPU target.
// are      - required when paint is non-NULL; restores/frees the paint's
//            effects when it goes out of scope in the caller.
// acf      - required when paint is non-NULL; flush bookkeeping (unused here
//            directly, but its lifetime is part of the drawing contract).
GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
                                       BufferedDraw buffered,
                                       AutoRestoreEffects* are,
                                       AutoCheckFlush* acf) {
    // All users of this draw state should be freeing up all effects when they're done.
    // Otherwise effects that own resources may keep those resources alive indefinitely.
    SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages());

    // Switching from buffered to unbuffered drawing: flush the deferred buffer
    // first so previously recorded draws land before the upcoming immediate one.
    if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
        fDrawBuffer->flush();
        fLastDrawWasBuffered = kNo_BufferedDraw;
    }
    ASSERT_OWNED_RESOURCE(fRenderTarget.get());
    if (NULL != paint) {
        SkASSERT(NULL != are);
        SkASSERT(NULL != acf);
        are->set(fDrawState);
        fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
#if GR_DEBUG_PARTIAL_COVERAGE_CHECK
        // Debug-only: warn when partial coverage will blend incorrectly on this GPU.
        if ((paint->hasMask() || 0xff != paint->fCoverage) &&
            !fGpu->canApplyCoverage()) {
            GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
        }
#endif
    } else {
        // No paint: reset to a clean state that still draws to the current RT.
        fDrawState->reset(fViewMatrix);
        fDrawState->setRenderTarget(fRenderTarget.get());
    }
    GrDrawTarget* target;
    if (kYes_BufferedDraw == buffered) {
        fLastDrawWasBuffered = kYes_BufferedDraw;
        target = fDrawBuffer;
    } else {
        SkASSERT(kNo_BufferedDraw == buffered);
        fLastDrawWasBuffered = kNo_BufferedDraw;
        target = fGpu;
    }
    // Only enable clipping when there is a clip that actually restricts drawing.
    fDrawState->setState(GrDrawState::kClip_StateBit, NULL != fClip &&
                                                     !fClip->fClipStack->isWideOpen());
    target->setClip(fClip);
    // Both targets share the context's draw state; sanity-check that wiring.
    SkASSERT(fDrawState == target->drawState());
    return target;
}
1685
1686 /*
1687  * This method finds a path renderer that can draw the specified path on
1688  * the provided target.
1689  * Due to its expense, the software path renderer has split out so it can
1690  * can be individually allowed/disallowed via the "allowSW" boolean.
1691  */
1692 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1693                                            const SkStrokeRec& stroke,
1694                                            const GrDrawTarget* target,
1695                                            bool allowSW,
1696                                            GrPathRendererChain::DrawType drawType,
1697                                            GrPathRendererChain::StencilSupport* stencilSupport) {
1698
1699     if (NULL == fPathRendererChain) {
1700         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1701     }
1702
1703     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1704                                                              stroke,
1705                                                              target,
1706                                                              drawType,
1707                                                              stencilSupport);
1708
1709     if (NULL == pr && allowSW) {
1710         if (NULL == fSoftwarePathRenderer) {
1711             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1712         }
1713         pr = fSoftwarePathRenderer;
1714     }
1715
1716     return pr;
1717 }
1718
1719 ////////////////////////////////////////////////////////////////////////////////
1720 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1721     return fGpu->caps()->isConfigRenderable(config, withMSAA);
1722 }
1723
1724 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1725                                          SkScalar dpi) const {
1726     if (!this->isConfigRenderable(config, true)) {
1727         return 0;
1728     }
1729     int chosenSampleCount = 0;
1730     if (fGpu->caps()->pathRenderingSupport()) {
1731         if (dpi >= 250.0f) {
1732             chosenSampleCount = 4;
1733         } else {
1734             chosenSampleCount = 16;
1735         }
1736     }
1737     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1738         chosenSampleCount : 0;
1739 }
1740
1741 void GrContext::setupDrawBuffer() {
1742     SkASSERT(NULL == fDrawBuffer);
1743     SkASSERT(NULL == fDrawBufferVBAllocPool);
1744     SkASSERT(NULL == fDrawBufferIBAllocPool);
1745
1746     fDrawBufferVBAllocPool =
1747         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1748                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1749                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1750     fDrawBufferIBAllocPool =
1751         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1752                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1753                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1754
1755     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1756                                                    fDrawBufferVBAllocPool,
1757                                                    fDrawBufferIBAllocPool));
1758
1759     fDrawBuffer->setDrawState(fDrawState);
1760 }
1761
1762 GrDrawTarget* GrContext::getTextTarget() {
1763     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1764 }
1765
1766 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1767     return fGpu->getQuadIndexBuffer();
1768 }
1769
1770 namespace {
1771 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1772     GrConfigConversionEffect::PMConversion pmToUPM;
1773     GrConfigConversionEffect::PMConversion upmToPM;
1774     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1775     *pmToUPMValue = pmToUPM;
1776     *upmToPMValue = upmToPM;
1777 }
1778 }
1779
1780 const GrEffectRef* GrContext::createPMToUPMEffect(GrTexture* texture,
1781                                                   bool swapRAndB,
1782                                                   const SkMatrix& matrix) {
1783     if (!fDidTestPMConversions) {
1784         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1785         fDidTestPMConversions = true;
1786     }
1787     GrConfigConversionEffect::PMConversion pmToUPM =
1788         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1789     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1790         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1791     } else {
1792         return NULL;
1793     }
1794 }
1795
1796 const GrEffectRef* GrContext::createUPMToPMEffect(GrTexture* texture,
1797                                                   bool swapRAndB,
1798                                                   const SkMatrix& matrix) {
1799     if (!fDidTestPMConversions) {
1800         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1801         fDidTestPMConversions = true;
1802     }
1803     GrConfigConversionEffect::PMConversion upmToPM =
1804         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1805     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1806         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1807     } else {
1808         return NULL;
1809     }
1810 }
1811
1812 GrPath* GrContext::createPath(const SkPath& inPath, const SkStrokeRec& stroke) {
1813     SkASSERT(fGpu->caps()->pathRenderingSupport());
1814
1815     // TODO: now we add to fTextureCache. This should change to fResourceCache.
1816     GrResourceKey resourceKey = GrPath::ComputeKey(inPath, stroke);
1817     GrPath* path = static_cast<GrPath*>(fTextureCache->find(resourceKey));
1818     if (NULL != path && path->isEqualTo(inPath, stroke)) {
1819         path->ref();
1820     } else {
1821         path = fGpu->createPath(inPath, stroke);
1822         fTextureCache->purgeAsNeeded(1, path->gpuMemorySize());
1823         fTextureCache->addResource(resourceKey, path);
1824     }
1825     return path;
1826 }
1827
1828 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrCacheable* resource) {
1829     fTextureCache->purgeAsNeeded(1, resource->gpuMemorySize());
1830     fTextureCache->addResource(resourceKey, resource);
1831 }
1832
1833 GrCacheable* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1834     GrCacheable* resource = fTextureCache->find(resourceKey);
1835     SkSafeRef(resource);
1836     return resource;
1837 }
1838
1839 ///////////////////////////////////////////////////////////////////////////////
1840 #if GR_CACHE_STATS
// Dumps resource-cache usage statistics (available in GR_CACHE_STATS builds only).
void GrContext::printCacheStats() const {
    fTextureCache->printStats();
}
1844 #endif