// platform/framework/web/crosswalk.git @ ad1b4c49553316267a5ed150390f8e68ed47cb7d
// src/third_party/skia/src/gpu/GrContext.cpp

/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrContext.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrTextStrike.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkGr.h"
#include "SkRTConf.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

// It can be useful to set this to false to test whether a bug is caused by using the
// InOrderDrawBuffer, to compare performance of using/not using InOrderDrawBuffer, or to make
// debugging simpler.
SK_CONF_DECLARE(bool, c_Defer, "gpu.deferContext", true,
                "Defers rendering in GrContext via GrInOrderDrawBuffer.");

#define BUFFERED_DRAW (c_Defer ? kYes_BufferedDraw : kNo_BufferedDraw)

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

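// RAII helper: if an overbudget callback (see OverbudgetCB below) has asked for a
// flush to reduce the cache size, perform that flush when this object goes out of scope.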
class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const Options* opts) {
    GrContext* context;
    if (NULL == opts) {
        context = SkNEW_ARGS(GrContext, (Options()));
    } else {
        context = SkNEW_ARGS(GrContext, (*opts));
    }

    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

GrContext::GrContext(const Options& opts) : fOptions(opts) {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache = NULL;
    fResourceCache2 = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }

    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fResourceCache = SkNEW_ARGS(GrResourceCache, (MAX_RESOURCE_CACHE_COUNT,
                                                  MAX_RESOURCE_CACHE_BYTES));
    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
    fResourceCache2 = SkNEW(GrResourceCache2);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fLastDrawWasBuffered = kNo_BufferedDraw;

    fAARectRenderer = SkNEW(GrAARectRenderer);
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();

    return true;
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    delete fResourceCache2;
    fResourceCache2 = NULL;
    delete fResourceCache;
    fResourceCache = NULL;
    delete fFontCache;
    delete fDrawBuffer;
    delete fDrawBufferVBAllocPool;
    delete fDrawBufferIBAllocPool;

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

void GrContext::abandonContext() {
    // Abandon first so that the destructors don't try to free the resources
    // in the backend API.
    fResourceCache2->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fLayerCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();
    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
    if (resourceCount) {
        *resourceCount = fResourceCache->getCachedResourceCount();
    }
    if (resourceBytes) {
        *resourceBytes = fResourceCache->getCachedResourceBytes();
    }
}

GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            const SkDeviceProperties& leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->pathRenderingSupport()) {
        if (renderTarget->getStencilBuffer() && renderTarget->isMultisampled()) {
            return SkNEW_ARGS(GrStencilAndCoverTextContext, (this, leakyProperties));
        }
    }
    return SkNEW_ARGS(GrDistanceFieldTextContext, (this, leakyProperties,
                                                   enableDistanceFieldFonts));
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrTextureDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    SkSafeRef(resource);
    return static_cast<GrTexture*>(resource);
}

bool GrContext::isTextureInCache(const GrTextureDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fResourceCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

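// Nearest-neighbor stretch of src into dst using 16.16 fixed-point stepping.
// No filtering is performed; each dst texel is a copy of the nearest src texel.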
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT.
GrTexture* GrContext::createResizedTexture(const GrTextureDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrTextureDesc rtDesc = desc;
    rtDesc.fFlags =  rtDesc.fFlags |
                     kRenderTarget_GrTextureFlagBit |
                     kNoStencil_GrTextureFlagBit;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (texture) {
        GrDrawTarget::AutoStateRestore asr(fGpu, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fGpu->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // If filtering is not desired then we want to ensure all texels in the
        // resampled image are copies of texels from the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode
                                      : GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
                                                    2 * sizeof(SkPoint));

        GrDrawTarget::AutoReleaseGeometry arg(fGpu, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fGpu->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter, but we create separate stretched
        // textures whether the texture params request filtering or not. Either
        // implement a filtered stretch blit on the CPU or just create one texture
        // when the FBO case fails.

        rtDesc.fFlags = kNone_GrTextureFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrTextureDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTextureImpl::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTextureImpl::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTextureImpl::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (texture) {
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        fResourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        fResourceCache->addResource(resourceKey, texture);

        if (cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

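// Creates a new scratch texture and adds it to the resource cache as exclusive
// ("hidden") so that subsequent 'find' calls won't hand it out while it is in use.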
static GrTexture* create_scratch_texture(GrGpu* gpu,
                                         GrResourceCache* resourceCache,
                                         const GrTextureDesc& desc) {
    GrTexture* texture = gpu->createTexture(desc, NULL, 0);
    if (texture) {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(texture->desc());
        // Adding a resource could put us overbudget. Try to free up the
        // necessary space before adding it.
        resourceCache->purgeAsNeeded(1, texture->gpuMemorySize());
        // Make the resource exclusive so future 'find' calls don't return it
        resourceCache->addResource(key, texture, GrResourceCache::kHide_OwnershipFlag);
    }
    return texture;
}

GrTexture* GrContext::lockAndRefScratchTexture(const GrTextureDesc& inDesc, ScratchTexMatch match) {

    SkASSERT((inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             !(inDesc.fFlags & kNoStencil_GrTextureFlagBit));

    // Renderable A8 targets are not universally supported (e.g., not on ANGLE)
    SkASSERT(this->isConfigRenderable(kAlpha_8_GrPixelConfig, inDesc.fSampleCnt > 0) ||
             !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit) ||
             (inDesc.fConfig != kAlpha_8_GrPixelConfig));

    if (!fGpu->caps()->reuseScratchTextures() &&
        !(inDesc.fFlags & kRenderTarget_GrTextureFlagBit)) {
        // If we're never recycling this texture we can always make it the right size
        return create_scratch_texture(fGpu, fResourceCache, inDesc);
    }

    GrTextureDesc desc = inDesc;

    if (kApprox_ScratchTexMatch == match) {
        // bin by pow2 with a reasonable min
        static const int MIN_SIZE = 16;
        desc.fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc.fWidth));
        desc.fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc.fHeight));
    }

    GrGpuResource* resource = NULL;
    int origWidth = desc.fWidth;
    int origHeight = desc.fHeight;

    do {
        GrResourceKey key = GrTextureImpl::ComputeScratchKey(desc);
        // Ensure we have exclusive access to the texture so future 'find' calls don't return it
        resource = fResourceCache->find(key, GrResourceCache::kHide_OwnershipFlag);
        if (resource) {
            resource->ref();
            break;
        }
        if (kExact_ScratchTexMatch == match) {
            break;
        }
        // We had a cache miss and we are in approx mode; relax the fit of the flags.

        // We no longer try to reuse textures that were previously used as render targets in
        // situations where no RT is needed; doing otherwise can confuse the video driver and
        // cause significant performance problems in some cases.
        if (desc.fFlags & kNoStencil_GrTextureFlagBit) {
            desc.fFlags = desc.fFlags & ~kNoStencil_GrTextureFlagBit;
        } else {
            break;
        }

    } while (true);

    if (NULL == resource) {
        desc.fFlags = inDesc.fFlags;
        desc.fWidth = origWidth;
        desc.fHeight = origHeight;
        resource = create_scratch_texture(fGpu, fResourceCache, desc);
    }

    return static_cast<GrTexture*>(resource);
}

void GrContext::addExistingTextureToCache(GrTexture* texture) {

    if (NULL == texture) {
        return;
    }

    // This texture should already have a cache entry since it was once
    // attached
    SkASSERT(texture->getCacheEntry());

    // Conceptually, the cache entry is going to assume responsibility
    // for the creation ref. Assert refcnt == 1.
    // Except that this also gets called when the texture is prematurely
    // abandoned. In that case the ref count may be > 1.
    // SkASSERT(texture->unique());

    if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
        // Since this texture came from an AutoScratchTexture it should
        // still be in the exclusive pile. Recycle it.
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        this->purgeCache();
    } else {
        // When we aren't reusing textures we know this scratch texture
        // will never be reused and would be just wasting time in the cache
        fResourceCache->makeNonExclusive(texture->getCacheEntry());
        fResourceCache->deleteResource(texture->getCacheEntry());
    }
}

void GrContext::unlockScratchTexture(GrTexture* texture) {
    if (texture->wasDestroyed()) {
        if (texture->getCacheEntry()->key().isScratch()) {
            // This texture was detached from the cache, but the cache still had a ref
            // to it (though not a pointer to it). This will unref the texture and
            // delete its resource cache entry.
            delete texture->getCacheEntry();
        }
        return;
    }

    ASSERT_OWNED_RESOURCE(texture);
    SkASSERT(texture->getCacheEntry());

    // If this is a scratch texture we detached it from the cache
    // while it was locked (to avoid two callers simultaneously getting
    // the same texture).
    if (texture->getCacheEntry()->key().isScratch()) {
        if (fGpu->caps()->reuseScratchTextures() || texture->asRenderTarget()) {
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            this->purgeCache();
        } else if (texture->unique()) {
            // Only the cache now knows about this texture. Since we're never
            // reusing scratch textures (in this code path) it would just be
            // wasting time sitting in the cache.
            fResourceCache->makeNonExclusive(texture->getCacheEntry());
            fResourceCache->deleteResource(texture->getCacheEntry());
        } else {
            // In this case there is still a non-cache ref, but we don't really
            // want to re-add the texture to the cache (since it will never be
            // reused). Instead, give up the cache's ref and leave the decision
            // up to addExistingTextureToCache once its ref count reaches 0. For
            // this to work we need to leave it in the exclusive list.
            texture->impl()->setFlag((GrTextureFlags) GrTextureImpl::kReturnToCache_FlagBit);
            // Give up the cache's ref to the texture
            texture->unref();
        }
    }
}

void GrContext::purgeCache() {
    if (fResourceCache) {
        fResourceCache->purgeAsNeeded();
    }
}

bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}

GrTexture* GrContext::createUncachedTexture(const GrTextureDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrTextureDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    fResourceCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
        return false;
    }

    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);

    if (!isPow2) {
        bool tiled = params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    ASSERT_OWNED_RESOURCE(renderTarget);
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            GrPrintf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/*  Create a triangle strip that strokes the specified rect. There are 8
    unique vertices, but we repeat the last 2 to close up. Alternatively we
    could use an index array and send only 8 verts, but it's not clear that
    would be faster.
 */
static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static inline bool is_irect(const SkRect& r) {
    return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
           SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

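// Decides whether a rect draw can take the coverage-based AA path (the blend must
// tolerate coverage, the target must not be multisampled, and the view matrix must be
// sufficiently well behaved) and, if so, computes the rect's device-space bounds.
// Filled rects whose device bounds land exactly on integer coordinates don't need AA.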
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect) {
    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
        //GrPrintf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);
    if (strokeWidth < 0) {
        return !is_irect(*devBoundRect);
    }

    return true;
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const GrStrokeInfo* strokeInfo) {
    if (strokeInfo && strokeInfo->isDashed()) {
        SkPath path;
        path.addRect(rect);
        this->drawPath(paint, path, *strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
    SkMatrix matrix = target->drawState()->getViewMatrix();

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!matrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);

    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            // width >= 0 implies a non-NULL strokeInfo, so it is safe to dereference here.
            fAARectRenderer->strokeAARect(this->getGpu(), target, rect,
                                          matrix, devBoundRect,
                                          strokeInfo->getStrokeRec());
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(this->getGpu(), target,
                                        rect, matrix, devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        SkPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* localMatrix) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(dstRect, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};

static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint);
static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor);

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};

static const size_t kPosAttribsSize = sizeof(SkPoint);
static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor);

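// Chooses the vertex attribute layout for drawVertices: position plus optional local
// coords and/or per-vertex color. Reports the byte offsets of the optional attributes,
// or -1 if they are not present.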
static void set_vertex_attributes(GrDrawState* drawState,
                                  const SkPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (texCoords && colors) {
        *texOffset = sizeof(SkPoint);
        *colorOffset = 2*sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize);
    } else if (texCoords) {
        *texOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize);
    } else if (colors) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize);
    }
}

};

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t vertexStride = drawState->getVertexStride();
    if (sizeof(SkPoint) != vertexStride) {
        if (!geo.set(target, vertexCount, 0)) {
            GrPrintf("Failed to get space for vertices!\n");
            return;
        }
        void* curVertex = geo.vertices();

        for (int i = 0; i < vertexCount; ++i) {
            *((SkPoint*)curVertex) = positions[i];

            if (texOffset >= 0) {
                *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
            }
            if (colorOffset >= 0) {
                *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
            }
            curVertex = (void*)((intptr_t)curVertex + vertexStride);
        }
    } else {
        target->setVertexSourceToArray(positions, vertexCount);
    }

    // We don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.

    if (indices) {
        target->setIndexSourceToArray(indices, indexCount);
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
        target->resetIndexSource();
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    if (rrect.isEmpty()) {
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
        SkPath path;
        path.addRRect(rrect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawDRRect(const GrPaint& paint,
                           const SkRRect& outer,
                           const SkRRect& inner) {
    if (outer.isEmpty()) {
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);

    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
        SkPath path;
        path.addRRect(inner);
        path.addRRect(outer);
        path.setFillType(SkPath::kEvenOdd_FillType);

        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const GrStrokeInfo& strokeInfo) {
    if (oval.isEmpty()) {
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addOval(oval);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2]) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
        return false;
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (!SkScalarNearlyEqual(margin, temp)) {
            return false;
        }
    }

    return true;
}

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {

    if (path.isEmpty()) {
        if (path.isInverseFillType()) {
            this->drawPaint(paint);
        }
        return;
    }

    if (strokeInfo.isDashed()) {
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoRestoreEffects are;
            AutoCheckFlush acf(this);
            GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
            if (NULL == target) {
                return;
            }
            GrDrawState* drawState = target->drawState();

            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (avmr.setIdentity(target->drawState())) {
                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
                                                  origViewMatrix)) {
                    return;
                }
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
            return;
        }

        this->drawPath(paint, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, BUFFERED_DRAW, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        SkRect rects[2];

        if (is_nested_rects(target, path, strokeRec, rects)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(this->getGpu(), target, rects, origViewMatrix);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);

    // An assumption here is that the path renderer will use some form of tweaking
    // the src color (either the input alpha or in the frag shader) to implement
    // AA. If we have some future driver-mojo path AA that can do the right
    // thing WRT the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();

    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                        GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try first without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // That didn't work, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow the SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        GrPrintf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

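// Issues any drawing buffered in the GrInOrderDrawBuffer, or discards it entirely when
// kDiscard_FlushBit is set.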
void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fFlushToReduceCacheSize = false;
}

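// Writes pixel data into a texture, falling back to the render target path when the
// GPU can't write the pixels directly (e.g. unpremultiplied data was supplied).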
bool GrContext::writeTexturePixels(GrTexture* texture,
                                   int left, int top, int width, int height,
                                   GrPixelConfig config, const void* buffer, size_t rowBytes,
                                   uint32_t flags) {
    ASSERT_OWNED_RESOURCE(texture);

    if ((kUnpremul_PixelOpsFlag & flags) || !fGpu->canWriteTexturePixels(texture, config)) {
        if (texture->asRenderTarget()) {
            return this->writeRenderTargetPixels(texture->asRenderTarget(),
                                                 left, top, width, height,
                                                 config, buffer, rowBytes, flags);
        } else {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags) && texture->hasPendingIO()) {
        this->flush();
    }

    return fGpu->writeTexturePixels(texture, left, top, width, height,
                                    config, buffer, rowBytes);
}

bool GrContext::readTexturePixels(GrTexture* texture,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, void* buffer, size_t rowBytes,
                                  uint32_t flags) {
    ASSERT_OWNED_RESOURCE(texture);

    GrRenderTarget* target = texture->asRenderTarget();
    if (target) {
        return this->readRenderTargetPixels(target,
                                            left, top, width, height,
                                            config, buffer, rowBytes,
                                            flags);
    } else {
        // TODO: make this more efficient for cases where we're reading the entire
        //       texture, i.e., use GetTexImage() instead

        // create scratch rendertarget and read from that
        GrAutoScratchTexture ast;
        GrTextureDesc desc;
        desc.fFlags = kRenderTarget_GrTextureFlagBit;
        desc.fWidth = width;
        desc.fHeight = height;
        desc.fConfig = config;
        desc.fOrigin = kTopLeft_GrSurfaceOrigin;
        ast.set(this, desc, kExact_ScratchTexMatch);
        GrTexture* dst = ast.texture();
        if (dst && (target = dst->asRenderTarget())) {
            this->copyTexture(texture, target, NULL);
            return this->readRenderTargetPixels(target,
                                                left, top, width, height,
                                                config, buffer, rowBytes,
                                                flags);
        }

        return false;
    }
}

#include "SkConfig8888.h"

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    ASSERT_OWNED_RESOURCE(target);

    if (NULL == target) {
        target = fRenderTarget.get();
        if (NULL == target) {
            return false;
        }
    }

    if (!(kDontFlush_PixelOpsFlag & flags) && target->hasPendingWrite()) {
        this->flush();
    }

1425     // Determine which conversions have to be applied: flipY, swapRAnd, and/or unpremul.
1426
1427     // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
1428     // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
1429     bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
1430                                                  width, height, dstConfig,
1431                                                  rowBytes);
1432     // We ignore the preferred config if it is different than our config unless it is an R/B swap.
1433     // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
1434     // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
1435     // the draw cancels out the fact that we call readPixels with a config that is R/B swapped from
1436     // dstConfig.
1437     GrPixelConfig readConfig = dstConfig;
1438     bool swapRAndB = false;
1439     if (GrPixelConfigSwapRAndB(dstConfig) ==
1440         fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
1441         readConfig = GrPixelConfigSwapRAndB(readConfig);
1442         swapRAndB = true;
1443     }
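    // For example (configs here are illustrative): if dstConfig is kBGRA_8888_GrPixelConfig but the
    // GPU prefers to read back kRGBA_8888_GrPixelConfig, readConfig becomes kRGBA_8888 and swapRAndB
    // is set; the swapping draw below and the swapped readPixels then cancel each other out.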
1444
1445     bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);
1446
1447     if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
1448         // The unpremul flag is only allowed for 8888 configs.
1449         return false;
1450     }
1451
1452     // If the src is a texture and we would have to do conversions after the readPixels call, we
1453     // instead do the conversions by drawing the src to a scratch texture. If we handle any of the
1454     // conversions in the draw we set the corresponding bool to false so that we don't reapply it
1455     // to the pixels we read back.
1456     GrTexture* src = target->asTexture();
1457     GrAutoScratchTexture ast;
1458     if (src && (swapRAndB || unpremul || flipY)) {
1459         // Make the scratch a render target because we don't yet have a robust readTexturePixels;
1460         // it calls this function.
1461         GrTextureDesc desc;
1462         desc.fFlags = kRenderTarget_GrTextureFlagBit;
1463         desc.fWidth = width;
1464         desc.fHeight = height;
1465         desc.fConfig = readConfig;
1466         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1467
1468         // When a full read back is faster than a partial one we could always make the scratch exactly
1469         // match the passed rect. However, if we see many differently sized rectangles we will thrash
1470         // our texture cache and pay the cost of creating and destroying many textures. So, we only
1471         // request an exact match when the caller is reading an entire RT.
1472         ScratchTexMatch match = kApprox_ScratchTexMatch;
1473         if (0 == left &&
1474             0 == top &&
1475             target->width() == width &&
1476             target->height() == height &&
1477             fGpu->fullReadPixelsIsFasterThanPartial()) {
1478             match = kExact_ScratchTexMatch;
1479         }
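        // For example (sizes are illustrative): reading back an entire 256x256 target on hardware
        // where full reads beat partial ones requests an exactly sized scratch, while reading a
        // 16x16 sub-rect keeps the approximate match so oddly sized reads can reuse cached textures.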
1480         ast.set(this, desc, match);
1481         GrTexture* texture = ast.texture();
1482         if (texture) {
1483             // compute a matrix to perform the draw
1484             SkMatrix textureMatrix;
1485             textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1486             textureMatrix.postIDiv(src->width(), src->height());
1487
1488             SkAutoTUnref<const GrFragmentProcessor> fp;
1489             if (unpremul) {
1490                 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1491                 if (fp) {
1492                     unpremul = false; // we no longer need to do this on CPU after the read back.
1493                 }
1494             }
1495             // If we failed to create a PM->UPM effect and have no other conversions to perform, then
1496             // there is no longer any point in using the scratch.
1497             if (fp || flipY || swapRAndB) {
1498                 if (!fp) {
1499                     fp.reset(GrConfigConversionEffect::Create(
1500                             src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
1501                             textureMatrix));
1502                 }
1503                 swapRAndB = false; // we will handle the swap in the draw.
1504
1505                 // We protect the existing geometry here since it may not be
1506                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
1507                 // can be invoked in this method.
1508                 GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit);
1509                 GrDrawState* drawState = fGpu->drawState();
1510                 SkASSERT(fp);
1511                 drawState->addColorProcessor(fp);
1512
1513                 drawState->setRenderTarget(texture->asRenderTarget());
1514                 SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1515                 fGpu->drawSimpleRect(rect);
1516                 // we want to read back from the scratch's origin
1517                 left = 0;
1518                 top = 0;
1519                 target = texture->asRenderTarget();
1520             }
1521         }
1522     }
1523     if (!fGpu->readPixels(target,
1524                           left, top, width, height,
1525                           readConfig, buffer, rowBytes)) {
1526         return false;
1527     }
1528     // Perform any conversions that we weren't able to handle using a scratch texture.
1529     if (unpremul || swapRAndB) {
1530         SkDstPixelInfo dstPI;
1531         if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1532             return false;
1533         }
1534         dstPI.fAlphaType = kUnpremul_SkAlphaType;
1535         dstPI.fPixels = buffer;
1536         dstPI.fRowBytes = rowBytes;
1537
1538         SkSrcPixelInfo srcPI;
1539         srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1540         srcPI.fAlphaType = kPremul_SkAlphaType;
1541         srcPI.fPixels = buffer;
1542         srcPI.fRowBytes = rowBytes;
1543
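        // Note that srcPI and dstPI share 'buffer', so the swap and/or unpremul is applied in place.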
1544         return srcPI.convertPixelsTo(&dstPI, width, height);
1545     }
1546     return true;
1547 }
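
// Illustrative sketch (not part of this file): how a caller might use readRenderTargetPixels() to
// read back an entire target as unpremultiplied RGBA. The helper name is made up for the example,
// and rowBytes == 0 is assumed to request tightly packed rows.
#if 0
static bool example_read_back_unpremul(GrContext* ctx, GrRenderTarget* rt, void* pixels) {
    // 'pixels' must point to at least rt->width() * rt->height() * 4 bytes.
    return ctx->readRenderTargetPixels(rt,
                                       0, 0, rt->width(), rt->height(),
                                       kRGBA_8888_GrPixelConfig, pixels, 0 /* rowBytes */,
                                       GrContext::kUnpremul_PixelOpsFlag);
}
#endif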
1548
1549 void GrContext::resolveRenderTarget(GrRenderTarget* target) {
1550     SkASSERT(target);
1551     ASSERT_OWNED_RESOURCE(target);
1552     // In the future we may track whether there are any pending draws to this
1553     // target. We don't today, so we always perform a flush. We don't promise
1554     // this to our clients, though.
1555     this->flush();
1556     if (fGpu) {
1557         fGpu->resolveRenderTarget(target);
1558     }
1559 }
1560
1561 void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1562     SkASSERT(renderTarget);
1563     ASSERT_OWNED_RESOURCE(renderTarget);
1564     AutoRestoreEffects are;
1565     AutoCheckFlush acf(this);
1566     GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, &are, &acf);
1567     if (NULL == target) {
1568         return;
1569     }
1570     target->discard(renderTarget);
1571 }
1572
1573 void GrContext::copyTexture(GrTexture* src, GrRenderTarget* dst, const SkIPoint* topLeft) {
1574     if (NULL == src || NULL == dst) {
1575         return;
1576     }
1577     ASSERT_OWNED_RESOURCE(src);
1578
1579     SkIRect srcRect = SkIRect::MakeWH(dst->width(), dst->height());
1580     if (topLeft) {
1581         srcRect.offset(*topLeft);
1582     }
1583     SkIRect srcBounds = SkIRect::MakeWH(src->width(), src->height());
1584     if (!srcRect.intersect(srcBounds)) {
1585         return;
1586     }
1587
1588     GrDrawTarget* target = this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1589     if (NULL == target) {
1590         return;
1591     }
1592     SkIPoint dstPoint;
1593     dstPoint.setZero();
1594     target->copySurface(dst, src, srcRect, dstPoint);
1595 }
1596
1597 bool GrContext::writeRenderTargetPixels(GrRenderTarget* target,
1598                                         int left, int top, int width, int height,
1599                                         GrPixelConfig srcConfig,
1600                                         const void* buffer,
1601                                         size_t rowBytes,
1602                                         uint32_t flags) {
1603     ASSERT_OWNED_RESOURCE(target);
1604
1605     if (NULL == target) {
1606         target = fRenderTarget.get();
1607         if (NULL == target) {
1608             return false;
1609         }
1610     }
1611
1612     // TODO: when the underlying API has a direct way to do this we should use it (e.g. glDrawPixels
1613     // on desktop GL).
1614
1615     // We will always call some form of writeTexturePixels and we will pass our flags on to it.
1616     // Thus, we don't perform a flush here since that call will do it (if the kDontFlush_PixelOpsFlag
1617     // isn't set).
1618
1619     // If the RT is also a texture and we don't have to premultiply then take the texture path.
1620     // We expect this to be at least as fast, since it avoids the intermediate texture that the
1621     // path below uses.
1622
1623 #if !defined(SK_BUILD_FOR_MAC)
1624     // At least some drivers on the Mac get confused when glTexImage2D is called on a texture
1625     // attached to an FBO. The FBO still sees the old image. TODO: determine which OS versions
1626     // and/or HW are affected.
1627     if (target->asTexture() && !(kUnpremul_PixelOpsFlag & flags) &&
1628         fGpu->canWriteTexturePixels(target->asTexture(), srcConfig)) {
1629         return this->writeTexturePixels(target->asTexture(),
1630                                         left, top, width, height,
1631                                         srcConfig, buffer, rowBytes, flags);
1632     }
1633 #endif
1634
1635     // We ignore the preferred config unless it is an R/B swap of the src config. In that case
1636     // we will upload the original src data to a scratch texture but we will spoof it as the swapped
1637     // config. This scratch will then have R and B swapped. We correct for this by swapping again
1638     // when drawing the scratch to the dst using a conversion effect.
1639     bool swapRAndB = false;
1640     GrPixelConfig writeConfig = srcConfig;
1641     if (GrPixelConfigSwapRAndB(srcConfig) ==
1642         fGpu->preferredWritePixelsConfig(srcConfig, target->config())) {
1643         writeConfig = GrPixelConfigSwapRAndB(srcConfig);
1644         swapRAndB = true;
1645     }
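    // For example (configs here are illustrative): if srcConfig is kRGBA_8888_GrPixelConfig and the
    // GPU prefers kBGRA_8888_GrPixelConfig writes, writeConfig becomes kBGRA_8888 and swapRAndB is
    // set: the RGBA bytes get uploaded into a scratch tagged as BGRA (so its channels read back
    // swapped), and the conversion effect created below swaps them back when drawing to the target.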
1646
1647     GrTextureDesc desc;
1648     desc.fWidth = width;
1649     desc.fHeight = height;
1650     desc.fConfig = writeConfig;
1651     GrAutoScratchTexture ast(this, desc);
1652     GrTexture* texture = ast.texture();
1653     if (NULL == texture) {
1654         return false;
1655     }
1656
1657     SkAutoTUnref<const GrFragmentProcessor> fp;
1658     SkMatrix textureMatrix;
1659     textureMatrix.setIDiv(texture->width(), texture->height());
1660
1661     // A temp buffer, allocated lazily below, for converting the pixels to premul in software.
1662     SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);
1663
1664     if (kUnpremul_PixelOpsFlag & flags) {
1665         if (!GrPixelConfigIs8888(srcConfig)) {
1666             return false;
1667         }
1668         fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
1669         // handle the unpremul step on the CPU if we couldn't create an effect to do it.
1670         if (NULL == fp) {
1671             SkSrcPixelInfo srcPI;
1672             if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
1673                 return false;
1674             }
1675             srcPI.fAlphaType = kUnpremul_SkAlphaType;
1676             srcPI.fPixels = buffer;
1677             srcPI.fRowBytes = rowBytes;
1678
1679             tmpPixels.reset(width * height);
1680
1681             SkDstPixelInfo dstPI;
1682             dstPI.fColorType = srcPI.fColorType;
1683             dstPI.fAlphaType = kPremul_SkAlphaType;
1684             dstPI.fPixels = tmpPixels.get();
1685             dstPI.fRowBytes = 4 * width;
1686
1687             if (!srcPI.convertPixelsTo(&dstPI, width, height)) {
1688                 return false;
1689             }
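            // For example, an unpremultiplied pixel (R=0xFF, G=0x00, B=0x00, A=0x80) is stored in
            // tmpPixels as its premultiplied form (R=0x80, G=0x00, B=0x00, A=0x80).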
1690
1691             buffer = tmpPixels.get();
1692             rowBytes = 4 * width;
1693         }
1694     }
1695     if (NULL == fp) {
1696         fp.reset(GrConfigConversionEffect::Create(texture,
1697                                                       swapRAndB,
1698                                                       GrConfigConversionEffect::kNone_PMConversion,
1699                                                       textureMatrix));
1700     }
1701
1702     if (!this->writeTexturePixels(texture,
1703                                   0, 0, width, height,
1704                                   writeConfig, buffer, rowBytes,
1705                                   flags & ~kUnpremul_PixelOpsFlag)) {
1706         return false;
1707     }
1708
1709     // TODO: Usually this could go to fDrawBuffer but currently
1710     // writeRenderTargetPixels can be called in the midst of drawing another
1711     // object (e.g., when uploading a software-rendered path to the GPU while
1712     // drawing a rect). So we always draw directly to GrGpu and preserve the current geometry.
1713     // But that means we also have to flush the draw buffer if there is a pending IO operation to
1714     // the render target.
1715     if (!(kDontFlush_PixelOpsFlag & flags) && target->hasPendingIO()) {
1716         this->flush();
1717     }
1718     SkMatrix matrix;
1719     matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));
1720     GrDrawTarget::AutoGeometryAndStatePush agasp(fGpu, GrDrawTarget::kReset_ASRInit, &matrix);
1721     GrDrawState* drawState = fGpu->drawState();
1722     SkASSERT(fp);
1723     drawState->addColorProcessor(fp);
1724
1725     drawState->setRenderTarget(target);
1726
1727     fGpu->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
1728     return true;
1729 }
1730 ////////////////////////////////////////////////////////////////////////////////
1731
1732 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1733                                        BufferedDraw buffered,
1734                                        AutoRestoreEffects* are,
1735                                        AutoCheckFlush* acf) {
1736     // All users of this draw state should be freeing up all effects when they're done.
1737     // Otherwise effects that own resources may keep those resources alive indefinitely.
1738     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
1739              !fDrawState->hasGeometryProcessor());
1740
1741     if (NULL == fGpu) {
1742         return NULL;
1743     }
1744
1745     if (kNo_BufferedDraw == buffered && kYes_BufferedDraw == fLastDrawWasBuffered) {
1746         fDrawBuffer->flush();
1747         fLastDrawWasBuffered = kNo_BufferedDraw;
1748     }
1749     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1750     if (paint) {
1751         SkASSERT(are);
1752         SkASSERT(acf);
1753         are->set(fDrawState);
1754         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1755 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1756         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1757             !fDrawState->couldApplyCoverage(fGpu->caps())) {
1758             GrPrintf("Partial pixel coverage will be incorrectly blended.\n");
1759         }
1760 #endif
1761         // Clear any vertex attributes configured for the previous use of the
1762         // GrDrawState, which can affect which blend optimizations are in effect.
1763         fDrawState->setDefaultVertexAttribs();
1764     } else {
1765         fDrawState->reset(fViewMatrix);
1766         fDrawState->setRenderTarget(fRenderTarget.get());
1767     }
1768     GrDrawTarget* target;
1769     if (kYes_BufferedDraw == buffered) {
1770         fLastDrawWasBuffered = kYes_BufferedDraw;
1771         target = fDrawBuffer;
1772     } else {
1773         SkASSERT(kNo_BufferedDraw == buffered);
1774         fLastDrawWasBuffered = kNo_BufferedDraw;
1775         target = fGpu;
1776     }
1777     fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
1778                                                      !fClip->fClipStack->isWideOpen());
1779     target->setClip(fClip);
1780     SkASSERT(fDrawState == target->drawState());
1781     return target;
1782 }
1783
1784 /*
1785  * This method finds a path renderer that can draw the specified path on
1786  * the provided target.
1787  * Due to its expense, the software path renderer has been split out so it
1788  * can be individually allowed/disallowed via the "allowSW" boolean.
1789  */
1790 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1791                                            const SkStrokeRec& stroke,
1792                                            const GrDrawTarget* target,
1793                                            bool allowSW,
1794                                            GrPathRendererChain::DrawType drawType,
1795                                            GrPathRendererChain::StencilSupport* stencilSupport) {
1796
1797     if (NULL == fPathRendererChain) {
1798         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1799     }
1800
1801     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1802                                                              stroke,
1803                                                              target,
1804                                                              drawType,
1805                                                              stencilSupport);
1806
1807     if (NULL == pr && allowSW) {
1808         if (NULL == fSoftwarePathRenderer) {
1809             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1810         }
1811         pr = fSoftwarePathRenderer;
1812     }
1813
1814     return pr;
1815 }
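
// Illustrative sketch (assumed internal usage, not code from this file; the DrawType value is an
// assumption): a draw routine would typically query the chain like this, allowing the software
// fallback:
//
//     GrPathRendererChain::StencilSupport support;
//     GrPathRenderer* pr = this->getPathRenderer(path, strokeRec, target, true /* allowSW */,
//                                                GrPathRendererChain::kColor_DrawType, &support);
//     if (NULL == pr) {
//         // Not even the software path renderer could handle this path/stroke combination.
//     }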
1816
1817 ////////////////////////////////////////////////////////////////////////////////
1818 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1819     return fGpu->caps()->isConfigRenderable(config, withMSAA);
1820 }
1821
1822 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1823                                          SkScalar dpi) const {
1824     if (!this->isConfigRenderable(config, true)) {
1825         return 0;
1826     }
1827     int chosenSampleCount = 0;
1828     if (fGpu->caps()->pathRenderingSupport()) {
1829         if (dpi >= 250.0f) {
1830             chosenSampleCount = 4;
1831         } else {
1832             chosenSampleCount = 16;
1833         }
1834     }
1835     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1836         chosenSampleCount : 0;
1837 }
1838
1839 void GrContext::setupDrawBuffer() {
1840     SkASSERT(NULL == fDrawBuffer);
1841     SkASSERT(NULL == fDrawBufferVBAllocPool);
1842     SkASSERT(NULL == fDrawBufferIBAllocPool);
1843
1844     fDrawBufferVBAllocPool =
1845         SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu, false,
1846                                     DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1847                                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1848     fDrawBufferIBAllocPool =
1849         SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu, false,
1850                                    DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1851                                    DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1852
1853     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1854                                                    fDrawBufferVBAllocPool,
1855                                                    fDrawBufferIBAllocPool));
1856
1857     fDrawBuffer->setDrawState(fDrawState);
1858 }
1859
1860 GrDrawTarget* GrContext::getTextTarget() {
1861     return this->prepareToDraw(NULL, BUFFERED_DRAW, NULL, NULL);
1862 }
1863
1864 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1865     return fGpu->getQuadIndexBuffer();
1866 }
1867
1868 namespace {
1869 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1870     GrConfigConversionEffect::PMConversion pmToUPM;
1871     GrConfigConversionEffect::PMConversion upmToPM;
1872     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1873     *pmToUPMValue = pmToUPM;
1874     *upmToPMValue = upmToPM;
1875 }
1876 }
1877
1878 const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1879                                                           bool swapRAndB,
1880                                                           const SkMatrix& matrix) {
1881     if (!fDidTestPMConversions) {
1882         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1883         fDidTestPMConversions = true;
1884     }
1885     GrConfigConversionEffect::PMConversion pmToUPM =
1886         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1887     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1888         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1889     } else {
1890         return NULL;
1891     }
1892 }
1893
1894 const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1895                                                           bool swapRAndB,
1896                                                           const SkMatrix& matrix) {
1897     if (!fDidTestPMConversions) {
1898         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1899         fDidTestPMConversions = true;
1900     }
1901     GrConfigConversionEffect::PMConversion upmToPM =
1902         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1903     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1904         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1905     } else {
1906         return NULL;
1907     }
1908 }
1909
1910 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
1911     fResourceCache->purgeAsNeeded(1, resource->gpuMemorySize());
1912     fResourceCache->addResource(resourceKey, resource);
1913 }
1914
1915 GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1916     GrGpuResource* resource = fResourceCache->find(resourceKey);
1917     SkSafeRef(resource);
1918     return resource;
1919 }
1920
1921 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1922     fGpu->addGpuTraceMarker(marker);
1923     if (fDrawBuffer) {
1924         fDrawBuffer->addGpuTraceMarker(marker);
1925     }
1926 }
1927
1928 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1929     fGpu->removeGpuTraceMarker(marker);
1930     if (fDrawBuffer) {
1931         fDrawBuffer->removeGpuTraceMarker(marker);
1932     }
1933 }
1934
1935 ///////////////////////////////////////////////////////////////////////////////
1936 #if GR_CACHE_STATS
1937 void GrContext::printCacheStats() const {
1938     fResourceCache->printStats();
1939 }
1940 #endif
1941
1942 #if GR_GPU_STATS
1943 const GrContext::GPUStats* GrContext::gpuStats() const {
1944     return fGpu->gpuStats();
1945 }
1946 #endif
1947