
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrContext.h"

#include "effects/GrConfigConversionEffect.h"
#include "effects/GrDashingEffect.h"
#include "effects/GrSingleTextureEffect.h"

#include "GrAARectRenderer.h"
#include "GrBufferAllocPool.h"
#include "GrGpu.h"
#include "GrDistanceFieldTextContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrInOrderDrawBuffer.h"
#include "GrLayerCache.h"
#include "GrOvalRenderer.h"
#include "GrPathRenderer.h"
#include "GrPathUtils.h"
#include "GrResourceCache.h"
#include "GrResourceCache2.h"
#include "GrSoftwarePathRenderer.h"
#include "GrStencilBuffer.h"
#include "GrStencilAndCoverTextContext.h"
#include "GrStrokeInfo.h"
#include "GrSurfacePriv.h"
#include "GrTextStrike.h"
#include "GrTexturePriv.h"
#include "GrTraceMarker.h"
#include "GrTracing.h"
#include "SkDashPathPriv.h"
#include "SkConfig8888.h"
#include "SkGr.h"
#include "SkRRect.h"
#include "SkStrokeRec.h"
#include "SkTLazy.h"
#include "SkTLS.h"
#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    // change this to a 1 to see notifications when partial coverage fails
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#else
    #define GR_DEBUG_PARTIAL_COVERAGE_CHECK 0
#endif

static const size_t MAX_RESOURCE_CACHE_COUNT = GR_DEFAULT_RESOURCE_CACHE_COUNT_LIMIT;
static const size_t MAX_RESOURCE_CACHE_BYTES = GR_DEFAULT_RESOURCE_CACHE_MB_LIMIT * 1024 * 1024;

static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;

static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;

#define ASSERT_OWNED_RESOURCE(R) SkASSERT(!(R) || (R)->getContext() == this)

// Glorified typedef to avoid including GrDrawState.h in GrContext.h
class GrContext::AutoRestoreEffects : public GrDrawState::AutoRestoreEffects {};

class GrContext::AutoCheckFlush {
public:
    AutoCheckFlush(GrContext* context) : fContext(context) { SkASSERT(context); }

    ~AutoCheckFlush() {
        if (fContext->fFlushToReduceCacheSize) {
            fContext->flush();
        }
    }

private:
    GrContext* fContext;
};

GrContext* GrContext::Create(GrBackend backend, GrBackendContext backendContext,
                             const Options* opts) {
    GrContext* context;
    if (NULL == opts) {
        context = SkNEW_ARGS(GrContext, (Options()));
    } else {
        context = SkNEW_ARGS(GrContext, (*opts));
    }

    if (context->init(backend, backendContext)) {
        return context;
    } else {
        context->unref();
        return NULL;
    }
}

GrContext::GrContext(const Options& opts) : fOptions(opts) {
    fDrawState = NULL;
    fGpu = NULL;
    fClip = NULL;
    fPathRendererChain = NULL;
    fSoftwarePathRenderer = NULL;
    fResourceCache = NULL;
    fResourceCache2 = NULL;
    fFontCache = NULL;
    fDrawBuffer = NULL;
    fDrawBufferVBAllocPool = NULL;
    fDrawBufferIBAllocPool = NULL;
    fFlushToReduceCacheSize = false;
    fAARectRenderer = NULL;
    fOvalRenderer = NULL;
    fViewMatrix.reset();
    fMaxTextureSizeOverride = 1 << 20;
}

bool GrContext::init(GrBackend backend, GrBackendContext backendContext) {
    SkASSERT(NULL == fGpu);

    fGpu = GrGpu::Create(backend, backendContext, this);
    if (NULL == fGpu) {
        return false;
    }
    this->initCommon();
    return true;
}

void GrContext::initCommon() {
    fDrawState = SkNEW(GrDrawState);
    fGpu->setDrawState(fDrawState);

    fResourceCache = SkNEW_ARGS(GrResourceCache, (fGpu->caps(),
                                                  MAX_RESOURCE_CACHE_COUNT,
                                                  MAX_RESOURCE_CACHE_BYTES));
    fResourceCache->setOverbudgetCallback(OverbudgetCB, this);
    fResourceCache2 = SkNEW(GrResourceCache2);

    fFontCache = SkNEW_ARGS(GrFontCache, (fGpu));

    fLayerCache.reset(SkNEW_ARGS(GrLayerCache, (this)));

    fAARectRenderer = SkNEW_ARGS(GrAARectRenderer, (fGpu));
    fOvalRenderer = SkNEW(GrOvalRenderer);

    fDidTestPMConversions = false;

    this->setupDrawBuffer();
}

GrContext::~GrContext() {
    if (NULL == fGpu) {
        return;
    }

    this->flush();

    for (int i = 0; i < fCleanUpData.count(); ++i) {
        (*fCleanUpData[i].fFunc)(this, fCleanUpData[i].fInfo);
    }

    SkDELETE(fResourceCache2);
    fResourceCache2 = NULL;
    SkDELETE(fResourceCache);
    fResourceCache = NULL;
    SkDELETE(fFontCache);
    SkDELETE(fDrawBuffer);
    SkDELETE(fDrawBufferVBAllocPool);
    SkDELETE(fDrawBufferIBAllocPool);

    fAARectRenderer->unref();
    fOvalRenderer->unref();

    fGpu->unref();
    SkSafeUnref(fPathRendererChain);
    SkSafeUnref(fSoftwarePathRenderer);
    fDrawState->unref();
}

void GrContext::abandonContext() {
    // abandon first so that destructors
    // don't try to free the resources in the API.
    fResourceCache2->abandonAll();

    fGpu->contextAbandoned();

    // a path renderer may be holding onto resources that
    // are now unusable
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);

    delete fDrawBuffer;
    fDrawBuffer = NULL;

    delete fDrawBufferVBAllocPool;
    fDrawBufferVBAllocPool = NULL;

    delete fDrawBufferIBAllocPool;
    fDrawBufferIBAllocPool = NULL;

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();

    fFontCache->freeAll();
    fLayerCache->freeAll();
}

void GrContext::resetContext(uint32_t state) {
    fGpu->markContextDirty(state);
}

void GrContext::freeGpuResources() {
    this->flush();

    fGpu->purgeResources();
    if (fDrawBuffer) {
        fDrawBuffer->purgeResources();
    }

    fAARectRenderer->reset();
    fOvalRenderer->reset();

    fResourceCache->purgeAllUnlocked();
    fFontCache->freeAll();
    fLayerCache->freeAll();
    // a path renderer may be holding onto resources
    SkSafeSetNull(fPathRendererChain);
    SkSafeSetNull(fSoftwarePathRenderer);
}

void GrContext::getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const {
  if (resourceCount) {
    *resourceCount = fResourceCache->getCachedResourceCount();
  }
  if (resourceBytes) {
    *resourceBytes = fResourceCache->getCachedResourceBytes();
  }
}

GrTextContext* GrContext::createTextContext(GrRenderTarget* renderTarget,
                                            const SkDeviceProperties&
                                            leakyProperties,
                                            bool enableDistanceFieldFonts) {
    if (fGpu->caps()->pathRenderingSupport() && renderTarget->getStencilBuffer() &&
                                                renderTarget->isMultisampled()) {
        return GrStencilAndCoverTextContext::Create(this, leakyProperties);
    }

    return GrDistanceFieldTextContext::Create(this, leakyProperties, enableDistanceFieldFonts);
}

////////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::findAndRefTexture(const GrSurfaceDesc& desc,
                                        const GrCacheID& cacheID,
                                        const GrTextureParams* params) {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    if (resource) {
        resource->ref();
        return static_cast<GrSurface*>(resource)->asTexture();
    } else {
        return NULL;
    }
}

bool GrContext::isTextureInCache(const GrSurfaceDesc& desc,
                                 const GrCacheID& cacheID,
                                 const GrTextureParams* params) const {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);
    return fResourceCache->hasKey(resourceKey);
}

void GrContext::addStencilBuffer(GrStencilBuffer* sb) {
    ASSERT_OWNED_RESOURCE(sb);

    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(sb->width(),
                                                            sb->height(),
                                                            sb->numSamples());
    fResourceCache->addResource(resourceKey, sb);
}

GrStencilBuffer* GrContext::findStencilBuffer(int width, int height,
                                              int sampleCnt) {
    GrResourceKey resourceKey = GrStencilBuffer::ComputeKey(width,
                                                            height,
                                                            sampleCnt);
    GrGpuResource* resource = fResourceCache->find(resourceKey);
    return static_cast<GrStencilBuffer*>(resource);
}

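// Simple nearest-neighbor stretch of the src pixels into dst using 16.16
// fixed-point stepping; bpp is the number of bytes per pixel. No filtering
// is performed.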
static void stretch_image(void* dst,
                          int dstW,
                          int dstH,
                          const void* src,
                          int srcW,
                          int srcH,
                          size_t bpp) {
    SkFixed dx = (srcW << 16) / dstW;
    SkFixed dy = (srcH << 16) / dstH;

    SkFixed y = dy >> 1;

    size_t dstXLimit = dstW*bpp;
    for (int j = 0; j < dstH; ++j) {
        SkFixed x = dx >> 1;
        const uint8_t* srcRow = reinterpret_cast<const uint8_t *>(src) + (y>>16)*srcW*bpp;
        uint8_t* dstRow = reinterpret_cast<uint8_t *>(dst) + j*dstW*bpp;
        for (size_t i = 0; i < dstXLimit; i += bpp) {
            memcpy(dstRow + i, srcRow + (x>>16)*bpp, bpp);
            x += dx;
        }
        y += dy;
    }
}

namespace {

// position + local coordinate
extern const GrVertexAttrib gVertexAttribs[] = {
    {kVec2f_GrVertexAttribType, 0,               kPosition_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType, sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding}
};

};

// The desired texture is NPOT and tiled but that isn't supported by
// the current hardware. Resize the texture to be a POT
GrTexture* GrContext::createResizedTexture(const GrSurfaceDesc& desc,
                                           const GrCacheID& cacheID,
                                           const void* srcData,
                                           size_t rowBytes,
                                           bool filter) {
    SkAutoTUnref<GrTexture> clampedTexture(this->findAndRefTexture(desc, cacheID, NULL));
    if (NULL == clampedTexture) {
        clampedTexture.reset(this->createTexture(NULL, desc, cacheID, srcData, rowBytes));

        if (NULL == clampedTexture) {
            return NULL;
        }
    }

    GrSurfaceDesc rtDesc = desc;
    rtDesc.fFlags =  rtDesc.fFlags |
                     kRenderTarget_GrSurfaceFlag |
                     kNoStencil_GrSurfaceFlag;
    rtDesc.fWidth  = GrNextPow2(desc.fWidth);
    rtDesc.fHeight = GrNextPow2(desc.fHeight);

    GrTexture* texture = fGpu->createTexture(rtDesc, NULL, 0);

    if (texture) {
        GrDrawTarget::AutoStateRestore asr(fDrawBuffer, GrDrawTarget::kReset_ASRInit);
        GrDrawState* drawState = fDrawBuffer->drawState();
        drawState->setRenderTarget(texture->asRenderTarget());

        // if filtering is not desired then we want to ensure all
        // texels in the resampled image are copies of texels from
        // the original.
        GrTextureParams params(SkShader::kClamp_TileMode,
                               filter ? GrTextureParams::kBilerp_FilterMode :
                                        GrTextureParams::kNone_FilterMode);
        drawState->addColorTextureProcessor(clampedTexture, SkMatrix::I(), params);

        drawState->setVertexAttribs<gVertexAttribs>(SK_ARRAY_COUNT(gVertexAttribs),
                                                    2 * sizeof(SkPoint));

        GrDrawTarget::AutoReleaseGeometry arg(fDrawBuffer, 4, 0);

        if (arg.succeeded()) {
            SkPoint* verts = (SkPoint*) arg.vertices();
            verts[0].setIRectFan(0, 0, texture->width(), texture->height(), 2 * sizeof(SkPoint));
            verts[1].setIRectFan(0, 0, 1, 1, 2 * sizeof(SkPoint));
            fDrawBuffer->drawNonIndexed(kTriangleFan_GrPrimitiveType, 0, 4);
        }
    } else {
        // TODO: Our CPU stretch doesn't filter, yet we create separate
        // stretched textures depending on whether the texture params are
        // filtered or not. Either implement a filtered stretch blit on the
        // CPU or just create one texture when the FBO case fails.

        rtDesc.fFlags = kNone_GrSurfaceFlags;
        // no longer need to clamp at min RT size.
        rtDesc.fWidth  = GrNextPow2(desc.fWidth);
        rtDesc.fHeight = GrNextPow2(desc.fHeight);

        // We shouldn't be resizing a compressed texture.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        size_t bpp = GrBytesPerPixel(desc.fConfig);
        GrAutoMalloc<128*128*4> stretchedPixels(bpp * rtDesc.fWidth * rtDesc.fHeight);
        stretch_image(stretchedPixels.get(), rtDesc.fWidth, rtDesc.fHeight,
                      srcData, desc.fWidth, desc.fHeight, bpp);

        size_t stretchedRowBytes = rtDesc.fWidth * bpp;

        texture = fGpu->createTexture(rtDesc, stretchedPixels.get(), stretchedRowBytes);
        SkASSERT(texture);
    }

    return texture;
}

GrTexture* GrContext::createTexture(const GrTextureParams* params,
                                    const GrSurfaceDesc& desc,
                                    const GrCacheID& cacheID,
                                    const void* srcData,
                                    size_t rowBytes,
                                    GrResourceKey* cacheKey) {
    GrResourceKey resourceKey = GrTexturePriv::ComputeKey(fGpu, params, desc, cacheID);

    GrTexture* texture;
    if (GrTexturePriv::NeedsResizing(resourceKey)) {
        // We do not know how to resize compressed textures.
        SkASSERT(!GrPixelConfigIsCompressed(desc.fConfig));

        texture = this->createResizedTexture(desc, cacheID,
                                             srcData, rowBytes,
                                             GrTexturePriv::NeedsBilerp(resourceKey));
    } else {
        texture = fGpu->createTexture(desc, srcData, rowBytes);
    }

    if (texture) {
        fResourceCache->addResource(resourceKey, texture);

        if (cacheKey) {
            *cacheKey = resourceKey;
        }
    }

    return texture;
}

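// Creates a brand new texture for scratch use and registers it with the
// resource cache under its scratch key.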
GrTexture* GrContext::createNewScratchTexture(const GrSurfaceDesc& desc) {
    GrTexture* texture = fGpu->createTexture(desc, NULL, 0);
    if (!texture) {
        return NULL;
    }
    fResourceCache->addResource(texture->getScratchKey(), texture);
    return texture;
}

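// Returns a scratch texture matching 'inDesc' either exactly or loosely
// (pow2-padded dimensions when kApprox is requested), preferring a cached
// texture and creating a new one on a cache miss.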
GrTexture* GrContext::refScratchTexture(const GrSurfaceDesc& inDesc, ScratchTexMatch match,
                                        bool calledDuringFlush) {
    // kNoStencil has no meaning if kRT isn't set.
    SkASSERT((inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             !(inDesc.fFlags & kNoStencil_GrSurfaceFlag));

    // Make sure caller has checked for renderability if kRT is set.
    SkASSERT(!(inDesc.fFlags & kRenderTarget_GrSurfaceFlag) ||
             this->isConfigRenderable(inDesc.fConfig, inDesc.fSampleCnt > 0));

    SkTCopyOnFirstWrite<GrSurfaceDesc> desc(inDesc);

    if (fGpu->caps()->reuseScratchTextures() || (desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
        GrSurfaceFlags origFlags = desc->fFlags;
        if (kApprox_ScratchTexMatch == match) {
            // bin by pow2 with a reasonable min
            static const int MIN_SIZE = 16;
            GrSurfaceDesc* wdesc = desc.writable();
            wdesc->fWidth  = SkTMax(MIN_SIZE, GrNextPow2(desc->fWidth));
            wdesc->fHeight = SkTMax(MIN_SIZE, GrNextPow2(desc->fHeight));
        }

        do {
            GrResourceKey key = GrTexturePriv::ComputeScratchKey(*desc);
            uint32_t scratchFlags = 0;
            if (calledDuringFlush) {
                scratchFlags = GrResourceCache2::kRequireNoPendingIO_ScratchFlag;
            } else  if (!(desc->fFlags & kRenderTarget_GrSurfaceFlag)) {
                // If it is not a render target then it will most likely be populated by
                // writePixels() which will trigger a flush if the texture has pending IO.
                scratchFlags = GrResourceCache2::kPreferNoPendingIO_ScratchFlag;
            }
            GrGpuResource* resource = fResourceCache2->findAndRefScratchResource(key, scratchFlags);
            if (resource) {
                fResourceCache->makeResourceMRU(resource);
                return static_cast<GrSurface*>(resource)->asTexture();
            }

            if (kExact_ScratchTexMatch == match) {
                break;
            }
            // We had a cache miss and we are in approx mode, relax the fit of the flags.

            // We no longer try to reuse textures that were previously used as render targets in
            // situations where no RT is needed; doing otherwise can confuse the video driver and
            // cause significant performance problems in some cases.
            if (desc->fFlags & kNoStencil_GrSurfaceFlag) {
                desc.writable()->fFlags = desc->fFlags & ~kNoStencil_GrSurfaceFlag;
            } else {
                break;
            }

        } while (true);

        desc.writable()->fFlags = origFlags;
    }

    GrTexture* texture = this->createNewScratchTexture(*desc);
    SkASSERT(NULL == texture ||
             texture->getScratchKey() == GrTexturePriv::ComputeScratchKey(*desc));
    return texture;
}

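// Overbudget callback installed on the resource cache: request a flush of the
// draw buffer at the next opportunity so pooled resources can be purged.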
bool GrContext::OverbudgetCB(void* data) {
    SkASSERT(data);

    GrContext* context = reinterpret_cast<GrContext*>(data);

    // Flush the InOrderDrawBuffer to possibly free up some textures
    context->fFlushToReduceCacheSize = true;

    return true;
}


GrTexture* GrContext::createUncachedTexture(const GrSurfaceDesc& descIn,
                                            void* srcData,
                                            size_t rowBytes) {
    GrSurfaceDesc descCopy = descIn;
    return fGpu->createTexture(descCopy, srcData, rowBytes);
}

void GrContext::getResourceCacheLimits(int* maxTextures, size_t* maxTextureBytes) const {
    fResourceCache->getLimits(maxTextures, maxTextureBytes);
}

void GrContext::setResourceCacheLimits(int maxTextures, size_t maxTextureBytes) {
    fResourceCache->setLimits(maxTextures, maxTextureBytes);
}

int GrContext::getMaxTextureSize() const {
    return SkTMin(fGpu->caps()->maxTextureSize(), fMaxTextureSizeOverride);
}

int GrContext::getMaxRenderTargetSize() const {
    return fGpu->caps()->maxRenderTargetSize();
}

int GrContext::getMaxSampleCount() const {
    return fGpu->caps()->maxSampleCount();
}

///////////////////////////////////////////////////////////////////////////////

GrTexture* GrContext::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    return fGpu->wrapBackendTexture(desc);
}

GrRenderTarget* GrContext::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    return fGpu->wrapBackendRenderTarget(desc);
}

///////////////////////////////////////////////////////////////////////////////

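// Returns true if an index-8 texture of the given dimensions can be created
// and used with the given params (tiling a non-power-of-two texture requires
// NPOT-tile support in the hardware).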
bool GrContext::supportsIndex8PixelConfig(const GrTextureParams* params,
                                          int width, int height) const {
    const GrDrawTargetCaps* caps = fGpu->caps();
    if (!caps->isConfigTexturable(kIndex_8_GrPixelConfig)) {
        return false;
    }

    bool isPow2 = SkIsPow2(width) && SkIsPow2(height);

    if (!isPow2) {
        bool tiled = params && params->isTiled();
        if (tiled && !caps->npotTextureTileSupport()) {
            return false;
        }
    }
    return true;
}


////////////////////////////////////////////////////////////////////////////////

void GrContext::clear(const SkIRect* rect,
                      const GrColor color,
                      bool canIgnoreRect,
                      GrRenderTarget* renderTarget) {
    ASSERT_OWNED_RESOURCE(renderTarget);
    SkASSERT(renderTarget);

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::clear", this);
    GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
    if (NULL == target) {
        return;
    }
    target->clear(rect, color, canIgnoreRect, renderTarget);
}

void GrContext::drawPaint(const GrPaint& origPaint) {
    // set rect to be big enough to fill the space, but not super-huge, so we
    // don't overflow fixed-point implementations
    SkRect r;
    r.setLTRB(0, 0,
              SkIntToScalar(getRenderTarget()->width()),
              SkIntToScalar(getRenderTarget()->height()));
    SkMatrix inverse;
    SkTCopyOnFirstWrite<GrPaint> paint(origPaint);
    AutoMatrix am;
    GR_CREATE_TRACE_MARKER_CONTEXT("GrContext::drawPaint", this);

    // We attempt to map r by the inverse matrix and draw that. mapRect will
    // map the four corners and bound them with a new rect. This will not
    // produce a correct result for some perspective matrices.
    if (!this->getMatrix().hasPerspective()) {
        if (!fViewMatrix.invert(&inverse)) {
            SkDebugf("Could not invert matrix\n");
            return;
        }
        inverse.mapRect(&r);
    } else {
        if (!am.setIdentity(this, paint.writable())) {
            SkDebugf("Could not invert matrix\n");
            return;
        }
    }
    // by definition this fills the entire clip, no need for AA
    if (paint->isAntiAlias()) {
        paint.writable()->setAntiAlias(false);
    }
    this->drawRect(*paint, r);
}

#ifdef SK_DEVELOPER
void GrContext::dumpFontCache() const {
    fFontCache->dump();
}
#endif

////////////////////////////////////////////////////////////////////////////////

/*  create a triangle strip that strokes the specified rect. There are 8
 unique vertices, but we repeat the last 2 to close up. Alternatively we
 could use an indices array, and then only send 8 verts, but not sure that
 would be faster.
 */
static void setStrokeRectStrip(SkPoint verts[10], SkRect rect,
                               SkScalar width) {
    const SkScalar rad = SkScalarHalf(width);
    rect.sort();

    verts[0].set(rect.fLeft + rad, rect.fTop + rad);
    verts[1].set(rect.fLeft - rad, rect.fTop - rad);
    verts[2].set(rect.fRight - rad, rect.fTop + rad);
    verts[3].set(rect.fRight + rad, rect.fTop - rad);
    verts[4].set(rect.fRight - rad, rect.fBottom - rad);
    verts[5].set(rect.fRight + rad, rect.fBottom + rad);
    verts[6].set(rect.fLeft + rad, rect.fBottom - rad);
    verts[7].set(rect.fLeft - rad, rect.fBottom + rad);
    verts[8] = verts[0];
    verts[9] = verts[1];
}

static inline bool is_irect(const SkRect& r) {
  return SkScalarIsInt(r.fLeft)  && SkScalarIsInt(r.fTop) &&
         SkScalarIsInt(r.fRight) && SkScalarIsInt(r.fBottom);
}

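// Decides whether a rect draw can take the analytic anti-aliased rect path,
// given the current draw state and matrix. On success the rect mapped to
// device space is returned in devBoundRect.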
static bool apply_aa_to_rect(GrDrawTarget* target,
                             const SkRect& rect,
                             SkScalar strokeWidth,
                             const SkMatrix& combinedMatrix,
                             SkRect* devBoundRect) {
    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
#ifdef SK_DEBUG
        //SkDebugf("Turning off AA to correctly apply blend.\n");
#endif
        return false;
    }
    const GrDrawState& drawState = target->getDrawState();
    if (drawState.getRenderTarget()->isMultisampled()) {
        return false;
    }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    if (strokeWidth >= 0) {
#endif
        if (!combinedMatrix.preservesAxisAlignment()) {
            return false;
        }

#if defined(SHADER_AA_FILL_RECT) || !defined(IGNORE_ROT_AA_RECT_OPT)
    } else {
        if (!combinedMatrix.preservesRightAngles()) {
            return false;
        }
    }
#endif

    combinedMatrix.mapRect(devBoundRect, rect);
    if (strokeWidth < 0) {
        return !is_irect(*devBoundRect);
    }

    return true;
}

static inline bool rect_contains_inclusive(const SkRect& rect, const SkPoint& point) {
    return point.fX >= rect.fLeft && point.fX <= rect.fRight &&
           point.fY >= rect.fTop && point.fY <= rect.fBottom;
}

void GrContext::drawRect(const GrPaint& paint,
                         const SkRect& rect,
                         const GrStrokeInfo* strokeInfo) {
    if (strokeInfo && strokeInfo->isDashed()) {
        SkPath path;
        path.addRect(rect);
        this->drawPath(paint, path, *strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRect", target);
    SkScalar width = NULL == strokeInfo ? -1 : strokeInfo->getStrokeRec().getWidth();
    SkMatrix matrix = target->drawState()->getViewMatrix();

    // Check if this is a full RT draw and can be replaced with a clear. We don't bother checking
    // cases where the RT is fully inside a stroke.
    if (width < 0) {
        SkRect rtRect;
        target->getDrawState().getRenderTarget()->getBoundsRect(&rtRect);
        SkRect clipSpaceRTRect = rtRect;
        bool checkClip = false;
        if (this->getClip()) {
            checkClip = true;
            clipSpaceRTRect.offset(SkIntToScalar(this->getClip()->fOrigin.fX),
                                   SkIntToScalar(this->getClip()->fOrigin.fY));
        }
        // Does the clip contain the entire RT?
        if (!checkClip || target->getClip()->fClipStack->quickContains(clipSpaceRTRect)) {
            SkMatrix invM;
            if (!matrix.invert(&invM)) {
                return;
            }
            // Does the rect bound the RT?
            SkPoint srcSpaceRTQuad[4];
            invM.mapRectToQuad(srcSpaceRTQuad, rtRect);
            if (rect_contains_inclusive(rect, srcSpaceRTQuad[0]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[1]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[2]) &&
                rect_contains_inclusive(rect, srcSpaceRTQuad[3])) {
                // Will it blend?
                GrColor clearColor;
                if (paint.isOpaqueAndConstantColor(&clearColor)) {
                    target->clear(NULL, clearColor, true, fRenderTarget);
                    return;
                }
            }
        }
    }

    SkRect devBoundRect;
    bool needAA = paint.isAntiAlias() &&
                  !target->getDrawState().getRenderTarget()->isMultisampled();
    bool doAA = needAA && apply_aa_to_rect(target, rect, width, matrix, &devBoundRect);

    const SkStrokeRec& strokeRec = strokeInfo->getStrokeRec();

    if (doAA) {
        GrDrawState::AutoViewMatrixRestore avmr;
        if (!avmr.setIdentity(target->drawState())) {
            return;
        }
        if (width >= 0) {
            fAARectRenderer->strokeAARect(target, rect,
                                          matrix, devBoundRect,
                                          strokeRec);
        } else {
            // filled AA rect
            fAARectRenderer->fillAARect(target,
                                        rect, matrix, devBoundRect);
        }
        return;
    }

    if (width >= 0) {
        // TODO: consider making static vertex buffers for these cases.
        // Hairline could be done by just adding closing vertex to
        // unitSquareVertexBuffer()

        static const int worstCaseVertCount = 10;
        target->drawState()->setDefaultVertexAttribs();
        GrDrawTarget::AutoReleaseGeometry geo(target, worstCaseVertCount, 0);

        if (!geo.succeeded()) {
            SkDebugf("Failed to get space for vertices!\n");
            return;
        }

        GrPrimitiveType primType;
        int vertCount;
        SkPoint* vertex = geo.positions();

        if (width > 0) {
            vertCount = 10;
            primType = kTriangleStrip_GrPrimitiveType;
            setStrokeRectStrip(vertex, rect, width);
        } else {
            // hairline
            vertCount = 5;
            primType = kLineStrip_GrPrimitiveType;
            vertex[0].set(rect.fLeft, rect.fTop);
            vertex[1].set(rect.fRight, rect.fTop);
            vertex[2].set(rect.fRight, rect.fBottom);
            vertex[3].set(rect.fLeft, rect.fBottom);
            vertex[4].set(rect.fLeft, rect.fTop);
        }

        target->drawNonIndexed(primType, 0, vertCount);
    } else {
        // filled BW rect
        target->drawSimpleRect(rect);
    }
}

void GrContext::drawRectToRect(const GrPaint& paint,
                               const SkRect& dstRect,
                               const SkRect& localRect,
                               const SkMatrix* localMatrix) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRectToRect", target);

    target->drawRect(dstRect, &localRect, localMatrix);
}

namespace {

extern const GrVertexAttrib gPosUVColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding },
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint), kLocalCoord_GrVertexAttribBinding },
    {kVec4ub_GrVertexAttribType, 2*sizeof(SkPoint), kColor_GrVertexAttribBinding}
};

static const size_t kPosUVAttribsSize = 2 * sizeof(SkPoint);
static const size_t kPosUVColorAttribsSize = 2 * sizeof(SkPoint) + sizeof(GrColor);

extern const GrVertexAttrib gPosColorAttribs[] = {
    {kVec2f_GrVertexAttribType,  0, kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint), kColor_GrVertexAttribBinding},
};

static const size_t kPosAttribsSize = sizeof(SkPoint);
static const size_t kPosColorAttribsSize = sizeof(SkPoint) + sizeof(GrColor);

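// Chooses a vertex attribute layout based on which optional per-vertex arrays
// (texture coordinates, colors) were supplied and returns their byte offsets
// (-1 when absent).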
static void set_vertex_attributes(GrDrawState* drawState,
                                  const SkPoint* texCoords,
                                  const GrColor* colors,
                                  int* colorOffset,
                                  int* texOffset) {
    *texOffset = -1;
    *colorOffset = -1;

    if (texCoords && colors) {
        *texOffset = sizeof(SkPoint);
        *colorOffset = 2*sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(3, kPosUVColorAttribsSize);
    } else if (texCoords) {
        *texOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosUVColorAttribs>(2, kPosUVAttribsSize);
    } else if (colors) {
        *colorOffset = sizeof(SkPoint);
        drawState->setVertexAttribs<gPosColorAttribs>(2, kPosColorAttribsSize);
    } else {
        drawState->setVertexAttribs<gPosColorAttribs>(1, kPosAttribsSize);
    }
}

};

void GrContext::drawVertices(const GrPaint& paint,
                             GrPrimitiveType primitiveType,
                             int vertexCount,
                             const SkPoint positions[],
                             const SkPoint texCoords[],
                             const GrColor colors[],
                             const uint16_t indices[],
                             int indexCount) {
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget::AutoReleaseGeometry geo; // must be inside AutoCheckFlush scope

    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER("GrContext::drawVertices", target);

    int colorOffset = -1, texOffset = -1;
    set_vertex_attributes(drawState, texCoords, colors, &colorOffset, &texOffset);

    size_t VertexStride = drawState->getVertexStride();
    if (!geo.set(target, vertexCount, indexCount)) {
        SkDebugf("Failed to get space for vertices!\n");
        return;
    }
    void* curVertex = geo.vertices();

    for (int i = 0; i < vertexCount; ++i) {
        *((SkPoint*)curVertex) = positions[i];

        if (texOffset >= 0) {
            *(SkPoint*)((intptr_t)curVertex + texOffset) = texCoords[i];
        }
        if (colorOffset >= 0) {
            *(GrColor*)((intptr_t)curVertex + colorOffset) = colors[i];
        }
        curVertex = (void*)((intptr_t)curVertex + VertexStride);
    }

    // we don't currently apply offscreen AA to this path. Need improved
    // management of GrDrawTarget's geometry to avoid copying points per-tile.
    if (indices) {
        uint16_t* curIndex = (uint16_t*)geo.indices();
        for (int i = 0; i < indexCount; ++i) {
            curIndex[i] = indices[i];
        }
        target->drawIndexed(primitiveType, 0, 0, vertexCount, indexCount);
    } else {
        target->drawNonIndexed(primitiveType, 0, vertexCount);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawRRect(const GrPaint& paint,
                          const SkRRect& rrect,
                          const GrStrokeInfo& strokeInfo) {
    if (rrect.isEmpty()) {
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addRRect(rrect);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawRRect", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    if (!fOvalRenderer->drawRRect(target, this, paint.isAntiAlias(), rrect, strokeRec)) {
        SkPath path;
        path.addRRect(rrect);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawDRRect(const GrPaint& paint,
                           const SkRRect& outer,
                           const SkRRect& inner) {
    if (outer.isEmpty()) {
       return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);

    GR_CREATE_TRACE_MARKER("GrContext::drawDRRect", target);

    if (!fOvalRenderer->drawDRRect(target, this, paint.isAntiAlias(), outer, inner)) {
        SkPath path;
        path.addRRect(inner);
        path.addRRect(outer);
        path.setFillType(SkPath::kEvenOdd_FillType);

        GrStrokeInfo fillRec(SkStrokeRec::kFill_InitStyle);
        this->internalDrawPath(target, paint.isAntiAlias(), path, fillRec);
    }
}

///////////////////////////////////////////////////////////////////////////////

void GrContext::drawOval(const GrPaint& paint,
                         const SkRect& oval,
                         const GrStrokeInfo& strokeInfo) {
    if (oval.isEmpty()) {
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPath path;
        path.addOval(oval);
        this->drawPath(paint, path, strokeInfo);
        return;
    }

    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }

    GR_CREATE_TRACE_MARKER("GrContext::drawOval", target);

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();


    if (!fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), oval, strokeRec)) {
        SkPath path;
        path.addOval(oval);
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

// Can 'path' be drawn as a pair of filled nested rectangles?
static bool is_nested_rects(GrDrawTarget* target,
                            const SkPath& path,
                            const SkStrokeRec& stroke,
                            SkRect rects[2]) {
    SkASSERT(stroke.isFillStyle());

    if (path.isInverseFillType()) {
        return false;
    }

    const GrDrawState& drawState = target->getDrawState();

    // TODO: this restriction could be lifted if we were willing to apply
    // the matrix to all the points individually rather than just to the rect
    if (!drawState.getViewMatrix().preservesAxisAlignment()) {
        return false;
    }

    if (!target->getDrawState().canTweakAlphaForCoverage() &&
        target->shouldDisableCoverageAAForBlend()) {
        return false;
    }

    SkPath::Direction dirs[2];
    if (!path.isNestedRects(rects, dirs)) {
        return false;
    }

    if (SkPath::kWinding_FillType == path.getFillType() && dirs[0] == dirs[1]) {
        // The two rects need to be wound opposite to each other
        return false;
    }

    // Right now, nested rects where the margin is not the same width
    // all around do not render correctly
    const SkScalar* outer = rects[0].asScalars();
    const SkScalar* inner = rects[1].asScalars();

    bool allEq = true;

    SkScalar margin = SkScalarAbs(outer[0] - inner[0]);
    bool allGoE1 = margin >= SK_Scalar1;

    for (int i = 1; i < 4; ++i) {
        SkScalar temp = SkScalarAbs(outer[i] - inner[i]);
        if (temp < SK_Scalar1) {
            allGoE1 = false;
        }
        if (!SkScalarNearlyEqual(margin, temp)) {
            allEq = false;
        }
    }

    return allEq || allGoE1;
}

void GrContext::drawPath(const GrPaint& paint, const SkPath& path, const GrStrokeInfo& strokeInfo) {

    if (path.isEmpty()) {
       if (path.isInverseFillType()) {
           this->drawPaint(paint);
       }
       return;
    }

    if (strokeInfo.isDashed()) {
        SkPoint pts[2];
        if (path.isLine(pts)) {
            AutoRestoreEffects are;
            AutoCheckFlush acf(this);
            GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
            if (NULL == target) {
                return;
            }
            GrDrawState* drawState = target->drawState();

            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (avmr.setIdentity(target->drawState())) {
                if (GrDashingEffect::DrawDashLine(pts, paint, strokeInfo, fGpu, target,
                                                  origViewMatrix)) {
                    return;
                }
            }
        }

        // Filter dashed path into new path with the dashing applied
        const SkPathEffect::DashInfo& info = strokeInfo.getDashInfo();
        SkTLazy<SkPath> effectPath;
        GrStrokeInfo newStrokeInfo(strokeInfo, false);
        SkStrokeRec* stroke = newStrokeInfo.getStrokeRecPtr();
        if (SkDashPath::FilterDashPath(effectPath.init(), path, stroke, NULL, info)) {
            this->drawPath(paint, *effectPath.get(), newStrokeInfo);
            return;
        }

        this->drawPath(paint, path, newStrokeInfo);
        return;
    }

    // Note that internalDrawPath may sw-rasterize the path into a scratch texture.
    // Scratch textures can be recycled after they are returned to the texture
    // cache. This presents a potential hazard for buffered drawing. However,
    // the writePixels that uploads to the scratch will perform a flush so we're
    // OK.
    AutoRestoreEffects are;
    AutoCheckFlush acf(this);
    GrDrawTarget* target = this->prepareToDraw(&paint, &are, &acf);
    if (NULL == target) {
        return;
    }
    GrDrawState* drawState = target->drawState();

    GR_CREATE_TRACE_MARKER1("GrContext::drawPath", target, "Is Convex", path.isConvex());

    const SkStrokeRec& strokeRec = strokeInfo.getStrokeRec();

    bool useCoverageAA = paint.isAntiAlias() && !drawState->getRenderTarget()->isMultisampled();

    if (useCoverageAA && strokeRec.getWidth() < 0 && !path.isConvex()) {
        // Concave AA paths are expensive - try to avoid them for special cases
        SkRect rects[2];

        if (is_nested_rects(target, path, strokeRec, rects)) {
            SkMatrix origViewMatrix = drawState->getViewMatrix();
            GrDrawState::AutoViewMatrixRestore avmr;
            if (!avmr.setIdentity(target->drawState())) {
                return;
            }

            fAARectRenderer->fillAANestedRects(target, rects, origViewMatrix);
            return;
        }
    }

    SkRect ovalRect;
    bool isOval = path.isOval(&ovalRect);

    if (!isOval || path.isInverseFillType()
        || !fOvalRenderer->drawOval(target, this, paint.isAntiAlias(), ovalRect, strokeRec)) {
        this->internalDrawPath(target, paint.isAntiAlias(), path, strokeInfo);
    }
}

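// Draws the path with the best available GrPathRenderer; if none handles the
// stroked path directly, the stroke is converted to a fill and the software
// path renderer is allowed as a last resort.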
void GrContext::internalDrawPath(GrDrawTarget* target, bool useAA, const SkPath& path,
                                 const GrStrokeInfo& strokeInfo) {
    SkASSERT(!path.isEmpty());

    GR_CREATE_TRACE_MARKER("GrContext::internalDrawPath", target);


    // An assumption here is that the path renderer would use some form of
    // tweaking the src color (either the input alpha or in the frag shader)
    // to implement AA. If we have some future driver-mojo path AA that can do
    // the right thing WRT the blend then we'll need some query on the PR.
    bool useCoverageAA = useAA &&
        !target->getDrawState().getRenderTarget()->isMultisampled() &&
        !target->shouldDisableCoverageAAForBlend();


    GrPathRendererChain::DrawType type =
        useCoverageAA ? GrPathRendererChain::kColorAntiAlias_DrawType :
                           GrPathRendererChain::kColor_DrawType;

    const SkPath* pathPtr = &path;
    SkTLazy<SkPath> tmpPath;
    SkTCopyOnFirstWrite<SkStrokeRec> stroke(strokeInfo.getStrokeRec());

    // Try a 1st time without stroking the path and without allowing the SW renderer
    GrPathRenderer* pr = this->getPathRenderer(*pathPtr, *stroke, target, false, type);

    if (NULL == pr) {
        if (!GrPathRenderer::IsStrokeHairlineOrEquivalent(*stroke, this->getMatrix(), NULL)) {
            // It didn't work the 1st time, so try again with the stroked path
            if (stroke->applyToPath(tmpPath.init(), *pathPtr)) {
                pathPtr = tmpPath.get();
                stroke.writable()->setFillStyle();
                if (pathPtr->isEmpty()) {
                    return;
                }
            }
        }

        // This time, allow SW renderer
        pr = this->getPathRenderer(*pathPtr, *stroke, target, true, type);
    }

    if (NULL == pr) {
#ifdef SK_DEBUG
        SkDebugf("Unable to find path renderer compatible with path.\n");
#endif
        return;
    }

    pr->drawPath(*pathPtr, *stroke, target, useCoverageAA);
}

////////////////////////////////////////////////////////////////////////////////

void GrContext::flush(int flagsBitfield) {
    if (NULL == fDrawBuffer) {
        return;
    }

    if (kDiscard_FlushBit & flagsBitfield) {
        fDrawBuffer->reset();
    } else {
        fDrawBuffer->flush();
    }
    fResourceCache->purgeAsNeeded();
    fFlushToReduceCacheSize = false;
}

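// CPU fallback that converts unpremultiplied pixels to premultiplied, used
// when a GPU conversion effect is not available.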
bool sw_convert_to_premul(GrPixelConfig srcConfig, int width, int height, size_t inRowBytes,
                          const void* inPixels, size_t outRowBytes, void* outPixels) {
    SkSrcPixelInfo srcPI;
    if (!GrPixelConfig2ColorType(srcConfig, &srcPI.fColorType)) {
        return false;
    }
    srcPI.fAlphaType = kUnpremul_SkAlphaType;
    srcPI.fPixels = inPixels;
    srcPI.fRowBytes = inRowBytes;

    SkDstPixelInfo dstPI;
    dstPI.fColorType = srcPI.fColorType;
    dstPI.fAlphaType = kPremul_SkAlphaType;
    dstPI.fPixels = outPixels;
    dstPI.fRowBytes = outRowBytes;

    return srcPI.convertPixelsTo(&dstPI, width, height);
}

bool GrContext::writeSurfacePixels(GrSurface* surface,
                                   int left, int top, int width, int height,
                                   GrPixelConfig srcConfig, const void* buffer, size_t rowBytes,
                                   uint32_t pixelOpsFlags) {

    {
        GrTexture* texture = NULL;
        if (!(kUnpremul_PixelOpsFlag & pixelOpsFlags) && (texture = surface->asTexture()) &&
            fGpu->canWriteTexturePixels(texture, srcConfig)) {

            if (!(kDontFlush_PixelOpsFlag & pixelOpsFlags) &&
                surface->surfacePriv().hasPendingIO()) {
                this->flush();
            }
            return fGpu->writeTexturePixels(texture, left, top, width, height,
                                            srcConfig, buffer, rowBytes);
            // Don't need to check kFlushWrites_PixelOp here, we just did a direct write so the
            // upload is already flushed.
        }
    }

    // If we didn't do a direct texture write then we upload the pixels to a texture and draw.
    GrRenderTarget* renderTarget = surface->asRenderTarget();
    if (NULL == renderTarget) {
        return false;
    }

    // We ignore the preferred config unless it is an R/B swap of the src config. In that case
    // we will upload the original src data to a scratch texture but we will spoof it as the swapped
    // config. This scratch will then have R and B swapped. We correct for this by swapping again
    // when drawing the scratch to the dst using a conversion effect.
    bool swapRAndB = false;
    GrPixelConfig writeConfig = srcConfig;
    if (GrPixelConfigSwapRAndB(srcConfig) ==
        fGpu->preferredWritePixelsConfig(srcConfig, renderTarget->config())) {
        writeConfig = GrPixelConfigSwapRAndB(srcConfig);
        swapRAndB = true;
    }

    GrSurfaceDesc desc;
    desc.fWidth = width;
    desc.fHeight = height;
    desc.fConfig = writeConfig;
    SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, kApprox_ScratchTexMatch));
    if (!texture) {
        return false;
    }

    SkAutoTUnref<const GrFragmentProcessor> fp;
    SkMatrix textureMatrix;
    textureMatrix.setIDiv(texture->width(), texture->height());

    // allocate a tmp buffer and sw convert the pixels to premul
    SkAutoSTMalloc<128 * 128, uint32_t> tmpPixels(0);

    if (kUnpremul_PixelOpsFlag & pixelOpsFlags) {
        if (!GrPixelConfigIs8888(srcConfig)) {
            return false;
        }
        fp.reset(this->createUPMToPMEffect(texture, swapRAndB, textureMatrix));
        // handle the unpremul step on the CPU if we couldn't create an effect to do it.
        if (NULL == fp) {
            size_t tmpRowBytes = 4 * width;
            tmpPixels.reset(width * height);
            if (!sw_convert_to_premul(srcConfig, width, height, rowBytes, buffer, tmpRowBytes,
                                      tmpPixels.get())) {
                return false;
            }
            rowBytes = tmpRowBytes;
            buffer = tmpPixels.get();
        }
    }
    if (NULL == fp) {
        fp.reset(GrConfigConversionEffect::Create(texture,
                                                  swapRAndB,
                                                  GrConfigConversionEffect::kNone_PMConversion,
                                                  textureMatrix));
    }

    // Even if the client told us not to flush, we still flush here. The client may have known that
    // writes to the original surface caused no data hazards, but they can't know that the scratch
    // we just got is safe.
    if (texture->surfacePriv().hasPendingIO()) {
        this->flush();
    }
    if (!fGpu->writeTexturePixels(texture, 0, 0, width, height,
                                  writeConfig, buffer, rowBytes)) {
        return false;
    }

    SkMatrix matrix;
    matrix.setTranslate(SkIntToScalar(left), SkIntToScalar(top));

    // This function can be called in the midst of drawing another object (e.g., when uploading a
    // SW-rasterized clip while issuing a draw). So we push the current geometry state before
    // drawing a rect to the render target.
    // The bracket ensures we pop the stack if we wind up flushing below.
    {
        GrDrawTarget* drawTarget = this->prepareToDraw(NULL, NULL, NULL);
        GrDrawTarget::AutoGeometryAndStatePush agasp(drawTarget, GrDrawTarget::kReset_ASRInit,
                                                     &matrix);
        GrDrawState* drawState = drawTarget->drawState();
        drawState->addColorProcessor(fp);
        drawState->setRenderTarget(renderTarget);
        drawState->disableState(GrDrawState::kClip_StateBit);
        drawTarget->drawSimpleRect(SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height)));
    }

    if (kFlushWrites_PixelOp & pixelOpsFlags) {
        this->flushSurfaceWrites(surface);
    }

    return true;
}

// toggles between RGBA and BGRA
static SkColorType toggle_colortype32(SkColorType ct) {
    if (kRGBA_8888_SkColorType == ct) {
        return kBGRA_8888_SkColorType;
    } else {
        SkASSERT(kBGRA_8888_SkColorType == ct);
        return kRGBA_8888_SkColorType;
    }
}

bool GrContext::readRenderTargetPixels(GrRenderTarget* target,
                                       int left, int top, int width, int height,
                                       GrPixelConfig dstConfig, void* buffer, size_t rowBytes,
                                       uint32_t flags) {
    ASSERT_OWNED_RESOURCE(target);
    SkASSERT(target);

    if (!(kDontFlush_PixelOpsFlag & flags) && target->surfacePriv().hasPendingWrite()) {
        this->flush();
    }

    // Determine which conversions have to be applied: flipY, swapRAndB, and/or unpremul.

    // If fGpu->readPixels would incur a y-flip cost then we will read the pixels upside down. We'll
    // either do the flipY by drawing into a scratch with a matrix or on the cpu after the read.
    bool flipY = fGpu->readPixelsWillPayForYFlip(target, left, top,
                                                 width, height, dstConfig,
                                                 rowBytes);
    // We ignore the preferred config if it is different than our config unless it is an R/B swap.
    // In that case we'll perform an R and B swap while drawing to a scratch texture of the swapped
    // config. Then we will call readPixels on the scratch with the swapped config. The swaps during
    // the draw cancel out the fact that we call readPixels with a config that is R/B swapped from
    // dstConfig.
    GrPixelConfig readConfig = dstConfig;
    bool swapRAndB = false;
    if (GrPixelConfigSwapRAndB(dstConfig) ==
        fGpu->preferredReadPixelsConfig(dstConfig, target->config())) {
        readConfig = GrPixelConfigSwapRAndB(readConfig);
        swapRAndB = true;
    }

    bool unpremul = SkToBool(kUnpremul_PixelOpsFlag & flags);

    if (unpremul && !GrPixelConfigIs8888(dstConfig)) {
        // The unpremul flag is only allowed for these two configs.
        return false;
    }

    // If the src is a texture and we would have to do conversions after read pixels, we instead
    // do the conversions by drawing the src to a scratch texture. If we handle any of the
    // conversions in the draw we set the corresponding bool to false so that we don't reapply it
    // on the read back pixels.
    GrTexture* src = target->asTexture();
    if (src && (swapRAndB || unpremul || flipY)) {
1443         // Make the scratch a render target so we can read its pixels.
1444         GrSurfaceDesc desc;
1445         desc.fFlags = kRenderTarget_GrSurfaceFlag;
1446         desc.fWidth = width;
1447         desc.fHeight = height;
1448         desc.fConfig = readConfig;
1449         desc.fOrigin = kTopLeft_GrSurfaceOrigin;
1450
1451         // When a full read back is faster than a partial one we could always make the scratch
1452         // exactly match the passed rect. However, if we see many differently sized rectangles we
1453         // will trash our texture cache and pay the cost of creating and destroying many textures.
1454         // So, we only request an exact match when the caller is reading an entire RT.
1455         ScratchTexMatch match = kApprox_ScratchTexMatch;
1456         if (0 == left &&
1457             0 == top &&
1458             target->width() == width &&
1459             target->height() == height &&
1460             fGpu->fullReadPixelsIsFasterThanPartial()) {
1461             match = kExact_ScratchTexMatch;
1462         }
1463         SkAutoTUnref<GrTexture> texture(this->refScratchTexture(desc, match));
1464         if (texture) {
1465             // compute a matrix to perform the draw
1466             SkMatrix textureMatrix;
1467             textureMatrix.setTranslate(SK_Scalar1 * left, SK_Scalar1 * top);
1468             textureMatrix.postIDiv(src->width(), src->height());
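            // The translate followed by postIDiv maps the scratch draw's device space into
            // normalized texture coordinates over the read rectangle of the source texture.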
1469
1470             SkAutoTUnref<const GrFragmentProcessor> fp;
1471             if (unpremul) {
1472                 fp.reset(this->createPMToUPMEffect(src, swapRAndB, textureMatrix));
1473                 if (fp) {
1474                     unpremul = false; // we no longer need to do this on CPU after the read back.
1475                 }
1476             }
1477             // If we failed to create a PM->UPM effect and have no other conversions to perform then
1478             // there is no longer any point to using the scratch.
1479             if (fp || flipY || swapRAndB) {
1480                 if (!fp) {
1481                     fp.reset(GrConfigConversionEffect::Create(
1482                             src, swapRAndB, GrConfigConversionEffect::kNone_PMConversion,
1483                             textureMatrix));
1484                 }
1485                 swapRAndB = false; // we will handle the swap in the draw.
1486
1487                 // We protect the existing geometry here since it may not be
1488                 // clear to the caller that a draw operation (i.e., drawSimpleRect)
1489                 // can be invoked in this method
1490                 {
1491                     GrDrawTarget::AutoGeometryAndStatePush agasp(fDrawBuffer,
1492                                                                  GrDrawTarget::kReset_ASRInit);
1493                     GrDrawState* drawState = fDrawBuffer->drawState();
1494                     SkASSERT(fp);
1495                     drawState->addColorProcessor(fp);
1496
1497                     drawState->setRenderTarget(texture->asRenderTarget());
1498                     SkRect rect = SkRect::MakeWH(SkIntToScalar(width), SkIntToScalar(height));
1499                     fDrawBuffer->drawSimpleRect(rect);
1500                     // we want to read back from the scratch's origin
1501                     left = 0;
1502                     top = 0;
1503                     target = texture->asRenderTarget();
1504                 }
1505                 this->flushSurfaceWrites(target);
1506             }
1507         }
1508     }
1509
1510     if (!fGpu->readPixels(target,
1511                           left, top, width, height,
1512                           readConfig, buffer, rowBytes)) {
1513         return false;
1514     }
1515     // Perform any conversions we weren't able to perform using a scratch texture.
1516     if (unpremul || swapRAndB) {
1517         SkDstPixelInfo dstPI;
1518         if (!GrPixelConfig2ColorType(dstConfig, &dstPI.fColorType)) {
1519             return false;
1520         }
1521         dstPI.fAlphaType = kUnpremul_SkAlphaType;
1522         dstPI.fPixels = buffer;
1523         dstPI.fRowBytes = rowBytes;
1524
1525         SkSrcPixelInfo srcPI;
1526         srcPI.fColorType = swapRAndB ? toggle_colortype32(dstPI.fColorType) : dstPI.fColorType;
1527         srcPI.fAlphaType = kPremul_SkAlphaType;
1528         srcPI.fPixels = buffer;
1529         srcPI.fRowBytes = rowBytes;
1530
1531         return srcPI.convertPixelsTo(&dstPI, width, height);
1532     }
1533     return true;
1534 }
1535
1536 void GrContext::prepareSurfaceForExternalRead(GrSurface* surface) {
1537     SkASSERT(surface);
1538     ASSERT_OWNED_RESOURCE(surface);
1539     if (surface->surfacePriv().hasPendingIO()) {
1540         this->flush();
1541     }
1542     GrRenderTarget* rt = surface->asRenderTarget();
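    // If the surface is also a render target, resolve it (e.g. an MSAA resolve) so that an
    // external consumer reading it afterwards sees its most recent contents.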
1543     if (fGpu && rt) {
1544         fGpu->resolveRenderTarget(rt);
1545     }
1546 }
1547
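// Records a discard of the render target's contents so the backend can avoid loading or
// storing them on the next use.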
1548 void GrContext::discardRenderTarget(GrRenderTarget* renderTarget) {
1549     SkASSERT(renderTarget);
1550     ASSERT_OWNED_RESOURCE(renderTarget);
1551     AutoRestoreEffects are;
1552     AutoCheckFlush acf(this);
1553     GrDrawTarget* target = this->prepareToDraw(NULL, &are, &acf);
1554     if (NULL == target) {
1555         return;
1556     }
1557     target->discard(renderTarget);
1558 }
1559
1560 void GrContext::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
1561                             const SkIPoint& dstPoint, uint32_t pixelOpsFlags) {
1562     if (NULL == src || NULL == dst) {
1563         return;
1564     }
1565     ASSERT_OWNED_RESOURCE(src);
1566     ASSERT_OWNED_RESOURCE(dst);
1567
1568     // Since we're going through the draw target and not directly to the GPU, there is no need
1569     // to check kDontFlush_PixelOpsFlag here.
1570
1571     GrDrawTarget* target = this->prepareToDraw(NULL, NULL, NULL);
1572     if (NULL == target) {
1573         return;
1574     }
1575     target->copySurface(dst, src, srcRect, dstPoint);
1576
1577     if (kFlushWrites_PixelOp & pixelOpsFlags) {
1578         this->flush();
1579     }
1580 }
1581
1582 void GrContext::flushSurfaceWrites(GrSurface* surface) {
1583     if (surface->surfacePriv().hasPendingWrite()) {
1584         this->flush();
1585     }
1586 }
1587
1588 ////////////////////////////////////////////////////////////////////////////////
1589
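// Configures fDrawState for the draw: from the paint when one is supplied, otherwise reset to
// just the view matrix and render target. The clip is applied and the in-order draw buffer is
// returned as the target to record the draw into.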
1590 GrDrawTarget* GrContext::prepareToDraw(const GrPaint* paint,
1591                                        AutoRestoreEffects* are,
1592                                        AutoCheckFlush* acf) {
1593     // All users of this draw state should be freeing up all effects when they're done.
1594     // Otherwise effects that own resources may keep those resources alive indefinitely.
1595     SkASSERT(0 == fDrawState->numColorStages() && 0 == fDrawState->numCoverageStages() &&
1596              !fDrawState->hasGeometryProcessor());
1597
1598     if (NULL == fGpu) {
1599         return NULL;
1600     }
1601
1602     ASSERT_OWNED_RESOURCE(fRenderTarget.get());
1603     if (paint) {
1604         SkASSERT(are);
1605         SkASSERT(acf);
1606         are->set(fDrawState);
1607         fDrawState->setFromPaint(*paint, fViewMatrix, fRenderTarget.get());
1608 #if GR_DEBUG_PARTIAL_COVERAGE_CHECK
1609         if ((paint->hasMask() || 0xff != paint->fCoverage) &&
1610             !fDrawState->couldApplyCoverage(fGpu->caps())) {
1611             SkDebugf("Partial pixel coverage will be incorrectly blended.\n");
1612         }
1613 #endif
1614         // Clear any vertex attributes configured for the previous use of the
1615         // GrDrawState, which can affect which blend optimizations are in effect.
1616         fDrawState->setDefaultVertexAttribs();
1617     } else {
1618         fDrawState->reset(fViewMatrix);
1619         fDrawState->setRenderTarget(fRenderTarget.get());
1620     }
1621     fDrawState->setState(GrDrawState::kClip_StateBit, fClip &&
1622                                                      !fClip->fClipStack->isWideOpen());
1623     fDrawBuffer->setClip(fClip);
1624     SkASSERT(fDrawState == fDrawBuffer->drawState());
1625     return fDrawBuffer;
1626 }
1627
1628 /*
1629  * This method finds a path renderer that can draw the specified path on
1630  * the provided target.
1631  * Due to its expense, the software path renderer has been split out so it
1632  * can be individually allowed/disallowed via the "allowSW" boolean.
1633  */
1634 GrPathRenderer* GrContext::getPathRenderer(const SkPath& path,
1635                                            const SkStrokeRec& stroke,
1636                                            const GrDrawTarget* target,
1637                                            bool allowSW,
1638                                            GrPathRendererChain::DrawType drawType,
1639                                            GrPathRendererChain::StencilSupport* stencilSupport) {
1640
1641     if (NULL == fPathRendererChain) {
1642         fPathRendererChain = SkNEW_ARGS(GrPathRendererChain, (this));
1643     }
1644
1645     GrPathRenderer* pr = fPathRendererChain->getPathRenderer(path,
1646                                                              stroke,
1647                                                              target,
1648                                                              drawType,
1649                                                              stencilSupport);
1650
1651     if (NULL == pr && allowSW) {
1652         if (NULL == fSoftwarePathRenderer) {
1653             fSoftwarePathRenderer = SkNEW_ARGS(GrSoftwarePathRenderer, (this));
1654         }
1655         pr = fSoftwarePathRenderer;
1656     }
1657
1658     return pr;
1659 }
1660
1661 ////////////////////////////////////////////////////////////////////////////////
1662 bool GrContext::isConfigRenderable(GrPixelConfig config, bool withMSAA) const {
1663     return fGpu->caps()->isConfigRenderable(config, withMSAA);
1664 }
1665
1666 int GrContext::getRecommendedSampleCount(GrPixelConfig config,
1667                                          SkScalar dpi) const {
1668     if (!this->isConfigRenderable(config, true)) {
1669         return 0;
1670     }
1671     int chosenSampleCount = 0;
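    // When path rendering is supported, pick an MSAA count from the output dpi: high-dpi
    // output (>= 250 dpi) uses 4 samples, lower-dpi output uses 16.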
1672     if (fGpu->caps()->pathRenderingSupport()) {
1673         if (dpi >= 250.0f) {
1674             chosenSampleCount = 4;
1675         } else {
1676             chosenSampleCount = 16;
1677         }
1678     }
1679     return chosenSampleCount <= fGpu->caps()->maxSampleCount() ?
1680         chosenSampleCount : 0;
1681 }
1682
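// Creates the vertex and index buffer allocation pools and the in-order draw buffer that
// records draws against fDrawState until they are flushed to fGpu.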
1683 void GrContext::setupDrawBuffer() {
1684     SkASSERT(NULL == fDrawBuffer);
1685     SkASSERT(NULL == fDrawBufferVBAllocPool);
1686     SkASSERT(NULL == fDrawBufferIBAllocPool);
1687
1688     fDrawBufferVBAllocPool =
1689         SkNEW_ARGS(GrVertexBufferAllocPool,
1690                    (fGpu, false, DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
1691                     DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS));
1692     fDrawBufferIBAllocPool =
1693         SkNEW_ARGS(GrIndexBufferAllocPool,
1694                    (fGpu, false, DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
1695                     DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS));
1696
1697     fDrawBuffer = SkNEW_ARGS(GrInOrderDrawBuffer, (fGpu,
1698                                                    fDrawBufferVBAllocPool,
1699                                                    fDrawBufferIBAllocPool));
1700
1701     fDrawBuffer->setDrawState(fDrawState);
1702 }
1703
1704 GrDrawTarget* GrContext::getTextTarget() {
1705     return this->prepareToDraw(NULL, NULL, NULL);
1706 }
1707
1708 const GrIndexBuffer* GrContext::getQuadIndexBuffer() const {
1709     return fGpu->getQuadIndexBuffer();
1710 }
1711
1712 namespace {
1713 void test_pm_conversions(GrContext* ctx, int* pmToUPMValue, int* upmToPMValue) {
1714     GrConfigConversionEffect::PMConversion pmToUPM;
1715     GrConfigConversionEffect::PMConversion upmToPM;
1716     GrConfigConversionEffect::TestForPreservingPMConversions(ctx, &pmToUPM, &upmToPM);
1717     *pmToUPMValue = pmToUPM;
1718     *upmToPMValue = upmToPM;
1719 }
1720 }
1721
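// The PM<->UPM effects are created lazily: the first request runs a one-time test for a
// round-trip-preserving conversion, and NULL is returned when none was found so that callers
// can fall back to converting on the CPU.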
1722 const GrFragmentProcessor* GrContext::createPMToUPMEffect(GrTexture* texture,
1723                                                           bool swapRAndB,
1724                                                           const SkMatrix& matrix) {
1725     if (!fDidTestPMConversions) {
1726         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1727         fDidTestPMConversions = true;
1728     }
1729     GrConfigConversionEffect::PMConversion pmToUPM =
1730         static_cast<GrConfigConversionEffect::PMConversion>(fPMToUPMConversion);
1731     if (GrConfigConversionEffect::kNone_PMConversion != pmToUPM) {
1732         return GrConfigConversionEffect::Create(texture, swapRAndB, pmToUPM, matrix);
1733     } else {
1734         return NULL;
1735     }
1736 }
1737
1738 const GrFragmentProcessor* GrContext::createUPMToPMEffect(GrTexture* texture,
1739                                                           bool swapRAndB,
1740                                                           const SkMatrix& matrix) {
1741     if (!fDidTestPMConversions) {
1742         test_pm_conversions(this, &fPMToUPMConversion, &fUPMToPMConversion);
1743         fDidTestPMConversions = true;
1744     }
1745     GrConfigConversionEffect::PMConversion upmToPM =
1746         static_cast<GrConfigConversionEffect::PMConversion>(fUPMToPMConversion);
1747     if (GrConfigConversionEffect::kNone_PMConversion != upmToPM) {
1748         return GrConfigConversionEffect::Create(texture, swapRAndB, upmToPM, matrix);
1749     } else {
1750         return NULL;
1751     }
1752 }
1753
1754 void GrContext::addResourceToCache(const GrResourceKey& resourceKey, GrGpuResource* resource) {
1755     fResourceCache->addResource(resourceKey, resource);
1756 }
1757
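// Looks up a resource by key and returns it with a ref added (NULL if not found); the caller
// is responsible for balancing that ref with unref().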
1758 GrGpuResource* GrContext::findAndRefCachedResource(const GrResourceKey& resourceKey) {
1759     GrGpuResource* resource = fResourceCache->find(resourceKey);
1760     SkSafeRef(resource);
1761     return resource;
1762 }
1763
1764 void GrContext::addGpuTraceMarker(const GrGpuTraceMarker* marker) {
1765     fGpu->addGpuTraceMarker(marker);
1766     if (fDrawBuffer) {
1767         fDrawBuffer->addGpuTraceMarker(marker);
1768     }
1769 }
1770
1771 void GrContext::removeGpuTraceMarker(const GrGpuTraceMarker* marker) {
1772     fGpu->removeGpuTraceMarker(marker);
1773     if (fDrawBuffer) {
1774         fDrawBuffer->removeGpuTraceMarker(marker);
1775     }
1776 }
1777
1778 ///////////////////////////////////////////////////////////////////////////////
1779 #if GR_CACHE_STATS
1780 void GrContext::printCacheStats() const {
1781     fResourceCache->printStats();
1782 }
1783 #endif
1784
1785 #if GR_GPU_STATS
1786 const GrContext::GPUStats* GrContext::gpuStats() const {
1787     return fGpu->gpuStats();
1788 }
1789 #endif
1790