3 * Copyright 2011 Google Inc.
5 * Use of this source code is governed by a BSD-style license that can be
6 * found in the LICENSE file.
8 #include "SkBitmapProcState.h"
9 #include "SkColorPriv.h"
10 #include "SkFilterProc.h"
12 #include "SkShader.h" // for tilemodes
13 #include "SkUtilsArm.h"
14 #include "SkBitmapScaler.h"
16 #include "SkPixelRef.h"
17 #include "SkScaledImageCache.h"
19 #if !SK_ARM_NEON_IS_NONE
20 // These are defined in src/opts/SkBitmapProcState_arm_neon.cpp
21 extern const SkBitmapProcState::SampleProc16 gSkBitmapProcStateSample16_neon[];
22 extern const SkBitmapProcState::SampleProc32 gSkBitmapProcStateSample32_neon[];
23 extern void S16_D16_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, uint16_t*);
24 extern void Clamp_S16_D16_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint16_t*, int);
25 extern void Repeat_S16_D16_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint16_t*, int);
26 extern void SI8_opaque_D32_filter_DX_neon(const SkBitmapProcState&, const uint32_t*, int, SkPMColor*);
27 extern void SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int);
28 extern void Clamp_SI8_opaque_D32_filter_DX_shaderproc_neon(const SkBitmapProcState&, int, int, uint32_t*, int);
31 #define NAME_WRAP(x) x
32 #include "SkBitmapProcState_filter.h"
33 #include "SkBitmapProcState_procs.h"
35 ///////////////////////////////////////////////////////////////////////////////
37 // true iff the matrix contains, at most, scale and translate elements
38 static bool matrix_only_scale_translate(const SkMatrix& m) {
39 return m.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask);
43 * For the purposes of drawing bitmaps, if a matrix is "almost" translate
44 * go ahead and treat it as if it were, so that subsequent code can go fast.
46 static bool just_trans_clamp(const SkMatrix& matrix, const SkBitmap& bitmap) {
47 SkASSERT(matrix_only_scale_translate(matrix));
49 if (matrix.getType() & SkMatrix::kScale_Mask) {
51 bitmap.getBounds(&src);
53 // Can't call mapRect(), since that will fix up inverted rectangles,
54 // e.g. when scale is negative, and we don't want to return true for
56 matrix.mapPoints(SkTCast<SkPoint*>(&dst),
57 SkTCast<const SkPoint*>(&src),
60 // Now round all 4 edges to device space, and then compare the device
61 // width/height to the original. Note: we must map all 4 and subtract
62 // rather than map the "width" and compare, since we care about the
63 // phase (in pixel space) that any translate in the matrix might impart.
66 return idst.width() == bitmap.width() && idst.height() == bitmap.height();
68 // if we got here, we're either kTranslate_Mask or identity
72 static bool just_trans_general(const SkMatrix& matrix) {
73 SkASSERT(matrix_only_scale_translate(matrix));
75 if (matrix.getType() & SkMatrix::kScale_Mask) {
76 const SkScalar tol = SK_Scalar1 / 32768;
78 if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleX] - SK_Scalar1, tol)) {
81 if (!SkScalarNearlyZero(matrix[SkMatrix::kMScaleY] - SK_Scalar1, tol)) {
85 // if we got here, treat us as either kTranslate_Mask or identity
89 ///////////////////////////////////////////////////////////////////////////////
static bool valid_for_filtering(unsigned dimension) {
    // for filtering, width and height must fit in 14bits, since we steal
    // 2 bits from each to store our 4bit subpixel data
    return (dimension & ~0x3FFF) == 0;
}
97 static SkScalar effective_matrix_scale_sqrd(const SkMatrix& mat) {
100 v1.fX = mat.getScaleX();
101 v1.fY = mat.getSkewY();
103 v2.fX = mat.getSkewX();
104 v2.fY = mat.getScaleY();
106 return SkMaxScalar(v1.lengthSqd(), v2.lengthSqd());
109 class AutoScaledCacheUnlocker {
111 AutoScaledCacheUnlocker(SkScaledImageCache::ID** idPtr) : fIDPtr(idPtr) {}
112 ~AutoScaledCacheUnlocker() {
113 if (fIDPtr && *fIDPtr) {
114 SkScaledImageCache::Unlock(*fIDPtr);
119 // forgets the ID, so it won't call Unlock
125 SkScaledImageCache::ID** fIDPtr;
127 #define AutoScaledCacheUnlocker(...) SK_REQUIRE_LOCAL_VAR(AutoScaledCacheUnlocker)
129 // TODO -- we may want to pass the clip into this function so we only scale
130 // the portion of the image that we're going to need. This will complicate
131 // the interface to the cache, but might be well worth it.
133 bool SkBitmapProcState::possiblyScaleImage() {
134 AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
136 SkASSERT(NULL == fBitmap);
137 SkASSERT(NULL == fScaledCacheID);
139 if (fFilterLevel <= SkPaint::kLow_FilterLevel) {
143 // Check to see if the transformation matrix is simple, and if we're
144 // doing high quality scaling. If so, do the bitmap scale here and
145 // remove the scaling component from the matrix.
147 if (SkPaint::kHigh_FilterLevel == fFilterLevel &&
148 fInvMatrix.getType() <= (SkMatrix::kScale_Mask | SkMatrix::kTranslate_Mask) &&
149 fOrigBitmap.config() == SkBitmap::kARGB_8888_Config) {
151 SkScalar invScaleX = fInvMatrix.getScaleX();
152 SkScalar invScaleY = fInvMatrix.getScaleY();
154 fScaledCacheID = SkScaledImageCache::FindAndLock(fOrigBitmap,
155 invScaleX, invScaleY,
157 if (fScaledCacheID) {
158 fScaledBitmap.lockPixels();
159 if (!fScaledBitmap.getPixels()) {
160 fScaledBitmap.unlockPixels();
161 // found a purged entry (discardablememory?), release it
162 SkScaledImageCache::Unlock(fScaledCacheID);
163 fScaledCacheID = NULL;
164 // fall through to rebuild
168 if (NULL == fScaledCacheID) {
169 int dest_width = SkScalarCeilToInt(fOrigBitmap.width() / invScaleX);
170 int dest_height = SkScalarCeilToInt(fOrigBitmap.height() / invScaleY);
172 // All the criteria are met; let's make a new bitmap.
174 SkConvolutionProcs simd;
175 sk_bzero(&simd, sizeof(simd));
176 this->platformConvolutionProcs(&simd);
178 if (!SkBitmapScaler::Resize(&fScaledBitmap,
180 SkBitmapScaler::RESIZE_BEST,
184 SkScaledImageCache::GetAllocator())) {
185 // we failed to create fScaledBitmap, so just return and let
186 // the scanline proc handle it.
190 SkASSERT(NULL != fScaledBitmap.getPixels());
191 fScaledCacheID = SkScaledImageCache::AddAndLock(fOrigBitmap,
195 if (!fScaledCacheID) {
196 fScaledBitmap.reset();
199 SkASSERT(NULL != fScaledBitmap.getPixels());
202 SkASSERT(NULL != fScaledBitmap.getPixels());
203 fBitmap = &fScaledBitmap;
205 // set the inv matrix type to translate-only;
206 fInvMatrix.setTranslate(fInvMatrix.getTranslateX() / fInvMatrix.getScaleX(),
207 fInvMatrix.getTranslateY() / fInvMatrix.getScaleY());
209 // no need for any further filtering; we just did it!
210 fFilterLevel = SkPaint::kNone_FilterLevel;
216 * If High, then our special-case for scale-only did not take, and so we
217 * have to make a choice:
218 * 1. fall back on mipmaps + bilerp
219 * 2. fall back on scanline bicubic filter
220 * For now, we compute the "scale" value from the matrix, and have a
221 * threshold to decide when bicubic is better, and when mips are better.
222 * No doubt a fancier decision tree could be used uere.
224 * If Medium, then we just try to build a mipmap and select a level,
225 * setting the filter-level to kLow to signal that we just need bilerp
226 * to process the selected level.
229 SkScalar scaleSqd = effective_matrix_scale_sqrd(fInvMatrix);
231 if (SkPaint::kHigh_FilterLevel == fFilterLevel) {
232 // Set the limit at 0.25 for the CTM... if the CTM is scaling smaller
233 // than this, then the mipmaps quality may be greater (certainly faster)
234 // so we only keep High quality if the scale is greater than this.
236 // Since we're dealing with the inverse, we compare against its inverse.
237 const SkScalar bicubicLimit = 4.0f;
238 const SkScalar bicubicLimitSqd = bicubicLimit * bicubicLimit;
239 if (scaleSqd < bicubicLimitSqd) { // use bicubic scanline
243 // else set the filter-level to Medium, since we're scaling down and
244 // want to reqeust mipmaps
245 fFilterLevel = SkPaint::kMedium_FilterLevel;
248 SkASSERT(SkPaint::kMedium_FilterLevel == fFilterLevel);
251 * Medium quality means use a mipmap for down-scaling, and just bilper
252 * for upscaling. Since we're examining the inverse matrix, we look for
253 * a scale > 1 to indicate down scaling by the CTM.
255 if (scaleSqd > SK_Scalar1) {
256 const SkMipMap* mip = NULL;
258 SkASSERT(NULL == fScaledCacheID);
259 fScaledCacheID = SkScaledImageCache::FindAndLockMip(fOrigBitmap, &mip);
260 if (!fScaledCacheID) {
261 SkASSERT(NULL == mip);
262 mip = SkMipMap::Build(fOrigBitmap);
264 fScaledCacheID = SkScaledImageCache::AddAndLockMip(fOrigBitmap,
266 SkASSERT(mip->getRefCnt() > 1);
267 mip->unref(); // the cache took a ref
268 SkASSERT(fScaledCacheID);
275 SkScalar levelScale = SkScalarInvert(SkScalarSqrt(scaleSqd));
276 SkMipMap::Level level;
277 if (mip->extractLevel(levelScale, &level)) {
278 SkScalar invScaleFixup = level.fScale;
279 fInvMatrix.postScale(invScaleFixup, invScaleFixup);
281 fScaledBitmap.setConfig(fOrigBitmap.config(),
282 level.fWidth, level.fHeight,
284 fScaledBitmap.setPixels(level.fPixels);
285 fBitmap = &fScaledBitmap;
286 fFilterLevel = SkPaint::kLow_FilterLevel;
296 static bool get_locked_pixels(const SkBitmap& src, int pow2, SkBitmap* dst) {
297 SkPixelRef* pr = src.pixelRef();
298 if (pr && pr->decodeInto(pow2, dst)) {
303 * If decodeInto() fails, it is possibe that we have an old subclass that
304 * does not, or cannot, implement that. In that case we fall back to the
305 * older protocol of having the pixelRef handle the caching for us.
309 return SkToBool(dst->getPixels());
312 bool SkBitmapProcState::lockBaseBitmap() {
313 AutoScaledCacheUnlocker unlocker(&fScaledCacheID);
315 SkPixelRef* pr = fOrigBitmap.pixelRef();
317 SkASSERT(NULL == fScaledCacheID);
319 if (pr->isLocked() || !pr->implementsDecodeInto()) {
320 // fast-case, no need to look in our cache
321 fScaledBitmap = fOrigBitmap;
322 fScaledBitmap.lockPixels();
323 if (NULL == fScaledBitmap.getPixels()) {
327 fScaledCacheID = SkScaledImageCache::FindAndLock(fOrigBitmap,
328 SK_Scalar1, SK_Scalar1,
330 if (fScaledCacheID) {
331 fScaledBitmap.lockPixels();
332 if (!fScaledBitmap.getPixels()) {
333 fScaledBitmap.unlockPixels();
334 // found a purged entry (discardablememory?), release it
335 SkScaledImageCache::Unlock(fScaledCacheID);
336 fScaledCacheID = NULL;
337 // fall through to rebuild
341 if (NULL == fScaledCacheID) {
342 if (!get_locked_pixels(fOrigBitmap, 0, &fScaledBitmap)) {
346 // TODO: if fScaled comes back at a different width/height than fOrig,
347 // we need to update the matrix we are using to sample from this guy.
349 fScaledCacheID = SkScaledImageCache::AddAndLock(fOrigBitmap,
350 SK_Scalar1, SK_Scalar1,
352 if (!fScaledCacheID) {
353 fScaledBitmap.reset();
358 fBitmap = &fScaledBitmap;
363 SkBitmapProcState::~SkBitmapProcState() {
364 if (fScaledCacheID) {
365 SkScaledImageCache::Unlock(fScaledCacheID);
367 SkDELETE(fBitmapFilter);
370 bool SkBitmapProcState::chooseProcs(const SkMatrix& inv, const SkPaint& paint) {
371 SkASSERT(fOrigBitmap.width() && fOrigBitmap.height());
375 fFilterLevel = paint.getFilterLevel();
377 SkASSERT(NULL == fScaledCacheID);
379 // possiblyScaleImage will look to see if it can rescale the image as a
380 // preprocess; either by scaling up to the target size, or by selecting
381 // a nearby mipmap level. If it does, it will adjust the working
382 // matrix as well as the working bitmap. It may also adjust the filter
383 // quality to avoid re-filtering an already perfectly scaled image.
384 if (!this->possiblyScaleImage()) {
385 if (!this->lockBaseBitmap()) {
389 // The above logic should have always assigned fBitmap, but in case it
390 // didn't, we check for that now...
391 // TODO(dominikg): Ask humper@ if we can just use an SkASSERT(fBitmap)?
392 if (NULL == fBitmap) {
396 // If we are "still" kMedium_FilterLevel, then the request was not fulfilled by possiblyScale,
397 // so we downgrade to kLow (so the rest of the sniffing code can assume that)
398 if (SkPaint::kMedium_FilterLevel == fFilterLevel) {
399 fFilterLevel = SkPaint::kLow_FilterLevel;
402 bool trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
403 bool clampClamp = SkShader::kClamp_TileMode == fTileModeX &&
404 SkShader::kClamp_TileMode == fTileModeY;
406 if (!(clampClamp || trivialMatrix)) {
407 fInvMatrix.postIDiv(fOrigBitmap.width(), fOrigBitmap.height());
410 // Now that all possible changes to the matrix have taken place, check
411 // to see if we're really close to a no-scale matrix. If so, explicitly
412 // set it to be so. Subsequent code may inspect this matrix to choose
413 // a faster path in this case.
415 // This code will only execute if the matrix has some scale component;
416 // if it's already pure translate then we won't do this inversion.
418 if (matrix_only_scale_translate(fInvMatrix)) {
420 if (fInvMatrix.invert(&forward)) {
421 if (clampClamp ? just_trans_clamp(forward, *fBitmap)
422 : just_trans_general(forward)) {
423 SkScalar tx = -SkScalarRoundToScalar(forward.getTranslateX());
424 SkScalar ty = -SkScalarRoundToScalar(forward.getTranslateY());
425 fInvMatrix.setTranslate(tx, ty);
430 fInvProc = fInvMatrix.getMapXYProc();
431 fInvType = fInvMatrix.getType();
432 fInvSx = SkScalarToFixed(fInvMatrix.getScaleX());
433 fInvSxFractionalInt = SkScalarToFractionalInt(fInvMatrix.getScaleX());
434 fInvKy = SkScalarToFixed(fInvMatrix.getSkewY());
435 fInvKyFractionalInt = SkScalarToFractionalInt(fInvMatrix.getSkewY());
437 fAlphaScale = SkAlpha255To256(paint.getAlpha());
439 fShaderProc32 = NULL;
440 fShaderProc16 = NULL;
441 fSampleProc32 = NULL;
442 fSampleProc16 = NULL;
444 // recompute the triviality of the matrix here because we may have
447 trivialMatrix = (fInvMatrix.getType() & ~SkMatrix::kTranslate_Mask) == 0;
449 if (SkPaint::kHigh_FilterLevel == fFilterLevel) {
450 // If this is still set, that means we wanted HQ sampling
451 // but couldn't do it as a preprocess. Let's try to install
452 // the scanline version of the HQ sampler. If that process fails,
453 // downgrade to bilerp.
455 // NOTE: Might need to be careful here in the future when we want
456 // to have the platform proc have a shot at this; it's possible that
457 // the chooseBitmapFilterProc will fail to install a shader but a
458 // platform-specific one might succeed, so it might be premature here
459 // to fall back to bilerp. This needs thought.
461 if (!this->setBitmapFilterProcs()) {
462 fFilterLevel = SkPaint::kLow_FilterLevel;
466 if (SkPaint::kLow_FilterLevel == fFilterLevel) {
467 // Only try bilerp if the matrix is "interesting" and
468 // the image has a suitable size.
470 if (fInvType <= SkMatrix::kTranslate_Mask ||
471 !valid_for_filtering(fBitmap->width() | fBitmap->height())) {
472 fFilterLevel = SkPaint::kNone_FilterLevel;
476 // At this point, we know exactly what kind of sampling the per-scanline
477 // shader will perform.
479 fMatrixProc = this->chooseMatrixProc(trivialMatrix);
480 // TODO(dominikg): SkASSERT(fMatrixProc) instead? chooseMatrixProc never returns NULL.
481 if (NULL == fMatrixProc) {
485 ///////////////////////////////////////////////////////////////////////
487 // No need to do this if we're doing HQ sampling; if filter quality is
488 // still set to HQ by the time we get here, then we must have installed
489 // the shader procs above and can skip all this.
491 if (fFilterLevel < SkPaint::kHigh_FilterLevel) {
494 if (fAlphaScale < 256) { // note: this distinction is not used for D16
497 if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
500 if (fFilterLevel > SkPaint::kNone_FilterLevel) {
503 // bits 3,4,5 encoding the source bitmap format
504 switch (fBitmap->config()) {
505 case SkBitmap::kARGB_8888_Config:
508 case SkBitmap::kRGB_565_Config:
511 case SkBitmap::kIndex8_Config:
514 case SkBitmap::kARGB_4444_Config:
517 case SkBitmap::kA8_Config:
519 fPaintPMColor = SkPreMultiplyColor(paint.getColor());
522 // TODO(dominikg): Should we ever get here? SkASSERT(false) instead?
526 #if !SK_ARM_NEON_IS_ALWAYS
527 static const SampleProc32 gSkBitmapProcStateSample32[] = {
528 S32_opaque_D32_nofilter_DXDY,
529 S32_alpha_D32_nofilter_DXDY,
530 S32_opaque_D32_nofilter_DX,
531 S32_alpha_D32_nofilter_DX,
532 S32_opaque_D32_filter_DXDY,
533 S32_alpha_D32_filter_DXDY,
534 S32_opaque_D32_filter_DX,
535 S32_alpha_D32_filter_DX,
537 S16_opaque_D32_nofilter_DXDY,
538 S16_alpha_D32_nofilter_DXDY,
539 S16_opaque_D32_nofilter_DX,
540 S16_alpha_D32_nofilter_DX,
541 S16_opaque_D32_filter_DXDY,
542 S16_alpha_D32_filter_DXDY,
543 S16_opaque_D32_filter_DX,
544 S16_alpha_D32_filter_DX,
546 SI8_opaque_D32_nofilter_DXDY,
547 SI8_alpha_D32_nofilter_DXDY,
548 SI8_opaque_D32_nofilter_DX,
549 SI8_alpha_D32_nofilter_DX,
550 SI8_opaque_D32_filter_DXDY,
551 SI8_alpha_D32_filter_DXDY,
552 SI8_opaque_D32_filter_DX,
553 SI8_alpha_D32_filter_DX,
555 S4444_opaque_D32_nofilter_DXDY,
556 S4444_alpha_D32_nofilter_DXDY,
557 S4444_opaque_D32_nofilter_DX,
558 S4444_alpha_D32_nofilter_DX,
559 S4444_opaque_D32_filter_DXDY,
560 S4444_alpha_D32_filter_DXDY,
561 S4444_opaque_D32_filter_DX,
562 S4444_alpha_D32_filter_DX,
564 // A8 treats alpha/opaque the same (equally efficient)
565 SA8_alpha_D32_nofilter_DXDY,
566 SA8_alpha_D32_nofilter_DXDY,
567 SA8_alpha_D32_nofilter_DX,
568 SA8_alpha_D32_nofilter_DX,
569 SA8_alpha_D32_filter_DXDY,
570 SA8_alpha_D32_filter_DXDY,
571 SA8_alpha_D32_filter_DX,
572 SA8_alpha_D32_filter_DX
575 static const SampleProc16 gSkBitmapProcStateSample16[] = {
576 S32_D16_nofilter_DXDY,
581 S16_D16_nofilter_DXDY,
586 SI8_D16_nofilter_DXDY,
591 // Don't support 4444 -> 565
592 NULL, NULL, NULL, NULL,
593 // Don't support A8 -> 565
594 NULL, NULL, NULL, NULL
598 fSampleProc32 = SK_ARM_NEON_WRAP(gSkBitmapProcStateSample32)[index];
599 index >>= 1; // shift away any opaque/alpha distinction
600 fSampleProc16 = SK_ARM_NEON_WRAP(gSkBitmapProcStateSample16)[index];
602 // our special-case shaderprocs
603 if (SK_ARM_NEON_WRAP(S16_D16_filter_DX) == fSampleProc16) {
605 fShaderProc16 = SK_ARM_NEON_WRAP(Clamp_S16_D16_filter_DX_shaderproc);
606 } else if (SkShader::kRepeat_TileMode == fTileModeX &&
607 SkShader::kRepeat_TileMode == fTileModeY) {
608 fShaderProc16 = SK_ARM_NEON_WRAP(Repeat_S16_D16_filter_DX_shaderproc);
610 } else if (SK_ARM_NEON_WRAP(SI8_opaque_D32_filter_DX) == fSampleProc32 && clampClamp) {
611 fShaderProc32 = SK_ARM_NEON_WRAP(Clamp_SI8_opaque_D32_filter_DX_shaderproc);
614 if (NULL == fShaderProc32) {
615 fShaderProc32 = this->chooseShaderProc32();
619 // see if our platform has any accelerated overrides
620 this->platformProcs();
625 static void Clamp_S32_D32_nofilter_trans_shaderproc(const SkBitmapProcState& s,
627 SkPMColor* SK_RESTRICT colors,
629 SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
630 SkASSERT(s.fInvKy == 0);
631 SkASSERT(count > 0 && colors != NULL);
632 SkASSERT(SkPaint::kNone_FilterLevel == s.fFilterLevel);
634 const int maxX = s.fBitmap->width() - 1;
635 const int maxY = s.fBitmap->height() - 1;
636 int ix = s.fFilterOneX + x;
637 int iy = SkClampMax(s.fFilterOneY + y, maxY);
641 s.fInvProc(s.fInvMatrix, SkIntToScalar(x) + SK_ScalarHalf,
642 SkIntToScalar(y) + SK_ScalarHalf, &pt);
643 int iy2 = SkClampMax(SkScalarFloorToInt(pt.fY), maxY);
644 int ix2 = SkScalarFloorToInt(pt.fX);
650 const SkPMColor* row = s.fBitmap->getAddr32(0, iy);
654 int n = SkMin32(-ix, count);
655 sk_memset32(colors, row[0], n);
666 int n = SkMin32(maxX - ix + 1, count);
667 memcpy(colors, row + ix, n * sizeof(SkPMColor));
675 // clamp to the right
676 sk_memset32(colors, row[maxX], count);
// Returns x mod n (n > 0) with the result always in [0, n), even for
// negative x -- unlike the built-in %, which can return negative values.
static inline int sk_int_mod(int x, int n) {
    if ((unsigned)x >= (unsigned)n) {   // true when x < 0 or x >= n
        x %= n;
        if (x < 0) {
            x += n;
        }
    }
    return x;
}

// Folds x into [0, n) using mirror (reflect) tiling:
// 0,1,...,n-1,n-1,...,1,0,0,1,...
static inline int sk_int_mirror(int x, int n) {
    x = sk_int_mod(x, 2 * n);
    if (x >= n) {
        x = n + ~(x - n);   // == 2*n - 1 - x : reflect back into range
    }
    return x;
}
699 static void Repeat_S32_D32_nofilter_trans_shaderproc(const SkBitmapProcState& s,
701 SkPMColor* SK_RESTRICT colors,
703 SkASSERT(((s.fInvType & ~SkMatrix::kTranslate_Mask)) == 0);
704 SkASSERT(s.fInvKy == 0);
705 SkASSERT(count > 0 && colors != NULL);
706 SkASSERT(SkPaint::kNone_FilterLevel == s.fFilterLevel);
708 const int stopX = s.fBitmap->width();
709 const int stopY = s.fBitmap->height();
710 int ix = s.fFilterOneX + x;
711 int iy = sk_int_mod(s.fFilterOneY + y, stopY);
715 s.fInvProc(s.fInvMatrix, SkIntToScalar(x) + SK_ScalarHalf,
716 SkIntToScalar(y) + SK_ScalarHalf, &pt);
717 int iy2 = sk_int_mod(SkScalarFloorToInt(pt.fY), stopY);
718 int ix2 = SkScalarFloorToInt(pt.fX);
724 const SkPMColor* row = s.fBitmap->getAddr32(0, iy);
726 ix = sk_int_mod(ix, stopX);
728 int n = SkMin32(stopX - ix, count);
729 memcpy(colors, row + ix, n * sizeof(SkPMColor));
739 static void S32_D32_constX_shaderproc(const SkBitmapProcState& s,
741 SkPMColor* SK_RESTRICT colors,
743 SkASSERT((s.fInvType & ~(SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) == 0);
744 SkASSERT(s.fInvKy == 0);
745 SkASSERT(count > 0 && colors != NULL);
746 SkASSERT(1 == s.fBitmap->width());
749 int iY1 SK_INIT_TO_AVOID_WARNING;
750 int iSubY SK_INIT_TO_AVOID_WARNING;
752 if (SkPaint::kNone_FilterLevel != s.fFilterLevel) {
753 SkBitmapProcState::MatrixProc mproc = s.getMatrixProc();
756 mproc(s, xy, 1, x, y);
759 iY1 = xy[0] & 0x3FFF;
760 iSubY = (xy[0] >> 14) & 0xF;
764 if (s.fInvType > SkMatrix::kTranslate_Mask) {
766 s.fInvProc(s.fInvMatrix,
767 SkIntToScalar(x) + SK_ScalarHalf,
768 SkIntToScalar(y) + SK_ScalarHalf,
770 // When the matrix has a scale component the setup code in
771 // chooseProcs multiples the inverse matrix by the inverse of the
772 // bitmap's width and height. Since this method is going to do
773 // its own tiling and sampling we need to undo that here.
774 if (SkShader::kClamp_TileMode != s.fTileModeX ||
775 SkShader::kClamp_TileMode != s.fTileModeY) {
776 yTemp = SkScalarFloorToInt(pt.fY * s.fBitmap->height());
778 yTemp = SkScalarFloorToInt(pt.fY);
781 yTemp = s.fFilterOneY + y;
784 const int stopY = s.fBitmap->height();
785 switch (s.fTileModeY) {
786 case SkShader::kClamp_TileMode:
787 iY0 = SkClampMax(yTemp, stopY-1);
789 case SkShader::kRepeat_TileMode:
790 iY0 = sk_int_mod(yTemp, stopY);
792 case SkShader::kMirror_TileMode:
794 iY0 = sk_int_mirror(yTemp, stopY);
801 s.fInvProc(s.fInvMatrix,
802 SkIntToScalar(x) + SK_ScalarHalf,
803 SkIntToScalar(y) + SK_ScalarHalf,
805 if (s.fInvType > SkMatrix::kTranslate_Mask &&
806 (SkShader::kClamp_TileMode != s.fTileModeX ||
807 SkShader::kClamp_TileMode != s.fTileModeY)) {
808 pt.fY *= s.fBitmap->height();
812 switch (s.fTileModeY) {
813 case SkShader::kClamp_TileMode:
814 iY2 = SkClampMax(SkScalarFloorToInt(pt.fY), stopY-1);
816 case SkShader::kRepeat_TileMode:
817 iY2 = sk_int_mod(SkScalarFloorToInt(pt.fY), stopY);
819 case SkShader::kMirror_TileMode:
821 iY2 = sk_int_mirror(SkScalarFloorToInt(pt.fY), stopY);
825 SkASSERT(iY0 == iY2);
830 const SkPMColor* row0 = s.fBitmap->getAddr32(0, iY0);
833 if (SkPaint::kNone_FilterLevel != s.fFilterLevel) {
834 const SkPMColor* row1 = s.fBitmap->getAddr32(0, iY1);
836 if (s.fAlphaScale < 256) {
837 Filter_32_alpha(iSubY, *row0, *row1, &color, s.fAlphaScale);
839 Filter_32_opaque(iSubY, *row0, *row1, &color);
842 if (s.fAlphaScale < 256) {
843 color = SkAlphaMulQ(*row0, s.fAlphaScale);
849 sk_memset32(colors, color, count);
852 static void DoNothing_shaderproc(const SkBitmapProcState&, int x, int y,
853 SkPMColor* SK_RESTRICT colors, int count) {
854 // if we get called, the matrix is too tricky, so we just draw nothing
855 sk_memset32(colors, 0, count);
858 bool SkBitmapProcState::setupForTranslate() {
860 fInvProc(fInvMatrix, SK_ScalarHalf, SK_ScalarHalf, &pt);
863 * if the translate is larger than our ints, we can get random results, or
864 * worse, we might get 0x80000000, which wreaks havoc on us, since we can't
867 const SkScalar too_big = SkIntToScalar(1 << 30);
868 if (SkScalarAbs(pt.fX) > too_big || SkScalarAbs(pt.fY) > too_big) {
872 // Since we know we're not filtered, we re-purpose these fields allow
873 // us to go from device -> src coordinates w/ just an integer add,
874 // rather than running through the inverse-matrix
875 fFilterOneX = SkScalarFloorToInt(pt.fX);
876 fFilterOneY = SkScalarFloorToInt(pt.fY);
880 SkBitmapProcState::ShaderProc32 SkBitmapProcState::chooseShaderProc32() {
882 if (SkBitmap::kARGB_8888_Config != fBitmap->config()) {
886 static const unsigned kMask = SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask;
888 if (1 == fBitmap->width() && 0 == (fInvType & ~kMask)) {
889 if (SkPaint::kNone_FilterLevel == fFilterLevel &&
890 fInvType <= SkMatrix::kTranslate_Mask &&
891 !this->setupForTranslate()) {
892 return DoNothing_shaderproc;
894 return S32_D32_constX_shaderproc;
897 if (fAlphaScale < 256) {
900 if (fInvType > SkMatrix::kTranslate_Mask) {
903 if (SkPaint::kNone_FilterLevel != fFilterLevel) {
907 SkShader::TileMode tx = (SkShader::TileMode)fTileModeX;
908 SkShader::TileMode ty = (SkShader::TileMode)fTileModeY;
910 if (SkShader::kClamp_TileMode == tx && SkShader::kClamp_TileMode == ty) {
911 if (this->setupForTranslate()) {
912 return Clamp_S32_D32_nofilter_trans_shaderproc;
914 return DoNothing_shaderproc;
916 if (SkShader::kRepeat_TileMode == tx && SkShader::kRepeat_TileMode == ty) {
917 if (this->setupForTranslate()) {
918 return Repeat_S32_D32_nofilter_trans_shaderproc;
920 return DoNothing_shaderproc;
925 ///////////////////////////////////////////////////////////////////////////////
929 static void check_scale_nofilter(uint32_t bitmapXY[], int count,
930 unsigned mx, unsigned my) {
931 unsigned y = *bitmapXY++;
934 const uint16_t* xptr = reinterpret_cast<const uint16_t*>(bitmapXY);
935 for (int i = 0; i < count; ++i) {
936 SkASSERT(xptr[i] < mx);
940 static void check_scale_filter(uint32_t bitmapXY[], int count,
941 unsigned mx, unsigned my) {
942 uint32_t YY = *bitmapXY++;
943 unsigned y0 = YY >> 18;
944 unsigned y1 = YY & 0x3FFF;
948 for (int i = 0; i < count; ++i) {
949 uint32_t XX = bitmapXY[i];
950 unsigned x0 = XX >> 18;
951 unsigned x1 = XX & 0x3FFF;
957 static void check_affine_nofilter(uint32_t bitmapXY[], int count,
958 unsigned mx, unsigned my) {
959 for (int i = 0; i < count; ++i) {
960 uint32_t XY = bitmapXY[i];
961 unsigned x = XY & 0xFFFF;
962 unsigned y = XY >> 16;
968 static void check_affine_filter(uint32_t bitmapXY[], int count,
969 unsigned mx, unsigned my) {
970 for (int i = 0; i < count; ++i) {
971 uint32_t YY = *bitmapXY++;
972 unsigned y0 = YY >> 18;
973 unsigned y1 = YY & 0x3FFF;
977 uint32_t XX = *bitmapXY++;
978 unsigned x0 = XX >> 18;
979 unsigned x1 = XX & 0x3FFF;
985 void SkBitmapProcState::DebugMatrixProc(const SkBitmapProcState& state,
986 uint32_t bitmapXY[], int count,
991 state.fMatrixProc(state, bitmapXY, count, x, y);
993 void (*proc)(uint32_t bitmapXY[], int count, unsigned mx, unsigned my);
995 // There are four formats possible:
997 // filter -vs- nofilter
998 if (state.fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
999 proc = state.fFilterLevel != SkPaint::kNone_FilterLevel ? check_scale_filter : check_scale_nofilter;
1001 proc = state.fFilterLevel != SkPaint::kNone_FilterLevel ? check_affine_filter : check_affine_nofilter;
1003 proc(bitmapXY, count, state.fBitmap->width(), state.fBitmap->height());
1006 SkBitmapProcState::MatrixProc SkBitmapProcState::getMatrixProc() const {
1007 return DebugMatrixProc;
1012 ///////////////////////////////////////////////////////////////////////////////
1014 The storage requirements for the different matrix procs are as follows,
1015 where each X or Y is 2 bytes, and N is the number of pixels/elements:
1017 scale/translate nofilter Y(4bytes) + N * X
1018 affine/perspective nofilter N * (X Y)
1019 scale/translate filter Y Y + N * (X X)
1020 affine/perspective filter N * (Y Y X X)
1022 int SkBitmapProcState::maxCountForBufferSize(size_t bufferSize) const {
1023 int32_t size = static_cast<int32_t>(bufferSize);
1025 size &= ~3; // only care about 4-byte aligned chunks
1026 if (fInvType <= (SkMatrix::kTranslate_Mask | SkMatrix::kScale_Mask)) {
1027 size -= 4; // the shared Y (or YY) coordinate
1036 if (fFilterLevel != SkPaint::kNone_FilterLevel) {