// sampling, deltaSrc should equal bpp.
SkASSERT(deltaSrc == bpp);
- // These swizzles trust that the alpha value is already 0xFF.
#ifdef SK_PMCOLOR_IS_RGBA
SkOpts::RGBA_to_BGRA((uint32_t*) dst, src + offset, width);
#else
src += offset;
SkPMColor* SK_RESTRICT dst = (SkPMColor*)dstRow;
for (int x = 0; x < dstWidth; x++) {
- dst[x] = SkPackARGB32(0xFF, src[0], src[1], src[2]);
+ dst[x] = SkPackARGB32NoCheck(0xFF, src[0], src[1], src[2]);
src += deltaSrc;
}
}
+static void fast_swizzle_rgb_to_n32(
+ void* dst, const uint8_t* src, int width, int bpp, int deltaSrc,
+ int offset, const SkPMColor ctable[]) {
+
+ // This function must not be called if we are sampling. If we are not
+ // sampling, deltaSrc should equal bpp.
+ SkASSERT(deltaSrc == bpp);
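+
+    // Expand each 3-byte RGB pixel to a fully opaque 4-byte SkPMColor,
+    // using the byte order that matches the native SkPMColor layout.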
+#ifdef SK_PMCOLOR_IS_RGBA
+ SkOpts::RGB_to_RGB1((uint32_t*) dst, src + offset, width);
+#else
+ SkOpts::RGB_to_BGR1((uint32_t*) dst, src + offset, width);
+#endif
+}
static void swizzle_rgb_to_565(
void* SK_RESTRICT dstRow, const uint8_t* SK_RESTRICT src, int dstWidth,
// sampling, deltaSrc should equal bpp.
SkASSERT(deltaSrc == bpp);
- // These swizzles trust that the alpha value is already 0xFF.
#ifdef SK_PMCOLOR_IS_RGBA
memcpy(dst, src + offset, width * bpp);
#else
switch (dstInfo.colorType()) {
case kN32_SkColorType:
proc = &swizzle_rgb_to_n32;
+ fastProc = &fast_swizzle_rgb_to_n32;
break;
case kRGB_565_SkColorType:
proc = &swizzle_rgb_to_565;
}
}
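+// Portable RGB -> 32-bit expansion: each 3-byte source pixel becomes a 4-byte
+// pixel with an opaque 0xFF alpha in the top byte. RGB1 keeps red in the low
+// byte of the word, BGR1 puts blue there.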
+static void RGB_to_RGB1_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
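+        // Packs to 0xAABBGGRR, e.g. r=0x10, g=0x20, b=0x30 -> 0xFF302010.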
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)b << 16
+ | (uint32_t)g << 8
+ | (uint32_t)r << 0;
+ }
+}
+
+static void RGB_to_BGR1_portable(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*)vsrc;
+ for (int i = 0; i < count; i++) {
+ uint8_t r = src[0],
+ g = src[1],
+ b = src[2];
+ src += 3;
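+        // Same packing with red and blue swapped: 0xAARRGGBB.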
+ dst[i] = (uint32_t)0xFF << 24
+ | (uint32_t)r << 16
+ | (uint32_t)g << 8
+ | (uint32_t)b << 0;
+ }
+}
+
#if defined(SK_ARM_HAS_NEON)
// Rounded divide by 255, (x + 127) / 255
auto src = (const uint32_t*)vsrc;
while (count >= 8) {
// Load 8 pixels.
- uint8x8x4_t bgra = vld4_u8((const uint8_t*) src);
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
- uint8x8_t a = bgra.val[3],
- b = bgra.val[2],
- g = bgra.val[1],
- r = bgra.val[0];
+ uint8x8_t a = rgba.val[3],
+ b = rgba.val[2],
+ g = rgba.val[1],
+ r = rgba.val[0];
// Premultiply.
b = scale(b, a);
// Store 8 premultiplied pixels.
if (kSwapRB) {
- bgra.val[2] = r;
- bgra.val[1] = g;
- bgra.val[0] = b;
+ rgba.val[2] = r;
+ rgba.val[1] = g;
+ rgba.val[0] = b;
} else {
- bgra.val[2] = b;
- bgra.val[1] = g;
- bgra.val[0] = r;
+ rgba.val[2] = b;
+ rgba.val[1] = g;
+ rgba.val[0] = r;
}
- vst4_u8((uint8_t*) dst, bgra);
+ vst4_u8((uint8_t*) dst, rgba);
src += 8;
dst += 8;
count -= 8;
auto src = (const uint32_t*)vsrc;
while (count >= 16) {
// Load 16 pixels.
- uint8x16x4_t bgra = vld4q_u8((const uint8_t*) src);
+ uint8x16x4_t rgba = vld4q_u8((const uint8_t*) src);
// Swap r and b.
- SkTSwap(bgra.val[0], bgra.val[2]);
+ SkTSwap(rgba.val[0], rgba.val[2]);
// Store 16 pixels.
- vst4q_u8((uint8_t*) dst, bgra);
+ vst4q_u8((uint8_t*) dst, rgba);
src += 16;
dst += 16;
count -= 16;
if (count >= 8) {
// Load 8 pixels.
- uint8x8x4_t bgra = vld4_u8((const uint8_t*) src);
+ uint8x8x4_t rgba = vld4_u8((const uint8_t*) src);
// Swap r and b.
- SkTSwap(bgra.val[0], bgra.val[2]);
+ SkTSwap(rgba.val[0], rgba.val[2]);
// Store 8 pixels.
- vst4_u8((uint8_t*) dst, bgra);
+ vst4_u8((uint8_t*) dst, rgba);
src += 8;
dst += 8;
count -= 8;
RGBA_to_BGRA_portable(dst, src, count);
}
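+
+// Expand 3-byte RGB to 4-byte pixels with an opaque alpha channel, optionally
+// swapping red and blue; handles 16 and then 8 pixels at a time with NEON.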
+template <bool kSwapRB>
+static void insert_alpha_should_swaprb(uint32_t dst[], const void* vsrc, int count) {
+ const uint8_t* src = (const uint8_t*) vsrc;
+ while (count >= 16) {
+ // Load 16 pixels.
+ uint8x16x3_t rgb = vld3q_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x16x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdupq_n_u8(0xFF);
+
+ // Store 16 pixels.
+ vst4q_u8((uint8_t*) dst, rgba);
+ src += 16*3;
+ dst += 16;
+ count -= 16;
+ }
+
+ if (count >= 8) {
+ // Load 8 pixels.
+ uint8x8x3_t rgb = vld3_u8(src);
+
+ // Insert an opaque alpha channel and swap if needed.
+ uint8x8x4_t rgba;
+ if (kSwapRB) {
+ rgba.val[0] = rgb.val[2];
+ rgba.val[2] = rgb.val[0];
+ } else {
+ rgba.val[0] = rgb.val[0];
+ rgba.val[2] = rgb.val[2];
+ }
+ rgba.val[1] = rgb.val[1];
+ rgba.val[3] = vdup_n_u8(0xFF);
+
+ // Store 8 pixels.
+ vst4_u8((uint8_t*) dst, rgba);
+ src += 8*3;
+ dst += 8;
+ count -= 8;
+ }
+
+ // Call portable code to finish up the tail of [0,8) pixels.
+ auto proc = kSwapRB ? RGB_to_BGR1_portable : RGB_to_RGB1_portable;
+ proc(dst, src, count);
+}
+
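+// Named entry points that pin down the kSwapRB template argument.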
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<false>(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ insert_alpha_should_swaprb<true>(dst, src, count);
+}
+
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
template <bool kSwapRB>
RGBA_to_BGRA_portable(dst, src, count);
}
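+// No SSSE3-specific version of the RGB expansion here; fall back to the
+// portable code.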
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ RGB_to_RGB1_portable(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ RGB_to_BGR1_portable(dst, src, count);
+}
+
#else
static void RGBA_to_rgbA(uint32_t* dst, const void* src, int count) {
RGBA_to_BGRA_portable(dst, src, count);
}
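+// Portable fallbacks for builds without NEON or SSSE3.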
+static void RGB_to_RGB1(uint32_t dst[], const void* src, int count) {
+ RGB_to_RGB1_portable(dst, src, count);
+}
+
+static void RGB_to_BGR1(uint32_t dst[], const void* src, int count) {
+ RGB_to_BGR1_portable(dst, src, count);
+}
+
#endif
}