/*
 * Copyright (c) 2021 Samsung Electronics Co., Ltd. All rights reserved.

 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:

 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.

 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef THORVG_AVX_VECTOR_SUPPORT

#include <immintrin.h>

#define N_32BITS_IN_128REG 4
#define N_32BITS_IN_256REG 8
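//N_32BITS_IN_128REG / N_32BITS_IN_256REG above: how many 32-bit pixels fit into a
//128-bit (SSE) and a 256-bit (AVX) register, respectively. The blend helpers below
//work on 128-bit registers; only the plain fill uses the full 256-bit width.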
static inline __m128i ALPHA_BLEND(__m128i c, __m128i a)
{
    //1. set the masks for the A/G and R/B channels
    auto AG = _mm_set1_epi32(0xff00ff00);
    auto RB = _mm_set1_epi32(0x00ff00ff);

    //2. mask the alpha vector - originally quartet [a, a, a, a]
    auto aAG = _mm_and_si128(a, AG);
    auto aRB = _mm_and_si128(a, RB);

    //3. calculate the alpha blending of the 2nd and 4th channels
    //- mask the color vector
    //- multiply it by the masked alpha vector
    //- add the correction to compensate bit shifting used instead of dividing by 255
    //- shift bits - corresponding to division by 256
    auto even = _mm_and_si128(c, RB);
    even = _mm_mullo_epi16(even, aRB);
    even = _mm_add_epi16(even, RB);
    even = _mm_srli_epi16(even, 8);

    //4. calculate the alpha blending of the 1st and 3rd channels:
    //- mask the color vector
    //- multiply it by the corresponding masked alpha vector and store the high bits of the result
    //- add the correction to compensate division by 256 instead of by 255 (next step)
    //- remove the low 8 bits to mimic the division by 256
    auto odd = _mm_and_si128(c, AG);
    odd = _mm_mulhi_epu16(odd, aAG);
    odd = _mm_add_epi16(odd, RB);
    odd = _mm_and_si128(odd, AG);

    //5. combine both halves into the final pixel quartet
    return _mm_or_si128(odd, even);
}
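
//For reference, a scalar sketch of the same blend (not ThorVG's own helper - the name
//and the constants placement are illustrative only): each channel becomes
//(c * a + 255) >> 8, which approximates c * a / 255 without a division.
static inline uint32_t alphaBlendScalarSketch(uint32_t c, uint32_t a)
{
    auto rb = ((((c & 0x00ff00ff) * a) + 0x00ff00ff) >> 8) & 0x00ff00ff;        //R and B
    auto ag = (((((c >> 8) & 0x00ff00ff) * a) + 0x00ff00ff) >> 8) & 0x00ff00ff; //A and G
    return (ag << 8) | rb;
}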
static void avxRasterRGBA32(uint32_t *dst, uint32_t val, uint32_t offset, int32_t len)
{
    //1. calculate how many iterations we need to cover the length
    uint32_t iterations = len / N_32BITS_IN_256REG;
    uint32_t avxFilled = iterations * N_32BITS_IN_256REG;

    //2. set the beginning of the array
    dst += offset;

    //3. fill the aligned part - N_32BITS_IN_256REG pixels per iteration
    for (uint32_t i = 0; i < iterations; ++i, dst += N_32BITS_IN_256REG) {
        _mm256_storeu_si256((__m256i*)dst, _mm256_set1_epi32(val));
    }

    //4. fill the leftovers (dst has already been advanced to the first pixel the avx loop did not cover)
    int32_t leftovers = len - avxFilled;
    while (leftovers--) *dst++ = val;
}
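
//Usage sketch with made-up values: fill 100 pixels of a scanline with opaque red,
//starting 10 pixels into the buffer (assuming a 32-bit ARGB layout just for this example):
//  avxRasterRGBA32(scanline, 0xffff0000, 10, 100);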
static bool avxRasterTranslucentRect(SwSurface* surface, const SwBBox& region, uint32_t color)
{
    auto buffer = surface->buffer + (region.min.y * surface->stride) + region.min.x;
    auto h = static_cast<uint32_t>(region.max.y - region.min.y);
    auto w = static_cast<uint32_t>(region.max.x - region.min.x);

    auto ialpha = 255 - static_cast<uint8_t>(surface->blender.alpha(color));

    auto avxColor = _mm_set1_epi32(color);
    auto avxIalpha = _mm_set1_epi8(ialpha);

    for (uint32_t y = 0; y < h; ++y) {
        auto dst = &buffer[y * surface->stride];

        //1. fill the not aligned memory (for 128-bit registers a 16-byte alignment is required)
        auto notAligned = ((uintptr_t)dst & 0xf) / 4;
        if (notAligned) {
            notAligned = (N_32BITS_IN_128REG - notAligned > w ? w : N_32BITS_IN_128REG - notAligned);
            for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
                *dst = color + ALPHA_BLEND(*dst, ialpha);
            }
        }

        //2. fill the aligned memory - N_32BITS_IN_128REG pixels processed at once
        uint32_t iterations = (w - notAligned) / N_32BITS_IN_128REG;
        uint32_t avxFilled = iterations * N_32BITS_IN_128REG;
        auto avxDst = (__m128i*)dst;
        for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
            *avxDst = _mm_add_epi32(avxColor, ALPHA_BLEND(*avxDst, avxIalpha));
        }

        //3. fill the remaining pixels (first move dst past the part the vector loop has covered)
        int32_t leftovers = w - notAligned - avxFilled;
        dst += avxFilled;
        while (leftovers--) {
            *dst = color + ALPHA_BLEND(*dst, ialpha);
            ++dst;
        }
    }
    return true;
}
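
//Note on the alignment handling above (and in avxRasterTranslucentRle() below):
//dst is a uint32_t*, so (uintptr_t)dst & 0xf is the byte offset within a 16-byte block
//and dividing by 4 converts it to whole pixels. E.g. for a (hypothetical) dst ending in
//0x...1008: 8 stray bytes -> 2 pixels past the boundary, so 4 - 2 = 2 pixels are blended
//with scalar code before the 128-bit stores take over.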
static bool avxRasterTranslucentRle(SwSurface* surface, const SwRleData* rle, uint32_t color)
{
    auto span = rle->spans;
    uint32_t src;

    for (uint32_t i = 0; i < rle->size; ++i, ++span) {
        auto dst = &surface->buffer[span->y * surface->stride + span->x];

        if (span->coverage < 255) src = ALPHA_BLEND(color, span->coverage);
        else src = color;

        auto ialpha = 255 - static_cast<uint8_t>(surface->blender.alpha(src));

        //1. fill the not aligned memory (for 128-bit registers a 16-byte alignment is required)
        auto notAligned = ((uintptr_t)dst & 0xf) / 4;
        if (notAligned) {
            notAligned = (N_32BITS_IN_128REG - notAligned > span->len ? span->len : N_32BITS_IN_128REG - notAligned);
            for (uint32_t x = 0; x < notAligned; ++x, ++dst) {
                *dst = src + ALPHA_BLEND(*dst, ialpha);
            }
        }

        //2. fill the aligned memory using avx - N_32BITS_IN_128REG pixels processed at once
        //In order to avoid unnecessary avx variable declarations, a check is made whether there are any iterations at all
        uint32_t iterations = (span->len - notAligned) / N_32BITS_IN_128REG;
        uint32_t avxFilled = 0;
        if (iterations > 0) {
            auto avxSrc = _mm_set1_epi32(src);
            auto avxIalpha = _mm_set1_epi8(ialpha);

            avxFilled = iterations * N_32BITS_IN_128REG;
            auto avxDst = (__m128i*)dst;
            for (uint32_t x = 0; x < iterations; ++x, ++avxDst) {
                *avxDst = _mm_add_epi32(avxSrc, ALPHA_BLEND(*avxDst, avxIalpha));
            }
        }

        //3. fill the remaining pixels (first move dst past the part the vector loop has covered)
        int32_t leftovers = span->len - notAligned - avxFilled;
        dst += avxFilled;
        while (leftovers--) {
            *dst = src + ALPHA_BLEND(*dst, ialpha);
            ++dst;
        }
    }
    return true;
}

#endif