1 /****************************************************************************
3 ** Copyright (C) 2012 Nokia Corporation and/or its subsidiary(-ies).
4 ** Contact: http://www.qt-project.org/
6 ** This file is part of the QtGui module of the Qt Toolkit.
8 ** $QT_BEGIN_LICENSE:LGPL$
9 ** GNU Lesser General Public License Usage
10 ** This file may be used under the terms of the GNU Lesser General Public
11 ** License version 2.1 as published by the Free Software Foundation and
12 ** appearing in the file LICENSE.LGPL included in the packaging of this
13 ** file. Please review the following information to ensure the GNU Lesser
14 ** General Public License version 2.1 requirements will be met:
15 ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
17 ** In addition, as a special exception, Nokia gives you certain additional
18 ** rights. These rights are described in the Nokia Qt LGPL Exception
19 ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
21 ** GNU General Public License Usage
22 ** Alternatively, this file may be used under the terms of the GNU General
23 ** Public License version 3.0 as published by the Free Software Foundation
24 ** and appearing in the file LICENSE.GPL included in the packaging of this
25 ** file. Please review the following information to ensure the GNU General
26 ** Public License version 3.0 requirements will be met:
27 ** http://www.gnu.org/copyleft/gpl.html.
30 ** Alternatively, this file may be used in accordance with the terms and
31 ** conditions contained in a signed written agreement between you and Nokia.
40 ****************************************************************************/
42 #ifndef QDRAWINGPRIMITIVE_SSE2_P_H
43 #define QDRAWINGPRIMITIVE_SSE2_P_H
45 #include <private/qsimd_p.h>
47 #ifdef QT_COMPILER_SUPPORTS_SSE2
53 // This file is not part of the Qt API. It exists purely as an
54 // implementation detail. This header file may change from version to
55 // version without notice, or even be removed.
63 * Multiply the components of pixelVector by alphaChannel
64 * Each 32bits components of alphaChannel must be in the form 0x00AA00AA
65 * colorMask must have 0x00ff00ff on each 32 bits component
66 ** half must have the value 128 (0x80) for each 32 bits component
// BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half)
// Per-byte multiply of four packed ARGB32 pixels by a per-pixel alpha:
// result = (pixelVector * alphaChannel) / 255, rounded, computed on the
// A/G and R/B 16-bit lane pairs separately so the 8x8-bit products fit.
// The /255 is approximated exactly as (x + x/256 + 128) / 256, the same
// trick as the scalar BYTE_MUL().
// Preconditions (see the comment block above): each 32-bit lane of
// alphaChannel is 0x00AA00AA, colorMask is 0x00ff00ff per lane, and
// half holds 0x0080 in each 16-bit lane.
// NOTE(review): expands to multiple statements with local __m128i
// temporaries; must be used where a declaration is legal, and the
// surrounding braces are supplied at the expansion/definition site.
#define BYTE_MUL_SSE2(result, pixelVector, alphaChannel, colorMask, half) \
70     /* 1. separate the colors in 2 vectors so each color is on 16 bits \
71        (in order to be multiplied by the alpha \
72        each 32 bit of dstVectorAG are in the form 0x00AA00GG \
73        each 32 bit of dstVectorRB are in the form 0x00RR00BB */\
74     __m128i pixelVectorAG = _mm_srli_epi16(pixelVector, 8); \
75     __m128i pixelVectorRB = _mm_and_si128(pixelVector, colorMask); \
77     /* 2. multiply the vectors by the alpha channel */\
78     pixelVectorAG = _mm_mullo_epi16(pixelVectorAG, alphaChannel); \
79     pixelVectorRB = _mm_mullo_epi16(pixelVectorRB, alphaChannel); \
81     /* 3. divide by 255, that's the tricky part. \
82        we do it like for BYTE_MUL(), with bit shift: X/255 ~= (X + X/256 + rounding)/256 */ \
83     /** so first (X + X/256 + rounding) */\
84     pixelVectorRB = _mm_add_epi16(pixelVectorRB, _mm_srli_epi16(pixelVectorRB, 8)); \
85     pixelVectorRB = _mm_add_epi16(pixelVectorRB, half); \
86     pixelVectorAG = _mm_add_epi16(pixelVectorAG, _mm_srli_epi16(pixelVectorAG, 8)); \
87     pixelVectorAG = _mm_add_epi16(pixelVectorAG, half); \
89     /** second divide by 256 */\
90     pixelVectorRB = _mm_srli_epi16(pixelVectorRB, 8); \
91     /** for AG, we could >> 8 to divide followed by << 8 to put the \
92        bytes in the correct position. By masking instead, we execute \
93        only one instruction */\
94     pixelVectorAG = _mm_andnot_si128(colorMask, pixelVectorAG); \
96     /* 4. combine the 2 pairs of colors */ \
97     result = _mm_or_si128(pixelVectorAG, pixelVectorRB); \
101 * Each 32bits components of alphaChannel must be in the form 0x00AA00AA
102 * oneMinusAlphaChannel must be 255 - alpha for each 32 bits component
103 * colorMask must have 0x00ff00ff on each 32 bits component
104 ** half must have the value 128 (0x80) for each 32 bits component
// INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel,
//                            oneMinusAlphaChannel, colorMask, half)
// Per-byte linear interpolation of four packed ARGB32 pixels:
// result = (srcVector * alphaChannel + dstVector * oneMinusAlphaChannel) / 255
// Works on the A/G and R/B 16-bit lane pairs separately, using the same
// (x + x/256 + 128) / 256 rounding approximation of /255 as BYTE_MUL_SSE2.
// Preconditions (see comment block above): each 32-bit lane of
// alphaChannel is 0x00AA00AA, oneMinusAlphaChannel is 255 - alpha in the
// same layout, colorMask is 0x00ff00ff per lane, half is 0x0080 per
// 16-bit lane.
#define INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, alphaChannel, oneMinusAlphaChannel, colorMask, half) { \
107     /* interpolate AG */\
108     __m128i srcVectorAG = _mm_srli_epi16(srcVector, 8); \
109     __m128i dstVectorAG = _mm_srli_epi16(dstVector, 8); \
110     __m128i srcVectorAGalpha = _mm_mullo_epi16(srcVectorAG, alphaChannel); \
111     __m128i dstVectorAGoneMinusAlphalpha = _mm_mullo_epi16(dstVectorAG, oneMinusAlphaChannel); \
112     __m128i finalAG = _mm_add_epi16(srcVectorAGalpha, dstVectorAGoneMinusAlphalpha); \
113     finalAG = _mm_add_epi16(finalAG, _mm_srli_epi16(finalAG, 8)); \
114     finalAG = _mm_add_epi16(finalAG, half); \
115     finalAG = _mm_andnot_si128(colorMask, finalAG); \
117     /* interpolate RB */\
118     __m128i srcVectorRB = _mm_and_si128(srcVector, colorMask); \
119     __m128i dstVectorRB = _mm_and_si128(dstVector, colorMask); \
120     __m128i srcVectorRBalpha = _mm_mullo_epi16(srcVectorRB, alphaChannel); \
121     __m128i dstVectorRBoneMinusAlphalpha = _mm_mullo_epi16(dstVectorRB, oneMinusAlphaChannel); \
122     __m128i finalRB = _mm_add_epi16(srcVectorRBalpha, dstVectorRBoneMinusAlphalpha); \
123     finalRB = _mm_add_epi16(finalRB, _mm_srli_epi16(finalRB, 8)); \
124     finalRB = _mm_add_epi16(finalRB, half); \
125     finalRB = _mm_srli_epi16(finalRB, 8); \
128     result = _mm_or_si128(finalAG, finalRB); \
131 // same as BLEND_SOURCE_OVER_ARGB32_SSE2, but for one vector srcVector
// Blends the four premultiplied ARGB32 source pixels in srcVector over
// dst[x..x+3]: d = s + d * (1 - alpha).
// Fast paths: stores srcVector directly when all four pixels are fully
// opaque (alpha bytes all 0xff), and leaves dst untouched when all four
// are fully transparent (alpha bytes all 0x00).
// Assumes &dst[x] is 16-byte aligned (_mm_load_si128/_mm_store_si128);
// `x` and `dst` must be in scope at the expansion site.
// The _mm_add_epi8 is safe (no per-byte overflow) because the source is
// premultiplied, so s + d*(1-alpha) <= 255 per channel.
#define BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) { \
133         const __m128i srcVectorAlpha = _mm_and_si128(srcVector, alphaMask); \
134         if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, alphaMask)) == 0xffff) { \
136             _mm_store_si128((__m128i *)&dst[x], srcVector); \
137         } else if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVectorAlpha, nullVector)) != 0xffff) { \
138             /* not fully transparent */ \
139             /* extract the alpha channel on 2 x 16 bits */ \
140             /* so we have room for the multiplication */ \
141             /* each 32 bits will be in the form 0x00AA00AA */ \
142             /* with A being the 1 - alpha */ \
143             __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
144             alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
145             alphaChannel = _mm_sub_epi16(one, alphaChannel); \
147             const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
148             __m128i destMultipliedByOneMinusAlpha; \
149             BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
151             /* result = s + d * (1-alpha) */\
152             const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
153             _mm_store_si128((__m128i *)&dst[x], result); \
158 // Basically blend src over dst with the const alpha defined as constAlphaVector.
159 // nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
160 //const __m128i nullVector = _mm_set1_epi32(0);
161 //const __m128i half = _mm_set1_epi16(0x80);
162 //const __m128i one = _mm_set1_epi16(0xff);
163 //const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
164 //const __m128i alphaMask = _mm_set1_epi32(0xff000000);
166 // The computation being done is:
167 // result = s + d * (1-alpha)
168 // with shortcuts if fully opaque or fully transparent.
// Source-over blend of `length` premultiplied ARGB32 pixels from src onto
// dst (see the computation notes and required constant vectors above).
// Structure: a scalar prologue via ALIGNMENT_PROLOGUE_16BYTES (defined
// elsewhere; presumably advances x until &dst[x] is 16-byte aligned —
// confirm against qsimd_p.h), then a 4-pixels-per-iteration SSE2 loop
// using the _helper macro (src is loaded unaligned, dst accessed
// aligned), then a scalar epilogue for the remaining 0-3 pixels.
// Scalar paths use the same fast cases: fully opaque (s >= 0xff000000)
// stores s directly; otherwise dst[x] = s + BYTE_MUL(dst[x], 255-alpha).
#define BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask) { \
172     /* First, get dst aligned. */ \
173     ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
175         if (s >= 0xff000000) \
178             dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
181     for (; x < length-3; x += 4) { \
182         const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
183         BLEND_SOURCE_OVER_ARGB32_SSE2_helper(dst, srcVector, nullVector, half, one, colorMask, alphaMask) \
185     for (; x < length; ++x) { \
187         if (s >= 0xff000000) \
190             dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
194 // Basically blend src over dst with the const alpha defined as constAlphaVector.
195 // nullVector, half, one, colorMask are constant across the whole image/texture, and should be defined as:
196 //const __m128i nullVector = _mm_set1_epi32(0);
197 //const __m128i half = _mm_set1_epi16(0x80);
198 //const __m128i one = _mm_set1_epi16(0xff);
199 //const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
201 // The computation being done is:
202 // dest = (s + d * sia) * ca + d * cia
203 // = s * ca + d * (sia * ca + cia)
204 // = s * ca + d * (1 - sa*ca)
// Source-over blend of `length` premultiplied ARGB32 pixels from src onto
// dst, with the source additionally scaled by a constant alpha
// (dest = s*ca + d*(1 - sa*ca), see derivation above). constAlphaVector
// holds ca replicated as 0x00CA00CA per 32-bit lane; the scalar paths use
// the matching `const_alpha` integer, which must be in scope at the
// expansion site.
// Structure mirrors BLEND_SOURCE_OVER_ARGB32_SSE2: scalar alignment
// prologue, a 4-pixel SSE2 loop (skipped per vector when all four source
// pixels are zero), and a scalar epilogue. In the vector loop the source
// is first multiplied by ca with BYTE_MUL_SSE2, then blended over dst
// with the usual s + d*(1-alpha) using the now-effective alpha sa*ca.
#define BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector) \
209     ALIGNMENT_PROLOGUE_16BYTES(dst, x, length) { \
210         quint32 s = src[x]; \
212             s = BYTE_MUL(s, const_alpha); \
213             dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
217     for (; x < length-3; x += 4) { \
218         __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]); \
219         if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) { \
220             BYTE_MUL_SSE2(srcVector, srcVector, constAlphaVector, colorMask, half); \
222             __m128i alphaChannel = _mm_srli_epi32(srcVector, 24); \
223             alphaChannel = _mm_or_si128(alphaChannel, _mm_slli_epi32(alphaChannel, 16)); \
224             alphaChannel = _mm_sub_epi16(one, alphaChannel); \
226             const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]); \
227             __m128i destMultipliedByOneMinusAlpha; \
228             BYTE_MUL_SSE2(destMultipliedByOneMinusAlpha, dstVector, alphaChannel, colorMask, half); \
230             const __m128i result = _mm_add_epi8(srcVector, destMultipliedByOneMinusAlpha); \
231             _mm_store_si128((__m128i *)&dst[x], result); \
234     for (; x < length; ++x) { \
235         quint32 s = src[x]; \
237             s = BYTE_MUL(s, const_alpha); \
238             dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s)); \
245 #endif // QT_COMPILER_SUPPORTS_SSE2
247 #endif // QDRAWINGPRIMITIVE_SSE2_P_H