1 /****************************************************************************
3 ** Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
4 ** All rights reserved.
5 ** Contact: Nokia Corporation (qt-info@nokia.com)
7 ** This file is part of the QtGui module of the Qt Toolkit.
9 ** $QT_BEGIN_LICENSE:LGPL$
10 ** GNU Lesser General Public License Usage
11 ** This file may be used under the terms of the GNU Lesser General Public
12 ** License version 2.1 as published by the Free Software Foundation and
13 ** appearing in the file LICENSE.LGPL included in the packaging of this
14 ** file. Please review the following information to ensure the GNU Lesser
15 ** General Public License version 2.1 requirements will be met:
16 ** http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
18 ** In addition, as a special exception, Nokia gives you certain additional
19 ** rights. These rights are described in the Nokia Qt LGPL Exception
20 ** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
22 ** GNU General Public License Usage
23 ** Alternatively, this file may be used under the terms of the GNU General
24 ** Public License version 3.0 as published by the Free Software Foundation
25 ** and appearing in the file LICENSE.GPL included in the packaging of this
26 ** file. Please review the following information to ensure the GNU General
27 ** Public License version 3.0 requirements will be met:
28 ** http://www.gnu.org/copyleft/gpl.html.
31 ** Alternatively, this file may be used in accordance with the terms and
32 ** conditions contained in a signed written agreement between you and Nokia.
40 ****************************************************************************/
42 #include <private/qdrawhelper_x86_p.h>
46 #include <private/qdrawingprimitive_sse2_p.h>
47 #include <private/qpaintengine_raster_p.h>
// Blend a premultiplied ARGB32 source image onto an ARGB32 destination
// (source-over), one scanline at a time, using SSE2.
// dbpl / sbpl are the destination / source strides in bytes.
// NOTE(review): the third signature line (presumably "int w, int h,
// int const_alpha)") and several closing braces are missing from this
// chunk -- confirm against the full file.
51 void qt_blend_argb32_on_argb32_sse2(uchar *destPixels, int dbpl,
52 const uchar *srcPixels, int sbpl,
56 const quint32 *src = (const quint32 *) srcPixels;
57 quint32 *dst = (quint32 *) destPixels;
// const_alpha == 256 means fully opaque: plain per-pixel source-over.
58 if (const_alpha == 256) {
59 const __m128i alphaMask = _mm_set1_epi32(0xff000000);
60 const __m128i nullVector = _mm_set1_epi32(0);
// 0x80 per 16-bit lane: rounding constant for the byte-multiply macros.
61 const __m128i half = _mm_set1_epi16(0x80);
62 const __m128i one = _mm_set1_epi16(0xff);
// Mask selecting the two even color channels of each pixel (A/G vs R/B split).
63 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
64 for (int y = 0; y < h; ++y) {
// Vectorized source-over for one scanline; the macro is expected to handle
// its own alignment prologue/epilogue.
65 BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, w, nullVector, half, one, colorMask, alphaMask);
// Advance both row pointers by their byte strides.
66 dst = (quint32 *)(((uchar *) dst) + dbpl);
67 src = (const quint32 *)(((const uchar *) src) + sbpl);
// const_alpha == 0 would be a no-op, so it is skipped entirely.
69 } else if (const_alpha != 0) {
70 // dest = (s + d * sia) * ca + d * cia
71 // = s * ca + d * (sia * ca + cia)
72 // = s * ca + d * (1 - sa*ca)
// Rescale const_alpha from the 0..256 range to 0..255 for the byte-mul macros.
73 const_alpha = (const_alpha * 255) >> 8;
74 const __m128i nullVector = _mm_set1_epi32(0);
75 const __m128i half = _mm_set1_epi16(0x80);
76 const __m128i one = _mm_set1_epi16(0xff);
77 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
78 const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
79 for (int y = 0; y < h; ++y) {
80 BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, w, nullVector, half, one, colorMask, constAlphaVector)
81 dst = (quint32 *)(((uchar *) dst) + dbpl);
82 src = (const quint32 *)(((const uchar *) src) + sbpl);
// Forward declaration of the scalar fallback, defined in
// qblendfunctions.cpp.  NOTE(review): its full parameter list
// ("int w, int h, int const_alpha") is on a line missing from this chunk.
87 // qblendfunctions.cpp
88 void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
89 const uchar *srcPixels, int sbpl,
// Blend an opaque RGB32 source onto an RGB32 destination with a constant
// alpha (cross-fade: dst = src*ca + dst*(1-ca)), using SSE2.
// For const_alpha == 256 (fully opaque) it delegates to the scalar
// qt_blend_rgb32_on_rgb32, which can do a straight row copy.
93 void qt_blend_rgb32_on_rgb32_sse2(uchar *destPixels, int dbpl,
94 const uchar *srcPixels, int sbpl,
98 const quint32 *src = (const quint32 *) srcPixels;
99 quint32 *dst = (quint32 *) destPixels;
100 if (const_alpha != 256) {
// const_alpha == 0 leaves the destination untouched.
101 if (const_alpha != 0) {
102 const __m128i nullVector = _mm_set1_epi32(0);
103 const __m128i half = _mm_set1_epi16(0x80);
104 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
// Rescale const_alpha from 0..256 to 0..255 for INTERPOLATE_PIXEL_255.
106 const_alpha = (const_alpha * 255) >> 8;
107 int one_minus_const_alpha = 255 - const_alpha;
108 const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
109 const __m128i oneMinusConstAlpha = _mm_set1_epi16(one_minus_const_alpha);
110 for (int y = 0; y < h; ++y) {
113 // First, align dest to 16 bytes:
114 ALIGNMENT_PROLOGUE_16BYTES(dst, x, w) {
115 dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
// Main vector loop: 4 pixels per iteration; unaligned source load,
// aligned destination load/store (alignment guaranteed by the prologue).
118 for (; x < w-3; x += 4) {
119 __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
// NOTE(review): when all four source pixels are 0x00000000 the chunk is
// skipped and dst is left unchanged, whereas the scalar prologue/epilogue
// compute dst*(1-ca) for a zero source -- verify this shortcut is
// intentional for RGB32 data.
120 if (_mm_movemask_epi8(_mm_cmpeq_epi32(srcVector, nullVector)) != 0xffff) {
121 const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
123 INTERPOLATE_PIXEL_255_SSE2(result, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half);
124 _mm_store_si128((__m128i *)&dst[x], result);
// Scalar epilogue for the trailing 0..3 pixels of the row.
128 dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
130 dst = (quint32 *)(((uchar *) dst) + dbpl);
131 src = (const quint32 *)(((const uchar *) src) + sbpl);
// Fully opaque: let the scalar version handle it (plain copy per row).
135 qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
// Composition-function entry point: premultiplied ARGB32 source-over for a
// single span of `length` pixels, with an optional constant alpha.
// Unlike the image-blend variants above, const_alpha here is in 0..255
// (255 == opaque), as asserted below.
139 void QT_FASTCALL comp_func_SourceOver_sse2(uint *destPixels, const uint *srcPixels, int length, uint const_alpha)
141 Q_ASSERT(const_alpha < 256);
143 const quint32 *src = (const quint32 *) srcPixels;
144 quint32 *dst = (quint32 *) destPixels;
146 const __m128i nullVector = _mm_set1_epi32(0);
// 0x80 per 16-bit lane: rounding constant for the byte-multiply macros.
147 const __m128i half = _mm_set1_epi16(0x80);
148 const __m128i one = _mm_set1_epi16(0xff);
149 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
150 if (const_alpha == 255) {
// Opaque case: plain source-over (alphaMask selects the alpha bytes).
151 const __m128i alphaMask = _mm_set1_epi32(0xff000000);
152 BLEND_SOURCE_OVER_ARGB32_SSE2(dst, src, length, nullVector, half, one, colorMask, alphaMask);
// Otherwise modulate the source by const_alpha while blending.
154 const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
155 BLEND_SOURCE_OVER_ARGB32_WITH_CONST_ALPHA_SSE2(dst, src, length, nullVector, half, one, colorMask, constAlphaVector);
// Composition-function entry point for the Plus (additive, saturating)
// mode: dst = saturate(dst + src), optionally interpolated with dst by
// const_alpha (0..255).  Uses saturating byte adds (_mm_adds_epu8).
159 void QT_FASTCALL comp_func_Plus_sse2(uint *dst, const uint *src, int length, uint const_alpha)
163 if (const_alpha == 255) {
164 // 1) Prologue: align destination on 16 bytes
165 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
166 dst[x] = comp_func_Plus_one_pixel(dst[x], src[x]);
168 // 2) composition with SSE2
169 for (; x < length - 3; x += 4) {
// Unaligned source load; aligned destination load (guaranteed by prologue).
170 const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
171 const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
// Per-byte saturating add of all four pixels at once.
173 const __m128i result = _mm_adds_epu8(srcVector, dstVector);
174 _mm_store_si128((__m128i *)&dst[x], result);
// 3) Scalar epilogue for the trailing 0..3 pixels.
178 for (; x < length; ++x)
179 dst[x] = comp_func_Plus_one_pixel(dst[x], src[x]);
// const_alpha < 255: result = (dst plus src)*ca + dst*(1-ca).
181 const int one_minus_const_alpha = 255 - const_alpha;
182 const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
183 const __m128i oneMinusConstAlpha = _mm_set1_epi16(one_minus_const_alpha);
185 // 1) Prologue: align destination on 16 bytes
186 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
187 dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
// Rounding constant and channel mask for INTERPOLATE_PIXEL_255_SSE2.
189 const __m128i half = _mm_set1_epi16(0x80);
190 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
191 // 2) composition with SSE2
192 for (; x < length - 3; x += 4) {
193 const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
194 const __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
// Saturating add, then blend the sum with the original destination.
196 __m128i result = _mm_adds_epu8(srcVector, dstVector);
197 INTERPOLATE_PIXEL_255_SSE2(result, result, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half)
198 _mm_store_si128((__m128i *)&dst[x], result);
// 3) Scalar epilogue for the trailing 0..3 pixels.
202 for (; x < length; ++x)
203 dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
// Composition-function entry point for Source mode: dst = src when fully
// opaque, otherwise dst = src*ca + dst*(1-ca) with const_alpha in 0..255.
207 void QT_FASTCALL comp_func_Source_sse2(uint *dst, const uint *src, int length, uint const_alpha)
209 if (const_alpha == 255) {
// Opaque: Source mode degenerates to a straight copy.
210 ::memcpy(dst, src, length * sizeof(uint));
212 const int ialpha = 255 - const_alpha;
216 // 1) prologue, align on 16 bytes
217 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
218 dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], ialpha);
220 // 2) interpolate pixels with SSE2
221 const __m128i half = _mm_set1_epi16(0x80);
222 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
223 const __m128i constAlphaVector = _mm_set1_epi16(const_alpha);
224 const __m128i oneMinusConstAlpha = _mm_set1_epi16(ialpha);
225 for (; x < length - 3; x += 4) {
// Unaligned source load; aligned destination load (guaranteed by prologue).
226 const __m128i srcVector = _mm_loadu_si128((__m128i *)&src[x]);
227 __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
// In-place blend: dstVector = src*ca + dst*(1-ca) for 4 pixels.
228 INTERPOLATE_PIXEL_255_SSE2(dstVector, srcVector, dstVector, constAlphaVector, oneMinusConstAlpha, colorMask, half)
229 _mm_store_si128((__m128i *)&dst[x], dstVector);
// 3) Scalar epilogue for the trailing 0..3 pixels.
233 for (; x < length; ++x)
234 dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], ialpha);
// Fill `count` 32-bit words with `value` using SSE2 non-temporal
// (streaming) stores.  Small counts are handled by a fall-through switch;
// larger fills align the pointer, stream 16 bytes at a time via a Duff's
// device, then patch up the tail.
// NOTE(review): the guarding "if (count < 7) { switch (count) {" lines and
// the "} while (--n > 0);" terminator of the Duff's device are on lines
// missing from this chunk.
238 void qt_memfill32_sse2(quint32 *dest, quint32 value, int count)
// Small-count path: intentional case fall-through writes exactly `count`
// words.
242 case 6: *dest++ = value;
243 case 5: *dest++ = value;
244 case 4: *dest++ = value;
245 case 3: *dest++ = value;
246 case 2: *dest++ = value;
247 case 1: *dest = value;
// Byte misalignment of dest within a 16-byte block; since dest is
// 4-byte aligned (quint32*), it can only be 0, 4, 8 or 12.
252 const int align = (quintptr)(dest) & 0xf;
// Fall-through prologue: write 1..3 single words until dest is
// 16-byte aligned.
254 case 4: *dest++ = value; --count;
255 case 8: *dest++ = value; --count;
256 case 12: *dest++ = value; --count;
// Number of whole 16-byte (4-pixel) blocks remaining.
259 int count128 = count / 4;
260 __m128i *dst128 = reinterpret_cast<__m128i*>(dest);
261 const __m128i value128 = _mm_set_epi32(value, value, value, value);
// Duff's device, unrolled x4: `n` outer iterations of up to four
// streaming stores.  _mm_stream_si128 bypasses the cache, which is the
// right trade-off for large fills.
263 int n = (count128 + 3) / 4;
264 switch (count128 & 0x3) {
265 case 0: do { _mm_stream_si128(dst128++, value128);
266 case 3: _mm_stream_si128(dst128++, value128);
267 case 2: _mm_stream_si128(dst128++, value128);
268 case 1: _mm_stream_si128(dst128++, value128);
// Tail: the last count%4 words, written back-to-front via fall-through.
272 const int rest = count & 0x3;
275 case 3: dest[count - 3] = value;
276 case 2: dest[count - 2] = value;
277 case 1: dest[count - 1] = value;
// Solid-color source-over: dst = color + dst*(255 - alpha(color)), with
// `color` premultiplied by const_alpha first when const_alpha < 255.
282 void QT_FASTCALL comp_func_solid_SourceOver_sse2(uint *destPixels, int length, uint color, uint const_alpha)
// Both const_alpha and the color's alpha are 255 only if their bitwise
// AND is 255 (each is <= 255), i.e. the fill is fully opaque: memfill.
284 if ((const_alpha & qAlpha(color)) == 255) {
285 qt_memfill32_sse2(destPixels, color, length);
287 if (const_alpha != 255)
// Premultiply the solid color by the constant alpha.
288 color = BYTE_MUL(color, const_alpha);
// 255 - alpha(color), i.e. alpha of the bitwise-complemented color.
290 const quint32 minusAlphaOfColor = qAlpha(~color);
293 quint32 *dst = (quint32 *) destPixels;
294 const __m128i colorVector = _mm_set1_epi32(color);
295 const __m128i colorMask = _mm_set1_epi32(0x00ff00ff);
296 const __m128i half = _mm_set1_epi16(0x80);
297 const __m128i minusAlphaOfColorVector = _mm_set1_epi16(minusAlphaOfColor);
// Scalar prologue until dst is 16-byte aligned.
299 ALIGNMENT_PROLOGUE_16BYTES(dst, x, length)
300 destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
302 for (; x < length-3; x += 4) {
303 __m128i dstVector = _mm_load_si128((__m128i *)&dst[x]);
// dst *= (255 - alpha); then add the premultiplied color.  A plain
// (non-saturating) byte add is sufficient because premultiplied
// channels cannot overflow here.
304 BYTE_MUL_SSE2(dstVector, dstVector, minusAlphaOfColorVector, colorMask, half);
305 dstVector = _mm_add_epi8(colorVector, dstVector);
306 _mm_store_si128((__m128i *)&dst[x], dstVector);
// Scalar epilogue for the trailing 0..3 pixels.
308 for (;x < length; ++x)
309 destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
// Dispatch table of solid-fill composition functions, indexed by
// composition mode.  Identical to the generic table except that the
// SourceOver entry is replaced by the SSE2 implementation above; the
// entry order must stay in sync with the mode-indexed tables in
// qdrawhelper.cpp.  NOTE(review): gaps in the original numbering
// (e.g. 325) indicate entries missing from this chunk.
313 CompositionFunctionSolid qt_functionForModeSolid_onlySSE2[numCompositionFunctions] = {
// SSE2-accelerated entry; everything below is the generic fallback.
314 comp_func_solid_SourceOver_sse2,
315 comp_func_solid_DestinationOver,
316 comp_func_solid_Clear,
317 comp_func_solid_Source,
318 comp_func_solid_Destination,
319 comp_func_solid_SourceIn,
320 comp_func_solid_DestinationIn,
321 comp_func_solid_SourceOut,
322 comp_func_solid_DestinationOut,
323 comp_func_solid_SourceAtop,
324 comp_func_solid_DestinationAtop,
326 comp_func_solid_Plus,
// Separable blend modes (SVG 1.2 / CSS compositing).
327 comp_func_solid_Multiply,
328 comp_func_solid_Screen,
329 comp_func_solid_Overlay,
330 comp_func_solid_Darken,
331 comp_func_solid_Lighten,
332 comp_func_solid_ColorDodge,
333 comp_func_solid_ColorBurn,
334 comp_func_solid_HardLight,
335 comp_func_solid_SoftLight,
336 comp_func_solid_Difference,
337 comp_func_solid_Exclusion,
// Bitwise raster operations.
338 rasterop_solid_SourceOrDestination,
339 rasterop_solid_SourceAndDestination,
340 rasterop_solid_SourceXorDestination,
341 rasterop_solid_NotSourceAndNotDestination,
342 rasterop_solid_NotSourceOrNotDestination,
343 rasterop_solid_NotSourceXorDestination,
344 rasterop_solid_NotSource,
345 rasterop_solid_NotSourceAndDestination,
346 rasterop_solid_SourceAndNotDestination
// Dispatch table of span composition functions, indexed by composition
// mode.  SourceOver and Source entries use the SSE2 implementations
// defined above; the order must stay in sync with the mode-indexed
// tables in qdrawhelper.cpp.  NOTE(review): gaps in the original
// numbering (e.g. 352, 355, 361-367) indicate entries missing from this
// chunk -- presumably including comp_func_Plus_sse2.
349 CompositionFunction qt_functionForMode_onlySSE2[numCompositionFunctions] = {
// SSE2-accelerated entries mixed with generic fallbacks.
350 comp_func_SourceOver_sse2,
351 comp_func_DestinationOver,
353 comp_func_Source_sse2,
354 comp_func_Destination,
356 comp_func_DestinationIn,
358 comp_func_DestinationOut,
359 comp_func_SourceAtop,
360 comp_func_DestinationAtop,
368 comp_func_ColorDodge,
372 comp_func_Difference,
// Bitwise raster operations.
374 rasterop_SourceOrDestination,
375 rasterop_SourceAndDestination,
376 rasterop_SourceXorDestination,
377 rasterop_NotSourceAndNotDestination,
378 rasterop_NotSourceOrNotDestination,
379 rasterop_NotSourceXorDestination,
381 rasterop_NotSourceAndDestination,
382 rasterop_SourceAndNotDestination
// Fill `count` 16-bit words with `value` by widening to a 32-bit pair and
// delegating to qt_memfill32_sse2.  NOTE(review): the small-count guard
// ("if (count < 3) { switch (count) {") and surrounding braces are on
// lines missing from this chunk.
385 void qt_memfill16_sse2(quint16 *dest, quint16 value, int count)
// Small-count path: intentional case fall-through.
389 case 2: *dest++ = value;
390 case 1: *dest = value;
// If dest is not 4-byte aligned, write one 16-bit word first so the
// 32-bit fill below starts on a 4-byte boundary.
395 const int align = (quintptr)(dest) & 0x3;
397 case 2: *dest++ = value; --count;
// Two copies of the 16-bit value packed into one 32-bit word.
400 const quint32 value32 = (value << 16) | value;
401 qt_memfill32_sse2(reinterpret_cast<quint32*>(dest), value32, count / 2);
// If count was odd, the 32-bit fill missed the last 16-bit word.
404 dest[count - 1] = value;
// Blit a 1-bit-per-pixel bitmap as a solid 32-bit color at (x, y) in the
// raster buffer: wherever a bitmap bit is set, write `color`; elsewhere
// leave the destination untouched.  Implemented with _mm_maskmoveu_si128,
// which conditionally stores bytes whose mask byte has the sign bit set.
// NOTE(review): the signature line carrying `quint32 color`, the
// width>=8/else branch structure and several closing braces are on lines
// missing from this chunk.
407 void qt_bitmapblit32_sse2(QRasterBuffer *rasterBuffer, int x, int y,
409 const uchar *src, int width, int height, int stride)
411 quint32 *dest = reinterpret_cast<quint32*>(rasterBuffer->scanLine(y)) + x;
412 const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint32);
414 const __m128i c128 = _mm_set1_epi32(color);
// maskmask1 picks one of bitmap bits 7..4 per byte lane (pixels 0..3);
// adding maskadd1 carries that bit into the byte's sign bit, which is
// what _mm_maskmoveu_si128 tests (e.g. 0x10 + 0x70 = 0x80).
415 const __m128i maskmask1 = _mm_set_epi32(0x10101010, 0x20202020,
416 0x40404040, 0x80808080);
417 const __m128i maskadd1 = _mm_set_epi32(0x70707070, 0x60606060,
418 0x40404040, 0x00000000);
// Same trick for bitmap bits 3..0 (pixels 4..7).
421 const __m128i maskmask2 = _mm_set_epi32(0x01010101, 0x02020202,
422 0x04040404, 0x08080808);
423 const __m128i maskadd2 = _mm_set_epi32(0x7f7f7f7f, 0x7e7e7e7e,
424 0x7c7c7c7c, 0x78787878);
// Wide path: process 8 bitmap bits (8 destination pixels) per iteration.
426 for (int x = 0; x < width; x += 8) {
427 const quint8 s = src[x >> 3];
// Broadcast the bitmap byte to all 16 lanes, then select bits per lane.
430 __m128i mask1 = _mm_set1_epi8(s);
431 __m128i mask2 = mask1;
433 mask1 = _mm_and_si128(mask1, maskmask1);
434 mask1 = _mm_add_epi8(mask1, maskadd1);
// Masked store of 4 pixels (bits 7..4), then 4 more (bits 3..0).
435 _mm_maskmoveu_si128(c128, mask1, (char*)(dest + x));
436 mask2 = _mm_and_si128(mask2, maskmask2);
437 mask2 = _mm_add_epi8(mask2, maskadd2);
438 _mm_maskmoveu_si128(c128, mask2, (char*)(dest + x + 4));
// Narrow path: a single bitmap byte covering at most 4 pixels.
445 const quint8 s = *src;
447 __m128i mask1 = _mm_set1_epi8(s);
448 mask1 = _mm_and_si128(mask1, maskmask1);
449 mask1 = _mm_add_epi8(mask1, maskadd1);
450 _mm_maskmoveu_si128(c128, mask1, (char*)(dest));
// 16-bit (RGB16) variant of qt_bitmapblit32_sse2: converts `color` to
// RGB565 and masked-stores 8 pixels (one full bitmap byte) per 128-bit
// store.  NOTE(review): the signature line carrying `quint32 color`, the
// loop structure and closing braces are on lines missing from this chunk.
458 void qt_bitmapblit16_sse2(QRasterBuffer *rasterBuffer, int x, int y,
460 const uchar *src, int width, int height, int stride)
462 const quint16 c = qt_colorConvert<quint16, quint32>(color, 0);
463 quint16 *dest = reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
464 const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint16);
466 const __m128i c128 = _mm_set1_epi16(c);
// The 16-bit mask constants below intentionally truncate when split into
// bytes; silence MSVC's constant-truncation warning.
467 #if defined(Q_CC_MSVC)
468 #  pragma warning(disable: 4309) // truncation of constant value
// Same select-bit-then-carry-into-sign-bit trick as the 32-bit blit, but
// with one bitmap bit per 16-bit pixel (both bytes of each pixel get the
// same mask), so one masked store covers 8 pixels.
470 const __m128i maskmask = _mm_set_epi16(0x0101, 0x0202, 0x0404, 0x0808,
471 0x1010, 0x2020, 0x4040, 0x8080);
472 const __m128i maskadd = _mm_set_epi16(0x7f7f, 0x7e7e, 0x7c7c, 0x7878,
473 0x7070, 0x6060, 0x4040, 0x0000);
476 for (int x = 0; x < width; x += 8) {
477 const quint8 s = src[x >> 3];
480 __m128i mask = _mm_set1_epi8(s);
481 mask = _mm_and_si128(mask, maskmask);
482 mask = _mm_add_epi8(mask, maskadd);
// Conditionally store the RGB565 color where bitmap bits are set.
483 _mm_maskmoveu_si128(c128, mask, (char*)(dest + x));
// Thin element-wise SIMD abstraction layer (4 x int32 / 4 x float)
// consumed by the QSimdSse2 trait used in qt_fetch_radial_gradient_sse2
// below.  Each wrapper maps 1:1 onto a single SSE/SSE2 intrinsic.
493 typedef __m128i Int32x4;
494 typedef __m128 Float32x4;
// Unions for extracting individual lanes from a vector register.
496 union Vect_buffer_i { Int32x4 v; int i[4]; };
497 union Vect_buffer_f { Float32x4 v; float f[4]; };
// Broadcast a scalar to all four lanes (C++ overloads per scalar type).
499 static inline Float32x4 v_dup(float x) { return _mm_set1_ps(x); }
// NOTE(review): the double overload narrows to float (implicit in the
// _mm_set1_ps call) -- presumably intentional precision loss; confirm.
500 static inline Float32x4 v_dup(double x) { return _mm_set1_ps(x); }
501 static inline Int32x4 v_dup(int x) { return _mm_set1_epi32(x); }
502 static inline Int32x4 v_dup(uint x) { return _mm_set1_epi32(x); }
504 static inline Float32x4 v_add(Float32x4 a, Float32x4 b) { return _mm_add_ps(a, b); }
505 static inline Int32x4 v_add(Int32x4 a, Int32x4 b) { return _mm_add_epi32(a, b); }
507 static inline Float32x4 v_max(Float32x4 a, Float32x4 b) { return _mm_max_ps(a, b); }
508 static inline Float32x4 v_min(Float32x4 a, Float32x4 b) { return _mm_min_ps(a, b); }
// 16-bit saturating-style min (SSE2 has no 32-bit epi32 min); callers
// must ensure values fit the int16 comparison semantics.
509 static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) { return _mm_min_epi16(a, b); }
511 static inline Int32x4 v_and(Int32x4 a, Int32x4 b) { return _mm_and_si128(a, b); }
513 static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) { return _mm_sub_ps(a, b); }
514 static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) { return _mm_sub_epi32(a, b); }
516 static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) { return _mm_mul_ps(a, b); }
518 static inline Float32x4 v_sqrt(Float32x4 x) { return _mm_sqrt_ps(x); }
// Float -> int conversion with truncation toward zero (cvtt).
520 static inline Int32x4 v_toInt(Float32x4 x) { return _mm_cvttps_epi32(x); }
// Per-lane comparison returning an all-ones/all-zeros integer mask.
// NOTE(review): despite the name, both variants use _mm_cmpgt_ps
// (strictly greater) -- presumably exact equality is irrelevant for the
// gradient math; confirm against the generic (non-SSE2) implementation.
522 // pre-VS 2008 doesn't have cast intrinsics, whereas 2008 and later requires it
523 #if defined(Q_CC_MSVC) && _MSC_VER < 1500
// Old-MSVC fallback: reinterpret the float compare result via a union
// instead of _mm_castps_si128 (bitwise reinterpretation either way).
524 static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b)
526 union Convert { Int32x4 vi; Float32x4 vf; } convert;
527 convert.vf = _mm_cmpgt_ps(a, b);
531 static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) { return _mm_castps_si128(_mm_cmpgt_ps(a, b)); }
// SSE2 entry point for fetching a span of a radial gradient: delegates to
// the shared template, instantiated with the QSimdSse2 trait that binds
// the v_* wrappers above.
535 const uint * QT_FASTCALL qt_fetch_radial_gradient_sse2(uint *buffer, const Operator *op, const QSpanData *data,
536 int y, int x, int length)
538 return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdSse2> >(buffer, op, data, y, x, length);
544 #endif // QT_HAVE_SSE2