1 /* -*- Mode: c; c-basic-offset: 4; tab-width: 8; indent-tabs-mode: t; -*- */
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that
9 * copyright notice and this permission notice appear in supporting
10 * documentation, and that the name of SuSE not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. SuSE makes no representations about the
13 * suitability of this software for any purpose. It is provided "as is"
14 * without express or implied warranty.
16 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
18 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
19 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
20 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
21 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 * Author: Keith Packard, SuSE, Inc.
26 #ifndef PIXMAN_FAST_PATH_H__
27 #define PIXMAN_FAST_PATH_H__
29 #include "pixman-private.h"
31 #define PIXMAN_REPEAT_COVER -1
33 /* Flags describing input parameters to fast path macro template.
34 * Turning on some flag values may indicate that
35 * "some property X is available so template can use this" or
36 * "some property X should be handled by template".
38 * FLAG_HAVE_SOLID_MASK
39 * Input mask is solid so template should handle this.
41 * FLAG_HAVE_NON_SOLID_MASK
42 * Input mask is bits mask so template should handle this.
44 * FLAG_HAVE_SOLID_MASK and FLAG_HAVE_NON_SOLID_MASK are mutually
45 * exclusive. (It's not allowed to turn both flags on)
48 #define FLAG_HAVE_SOLID_MASK (1 << 1)
49 #define FLAG_HAVE_NON_SOLID_MASK (1 << 2)
51 /* To avoid too short repeated scanline function calls, extend source
52 * scanlines having width less than below constant value.
54 #define REPEAT_NORMAL_MIN_WIDTH 64
56 static force_inline pixman_bool_t
57 repeat (pixman_repeat_t repeat, int *c, int size)
59 if (repeat == PIXMAN_REPEAT_NONE)
61 if (*c < 0 || *c >= size)
64 else if (repeat == PIXMAN_REPEAT_NORMAL)
71 else if (repeat == PIXMAN_REPEAT_PAD)
73 *c = CLIP (*c, 0, size - 1);
77 *c = MOD (*c, size * 2);
79 *c = size * 2 - *c - 1;
84 static force_inline int
85 pixman_fixed_to_bilinear_weight (pixman_fixed_t x)
87 return (x >> (16 - BILINEAR_INTERPOLATION_BITS)) &
88 ((1 << BILINEAR_INTERPOLATION_BITS) - 1);
91 #if BILINEAR_INTERPOLATION_BITS <= 4
92 /* Inspired by Filter_32_opaque from Skia */
93 static force_inline uint32_t
94 bilinear_interpolation (uint32_t tl, uint32_t tr,
95 uint32_t bl, uint32_t br,
98 int distxy, distxiy, distixy, distixiy;
101 distx <<= (4 - BILINEAR_INTERPOLATION_BITS);
102 disty <<= (4 - BILINEAR_INTERPOLATION_BITS);
104 distxy = distx * disty;
105 distxiy = (distx << 4) - distxy; /* distx * (16 - disty) */
106 distixy = (disty << 4) - distxy; /* disty * (16 - distx) */
108 16 * 16 - (disty << 4) -
109 (distx << 4) + distxy; /* (16 - distx) * (16 - disty) */
111 lo = (tl & 0xff00ff) * distixiy;
112 hi = ((tl >> 8) & 0xff00ff) * distixiy;
114 lo += (tr & 0xff00ff) * distxiy;
115 hi += ((tr >> 8) & 0xff00ff) * distxiy;
117 lo += (bl & 0xff00ff) * distixy;
118 hi += ((bl >> 8) & 0xff00ff) * distixy;
120 lo += (br & 0xff00ff) * distxy;
121 hi += ((br >> 8) & 0xff00ff) * distxy;
123 return ((lo >> 8) & 0xff00ff) | (hi & ~0xff00ff);
129 static force_inline uint32_t
130 bilinear_interpolation (uint32_t tl, uint32_t tr,
131 uint32_t bl, uint32_t br,
132 int distx, int disty)
134 uint64_t distxy, distxiy, distixy, distixiy;
135 uint64_t tl64, tr64, bl64, br64;
138 distx <<= (8 - BILINEAR_INTERPOLATION_BITS);
139 disty <<= (8 - BILINEAR_INTERPOLATION_BITS);
141 distxy = distx * disty;
142 distxiy = distx * (256 - disty);
143 distixy = (256 - distx) * disty;
144 distixiy = (256 - distx) * (256 - disty);
147 tl64 = tl & 0xff0000ff;
148 tr64 = tr & 0xff0000ff;
149 bl64 = bl & 0xff0000ff;
150 br64 = br & 0xff0000ff;
152 f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
153 r = f & 0x0000ff0000ff0000ull;
157 tl64 = ((tl64 << 16) & 0x000000ff00000000ull) | (tl64 & 0x0000ff00ull);
160 tr64 = ((tr64 << 16) & 0x000000ff00000000ull) | (tr64 & 0x0000ff00ull);
163 bl64 = ((bl64 << 16) & 0x000000ff00000000ull) | (bl64 & 0x0000ff00ull);
166 br64 = ((br64 << 16) & 0x000000ff00000000ull) | (br64 & 0x0000ff00ull);
168 f = tl64 * distixiy + tr64 * distxiy + bl64 * distixy + br64 * distxy;
169 r |= ((f >> 16) & 0x000000ff00000000ull) | (f & 0xff000000ull);
171 return (uint32_t)(r >> 16);
176 static force_inline uint32_t
177 bilinear_interpolation (uint32_t tl, uint32_t tr,
178 uint32_t bl, uint32_t br,
179 int distx, int disty)
181 int distxy, distxiy, distixy, distixiy;
184 distx <<= (8 - BILINEAR_INTERPOLATION_BITS);
185 disty <<= (8 - BILINEAR_INTERPOLATION_BITS);
187 distxy = distx * disty;
188 distxiy = (distx << 8) - distxy; /* distx * (256 - disty) */
189 distixy = (disty << 8) - distxy; /* disty * (256 - distx) */
191 256 * 256 - (disty << 8) -
192 (distx << 8) + distxy; /* (256 - distx) * (256 - disty) */
195 r = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
196 + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;
199 f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
200 + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
210 f = (tl & 0x000000ff) * distixiy + (tr & 0x000000ff) * distxiy
211 + (bl & 0x000000ff) * distixy + (br & 0x000000ff) * distxy;
215 f = (tl & 0x0000ff00) * distixiy + (tr & 0x0000ff00) * distxiy
216 + (bl & 0x0000ff00) * distixy + (br & 0x0000ff00) * distxy;
223 #endif // BILINEAR_INTERPOLATION_BITS <= 4
225 static force_inline argb_t
226 bilinear_interpolation_float (argb_t tl, argb_t tr,
227 argb_t bl, argb_t br,
228 float distx, float disty)
230 float distxy, distxiy, distixy, distixiy;
233 distxy = distx * disty;
234 distxiy = distx - (1.f - distxy);
235 distixy = (1.f - distx) * disty;
236 distixiy = (1.f - distx) * (1.f - disty);
238 r.a = tl.a * distixiy + tr.a * distxiy +
239 bl.a * distixy + br.a * distxy;
240 r.r = tl.r * distixiy + tr.r * distxiy +
241 bl.r * distixy + br.r * distxy;
242 r.g = tl.g * distixiy + tr.g * distxiy +
243 bl.g * distixy + br.g * distxy;
244 r.b = tl.b * distixiy + tr.b * distxiy +
245 bl.b * distixy + br.b * distxy;
251 * For each scanline fetched from source image with PAD repeat:
252 * - calculate how many pixels need to be padded on the left side
253 * - calculate how many pixels need to be padded on the right side
254 * - update width to only count pixels which are fetched from the image
255 * All this information is returned via 'width', 'left_pad', 'right_pad'
256 * arguments. The code is assuming that 'unit_x' is positive.
258 * Note: 64-bit math is used in order to avoid potential overflows, which
259 * is probably excessive in many cases. This particular function
260 * may need its own correctness test and performance tuning.
262 static force_inline void
263 pad_repeat_get_scanline_bounds (int32_t source_image_width,
265 pixman_fixed_t unit_x,
270 int64_t max_vx = (int64_t) source_image_width << 16;
274 tmp = ((int64_t) unit_x - 1 - vx) / unit_x;
282 *left_pad = (int32_t) tmp;
283 *width -= (int32_t) tmp;
290 tmp = ((int64_t) unit_x - 1 - vx + max_vx) / unit_x - *left_pad;
296 else if (tmp >= *width)
302 *right_pad = *width - (int32_t) tmp;
303 *width = (int32_t) tmp;
307 /* A macroified version of specialized nearest scalers for some
308 * common 8888 and 565 formats. It supports SRC and OVER ops.
310 * There are two repeat versions, one that handles repeat normal,
311 * and one without repeat handling that only works if the src region
312 * used is completely covered by the pre-repeated source samples.
314 * The loops are unrolled to process two pixels per iteration for better
315 * performance on most CPU architectures (superscalar processors
316 * can issue several operations simultaneously, other processors can hide
317 * instructions latencies by pipelining operations). Unrolling more
318 * does not make much sense because the compiler will start running out
319 * of spare registers soon.
322 #define GET_8888_ALPHA(s) ((s) >> 24)
323 /* This is not actually used since we don't have an OVER with
324 565 source, but it is needed to build. */
325 #define GET_0565_ALPHA(s) 0xff
326 #define GET_x888_ALPHA(s) 0xff
/*
 * Expands to a static scanline function '<scanline_func_name>' performing
 * nearest-neighbour scaling of one scanline from SRC_FORMAT to DST_FORMAT.
 * OP selects SRC or OVER compositing (other operators are rejected up
 * front); repeat_mode NORMAL additionally wraps vx back into the source
 * width.  The main loop processes two pixels per iteration, with a
 * single-pixel tail.
 *
 * NOTE(review): the embedded original line numbers in this listing are not
 * contiguous, so statements (variable declarations, braces, the vx
 * stepping and pixel fetches) appear to have been dropped during
 * extraction; reconcile against upstream pixman before building.
 */
328 #define FAST_NEAREST_SCANLINE(scanline_func_name, SRC_FORMAT, DST_FORMAT, \
329 src_type_t, dst_type_t, OP, repeat_mode) \
330 static force_inline void \
331 scanline_func_name (dst_type_t *dst, \
332 const src_type_t *src, \
335 pixman_fixed_t unit_x, \
336 pixman_fixed_t src_width_fixed, \
337 pixman_bool_t fully_transparent_src) \
344 if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER && fully_transparent_src) \
347 if (PIXMAN_OP_ ## OP != PIXMAN_OP_SRC && PIXMAN_OP_ ## OP != PIXMAN_OP_OVER) \
350 while ((w -= 2) >= 0) \
352 x1 = pixman_fixed_to_int (vx); \
354 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
356 /* This works because we know that unit_x is positive */ \
358 vx -= src_width_fixed; \
362 x2 = pixman_fixed_to_int (vx); \
364 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
366 /* This works because we know that unit_x is positive */ \
368 vx -= src_width_fixed; \
372 if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \
374 a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \
375 a2 = GET_ ## SRC_FORMAT ## _ALPHA(s2); \
379 *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \
383 d = convert_ ## DST_FORMAT ## _to_8888 (*dst); \
384 s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \
386 UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \
387 *dst = convert_8888_to_ ## DST_FORMAT (d); \
393 *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \
397 d = convert_## DST_FORMAT ## _to_8888 (*dst); \
398 s2 = convert_## SRC_FORMAT ## _to_8888 (s2); \
400 UN8x4_MUL_UN8_ADD_UN8x4 (d, a2, s2); \
401 *dst = convert_8888_to_ ## DST_FORMAT (d); \
405 else /* PIXMAN_OP_SRC */ \
407 *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \
408 *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s2); \
414 x1 = pixman_fixed_to_int (vx); \
417 if (PIXMAN_OP_ ## OP == PIXMAN_OP_OVER) \
419 a1 = GET_ ## SRC_FORMAT ## _ALPHA(s1); \
423 *dst = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \
427 d = convert_## DST_FORMAT ## _to_8888 (*dst); \
428 s1 = convert_ ## SRC_FORMAT ## _to_8888 (s1); \
430 UN8x4_MUL_UN8_ADD_UN8x4 (d, a1, s1); \
431 *dst = convert_8888_to_ ## DST_FORMAT (d); \
435 else /* PIXMAN_OP_SRC */ \
437 *dst++ = convert_ ## SRC_FORMAT ## _to_ ## DST_FORMAT (s1); \
/*
 * Expands to a composite entry point 'fast_composite_scaled_nearest<name>'
 * that drives 'scanline_func' over the whole destination: it computes the
 * source-space start position and per-pixel increments from the image
 * transform, clamps them according to repeat_mode (NORMAL wraps, PAD/NONE
 * precompute left/right padding via pad_repeat_get_scanline_bounds), then
 * walks the destination rows calling scanline_func for the padded, inner
 * and trailing parts of each scanline.  'have_mask'/'mask_is_solid' select
 * between no mask, a per-pixel mask line, and a single solid mask value.
 *
 * NOTE(review): the embedded original line numbers jump throughout this
 * macro, so declarations, braces and several statements were evidently
 * dropped from this listing; reconcile against upstream pixman before
 * building.
 */
442 #define FAST_NEAREST_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \
443 dst_type_t, repeat_mode, have_mask, mask_is_solid) \
445 fast_composite_scaled_nearest ## scale_func_name (pixman_implementation_t *imp, \
446 pixman_composite_info_t *info) \
448 PIXMAN_COMPOSITE_ARGS (info); \
449 dst_type_t *dst_line; \
450 mask_type_t *mask_line; \
451 src_type_t *src_first_line; \
453 pixman_fixed_t src_width_fixed = pixman_int_to_fixed (src_image->bits.width); \
454 pixman_fixed_t max_vy; \
456 pixman_fixed_t vx, vy; \
457 pixman_fixed_t unit_x, unit_y; \
458 int32_t left_pad, right_pad; \
462 mask_type_t solid_mask; \
463 const mask_type_t *mask = &solid_mask; \
464 int src_stride, mask_stride, dst_stride; \
466 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \
470 solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \
472 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \
473 mask_stride, mask_line, 1); \
475 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \
476 * transformed from destination space to source space */ \
477 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \
479 /* reference point is the center of the pixel */ \
480 v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \
481 v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \
482 v.vector[2] = pixman_fixed_1; \
484 if (!pixman_transform_point_3d (src_image->common.transform, &v)) \
487 unit_x = src_image->common.transform->matrix[0][0]; \
488 unit_y = src_image->common.transform->matrix[1][1]; \
490 /* Round down to closest integer, ensuring that 0.5 rounds to 0, not 1 */ \
491 v.vector[0] -= pixman_fixed_e; \
492 v.vector[1] -= pixman_fixed_e; \
497 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
499 max_vy = pixman_int_to_fixed (src_image->bits.height); \
501 /* Clamp repeating positions inside the actual samples */ \
502 repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \
503 repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
506 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \
507 PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
509 pad_repeat_get_scanline_bounds (src_image->bits.width, vx, unit_x, \
510 &width, &left_pad, &right_pad); \
511 vx += left_pad * unit_x; \
514 while (--height >= 0) \
517 dst_line += dst_stride; \
518 if (have_mask && !mask_is_solid) \
521 mask_line += mask_stride; \
524 y = pixman_fixed_to_int (vy); \
526 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
527 repeat (PIXMAN_REPEAT_NORMAL, &vy, max_vy); \
528 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \
530 repeat (PIXMAN_REPEAT_PAD, &y, src_image->bits.height); \
531 src = src_first_line + src_stride * y; \
534 scanline_func (mask, dst, \
535 src + src_image->bits.width - src_image->bits.width + 1, \
536 left_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \
540 scanline_func (mask + (mask_is_solid ? 0 : left_pad), \
541 dst + left_pad, src + src_image->bits.width, width, \
542 vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \
546 scanline_func (mask + (mask_is_solid ? 0 : left_pad + width), \
547 dst + left_pad + width, src + src_image->bits.width, \
548 right_pad, -pixman_fixed_e, 0, src_width_fixed, FALSE); \
551 else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
553 static const src_type_t zero[1] = { 0 }; \
554 if (y < 0 || y >= src_image->bits.height) \
556 scanline_func (mask, dst, zero + 1, left_pad + width + right_pad, \
557 -pixman_fixed_e, 0, src_width_fixed, TRUE); \
560 src = src_first_line + src_stride * y; \
563 scanline_func (mask, dst, zero + 1, left_pad, \
564 -pixman_fixed_e, 0, src_width_fixed, TRUE); \
568 scanline_func (mask + (mask_is_solid ? 0 : left_pad), \
569 dst + left_pad, src + src_image->bits.width, width, \
570 vx - src_width_fixed, unit_x, src_width_fixed, FALSE); \
574 scanline_func (mask + (mask_is_solid ? 0 : left_pad + width), \
575 dst + left_pad + width, zero + 1, right_pad, \
576 -pixman_fixed_e, 0, src_width_fixed, TRUE); \
581 src = src_first_line + src_stride * y; \
582 scanline_func (mask, dst, src + src_image->bits.width, width, vx - src_width_fixed, \
583 unit_x, src_width_fixed, FALSE); \
588 /* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */
/* Forwards to FAST_NEAREST_MAINLOOP_INT with an underscore prepended to the
 * function-name suffix, as the extra indirection the Sun Studio workaround
 * above requires. */
589 #define FAST_NEAREST_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t, \
590 dst_type_t, repeat_mode, have_mask, mask_is_solid) \
591 FAST_NEAREST_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t, \
592 dst_type_t, repeat_mode, have_mask, mask_is_solid)
/*
 * Maskless variant: wraps a mask-unaware scanline function in a thin
 * adapter that ignores the (unused uint8_t) mask argument, then
 * instantiates the main loop with have_mask = FALSE.
 *
 * NOTE(review): the embedded original line numbers jump here (595, 599,
 * 601-602, 606, 608 are absent), so parameter lines and braces appear to
 * have been dropped from this listing; reconcile against upstream pixman.
 */
594 #define FAST_NEAREST_MAINLOOP_NOMASK(scale_func_name, scanline_func, src_type_t, dst_type_t, \
596 static force_inline void \
597 scanline_func##scale_func_name##_wrapper ( \
598 const uint8_t *mask, \
600 const src_type_t *src, \
603 pixman_fixed_t unit_x, \
604 pixman_fixed_t max_vx, \
605 pixman_bool_t fully_transparent_src) \
607 scanline_func (dst, src, w, vx, unit_x, max_vx, fully_transparent_src); \
609 FAST_NEAREST_MAINLOOP_INT (scale_func_name, scanline_func##scale_func_name##_wrapper, \
610 src_type_t, uint8_t, dst_type_t, repeat_mode, FALSE, FALSE)
/* Public convenience wrapper around FAST_NEAREST_MAINLOOP_NOMASK.
 * NOTE(review): original line 613 (the 'repeat_mode)' tail of the parameter
 * list) is missing from this listing. */
612 #define FAST_NEAREST_MAINLOOP(scale_func_name, scanline_func, src_type_t, dst_type_t, \
614 FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name, scanline_func, src_type_t, \
615 dst_type_t, repeat_mode)
/* Generates both the scanline function and its maskless main loop for one
 * (source format, destination format, operator, repeat mode) combination.
 * NOTE(review): original line 621 ('OP, repeat_mode)' continuation) is
 * missing from this listing. */
617 #define FAST_NEAREST(scale_func_name, SRC_FORMAT, DST_FORMAT, \
618 src_type_t, dst_type_t, OP, repeat_mode) \
619 FAST_NEAREST_SCANLINE(scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \
620 SRC_FORMAT, DST_FORMAT, src_type_t, dst_type_t, \
622 FAST_NEAREST_MAINLOOP_NOMASK(_ ## scale_func_name ## _ ## OP, \
623 scaled_nearest_scanline_ ## scale_func_name ## _ ## OP, \
624 src_type_t, dst_type_t, repeat_mode)
/*
 * Fast-path table entry generators for the nearest scalers above.  Each
 * SIMPLE_NEAREST_*_FAST_PATH_<REPEAT> macro expands to one pixman_fast_path_t
 * initializer matching (operator, source format, optional a8/solid mask,
 * destination format) plus the repeat-specific flag bits, and names the
 * corresponding generated fast_composite_scaled_nearest_* function.  The
 * combined SIMPLE_NEAREST*_FAST_PATH macros list the variants in order of
 * preference (cover first, since it is fastest).
 *
 * NOTE(review): the embedded original line numbers jump inside every entry
 * (e.g. the 'PIXMAN_ ## s,' source-format lines and the closing '}' lines
 * are absent), so this listing appears to have dropped lines; reconcile
 * against upstream pixman before building.
 */
627 #define SCALED_NEAREST_FLAGS \
628 (FAST_PATH_SCALE_TRANSFORM | \
629 FAST_PATH_NO_ALPHA_MAP | \
630 FAST_PATH_NEAREST_FILTER | \
631 FAST_PATH_NO_ACCESSORS | \
632 FAST_PATH_NARROW_FORMAT)
634 #define SIMPLE_NEAREST_FAST_PATH_NORMAL(op,s,d,func) \
635 { PIXMAN_OP_ ## op, \
637 (SCALED_NEAREST_FLAGS | \
638 FAST_PATH_NORMAL_REPEAT | \
639 FAST_PATH_X_UNIT_POSITIVE), \
641 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
642 fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
645 #define SIMPLE_NEAREST_FAST_PATH_PAD(op,s,d,func) \
646 { PIXMAN_OP_ ## op, \
648 (SCALED_NEAREST_FLAGS | \
649 FAST_PATH_PAD_REPEAT | \
650 FAST_PATH_X_UNIT_POSITIVE), \
652 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
653 fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \
656 #define SIMPLE_NEAREST_FAST_PATH_NONE(op,s,d,func) \
657 { PIXMAN_OP_ ## op, \
659 (SCALED_NEAREST_FLAGS | \
660 FAST_PATH_NONE_REPEAT | \
661 FAST_PATH_X_UNIT_POSITIVE), \
663 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
664 fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
667 #define SIMPLE_NEAREST_FAST_PATH_COVER(op,s,d,func) \
668 { PIXMAN_OP_ ## op, \
670 SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \
672 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
673 fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
676 #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NORMAL(op,s,d,func) \
677 { PIXMAN_OP_ ## op, \
679 (SCALED_NEAREST_FLAGS | \
680 FAST_PATH_NORMAL_REPEAT | \
681 FAST_PATH_X_UNIT_POSITIVE), \
682 PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
683 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
684 fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
687 #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD(op,s,d,func) \
688 { PIXMAN_OP_ ## op, \
690 (SCALED_NEAREST_FLAGS | \
691 FAST_PATH_PAD_REPEAT | \
692 FAST_PATH_X_UNIT_POSITIVE), \
693 PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
694 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
695 fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \
698 #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE(op,s,d,func) \
699 { PIXMAN_OP_ ## op, \
701 (SCALED_NEAREST_FLAGS | \
702 FAST_PATH_NONE_REPEAT | \
703 FAST_PATH_X_UNIT_POSITIVE), \
704 PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
705 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
706 fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
709 #define SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER(op,s,d,func) \
710 { PIXMAN_OP_ ## op, \
712 SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \
713 PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA), \
714 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
715 fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
718 #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func) \
719 { PIXMAN_OP_ ## op, \
721 (SCALED_NEAREST_FLAGS | \
722 FAST_PATH_NORMAL_REPEAT | \
723 FAST_PATH_X_UNIT_POSITIVE), \
724 PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
725 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
726 fast_composite_scaled_nearest_ ## func ## _normal ## _ ## op, \
729 #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD(op,s,d,func) \
730 { PIXMAN_OP_ ## op, \
732 (SCALED_NEAREST_FLAGS | \
733 FAST_PATH_PAD_REPEAT | \
734 FAST_PATH_X_UNIT_POSITIVE), \
735 PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
736 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
737 fast_composite_scaled_nearest_ ## func ## _pad ## _ ## op, \
740 #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE(op,s,d,func) \
741 { PIXMAN_OP_ ## op, \
743 (SCALED_NEAREST_FLAGS | \
744 FAST_PATH_NONE_REPEAT | \
745 FAST_PATH_X_UNIT_POSITIVE), \
746 PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
747 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
748 fast_composite_scaled_nearest_ ## func ## _none ## _ ## op, \
751 #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER(op,s,d,func) \
752 { PIXMAN_OP_ ## op, \
754 SCALED_NEAREST_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_NEAREST, \
755 PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA), \
756 PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS, \
757 fast_composite_scaled_nearest_ ## func ## _cover ## _ ## op, \
760 /* Prefer the use of 'cover' variant, because it is faster */
761 #define SIMPLE_NEAREST_FAST_PATH(op,s,d,func) \
762 SIMPLE_NEAREST_FAST_PATH_COVER (op,s,d,func), \
763 SIMPLE_NEAREST_FAST_PATH_NONE (op,s,d,func), \
764 SIMPLE_NEAREST_FAST_PATH_PAD (op,s,d,func), \
765 SIMPLE_NEAREST_FAST_PATH_NORMAL (op,s,d,func)
767 #define SIMPLE_NEAREST_A8_MASK_FAST_PATH(op,s,d,func) \
768 SIMPLE_NEAREST_A8_MASK_FAST_PATH_COVER (op,s,d,func), \
769 SIMPLE_NEAREST_A8_MASK_FAST_PATH_NONE (op,s,d,func), \
770 SIMPLE_NEAREST_A8_MASK_FAST_PATH_PAD (op,s,d,func)
772 #define SIMPLE_NEAREST_SOLID_MASK_FAST_PATH(op,s,d,func) \
773 SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_COVER (op,s,d,func), \
774 SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NONE (op,s,d,func), \
775 SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_PAD (op,s,d,func), \
776 SIMPLE_NEAREST_SOLID_MASK_FAST_PATH_NORMAL (op,s,d,func)
778 /*****************************************************************************/
781 * Identify 5 zones in each scanline for bilinear scaling. Depending on
782 * whether 2 pixels to be interpolated are fetched from the image itself,
783 * from the padding area around it or from both image and padding area.
785 static force_inline void
786 bilinear_pad_repeat_get_scanline_bounds (int32_t source_image_width,
788 pixman_fixed_t unit_x,
795 int width1 = *width, left_pad1, right_pad1;
796 int width2 = *width, left_pad2, right_pad2;
798 pad_repeat_get_scanline_bounds (source_image_width, vx, unit_x,
799 &width1, &left_pad1, &right_pad1);
800 pad_repeat_get_scanline_bounds (source_image_width, vx + pixman_fixed_1,
801 unit_x, &width2, &left_pad2, &right_pad2);
803 *left_pad = left_pad2;
804 *left_tz = left_pad1 - left_pad2;
805 *right_tz = right_pad2 - right_pad1;
806 *right_pad = right_pad1;
807 *width -= *left_pad + *left_tz + *right_tz + *right_pad;
811 * Main loop template for single pass bilinear scaling. It needs to be
812 * provided with 'scanline_func' which should do the compositing operation.
813 * The needed function has the following prototype:
815 * scanline_func (dst_type_t * dst,
816 * const mask_type_ * mask,
817 * const src_type_t * src_top,
818 * const src_type_t * src_bottom,
823 * pixman_fixed_t unit_x,
824 * pixman_fixed_t max_vx,
825 * pixman_bool_t zero_src)
828 * dst - destination scanline buffer for storing results
829 * mask - mask buffer (or single value for solid mask)
830 * src_top, src_bottom - two source scanlines
831 * width - number of pixels to process
832 * weight_top - weight of the top row for interpolation
833 * weight_bottom - weight of the bottom row for interpolation
834 * vx - initial position for fetching the first pair of
835 * pixels from the source buffer
836 * unit_x - position increment needed to move to the next pair
838 * max_vx - image size as a fixed point value, can be used for
839 * implementing NORMAL repeat (when it is supported)
840 * zero_src - boolean hint variable, which is set to TRUE when
841 * all source pixels are fetched from zero padding
842 * zone for NONE repeat
844 * Note: normally the sum of 'weight_top' and 'weight_bottom' is equal to
845 * BILINEAR_INTERPOLATION_RANGE, but sometimes it may be less than that
846 * for NONE repeat when handling fuzzy antialiased top or bottom image
847 * edges. Also both top and bottom weight variables are guaranteed to
848 * have value, which is less than BILINEAR_INTERPOLATION_RANGE.
849 * For example, the weights can fit into unsigned byte or be used
850 * with 8-bit SIMD multiplication instructions for 8-bit interpolation
/*
 * Expands to the single-pass bilinear composite entry point
 * 'fast_composite_scaled_bilinear<name>'.  It derives the source-space
 * start position and increments from the transform, then per destination
 * row computes the two source scanlines and their interpolation weights
 * and dispatches 'scanline_func' over the zones of the row:
 *  - PAD repeat: left padding (replicated edge pixel), interior, right
 *    padding;
 *  - NONE repeat: zero padding, left/right transition zones (one sample
 *    from the image, one from the zero border), interior;
 *  - NORMAL repeat: the scanline is optionally extended to
 *    REPEAT_NORMAL_MIN_WIDTH to amortize call overhead, and the loop
 *    alternates between the wrap-around pair (last pixel + first pixel,
 *    via buf1/buf2) and the plain interior span;
 *  - COVER (the final else): a single call over the whole row.
 * 'flags' selects solid/non-solid mask handling (FLAG_HAVE_SOLID_MASK /
 * FLAG_HAVE_NON_SOLID_MASK).
 *
 * NOTE(review): the embedded original line numbers jump throughout this
 * macro (declarations, braces, vy stepping, y2 computation, mask advance
 * statements are absent), so this listing appears to have dropped many
 * lines; reconcile against upstream pixman before building.
 */
853 #define FAST_BILINEAR_MAINLOOP_INT(scale_func_name, scanline_func, src_type_t, mask_type_t, \
854 dst_type_t, repeat_mode, flags) \
856 fast_composite_scaled_bilinear ## scale_func_name (pixman_implementation_t *imp, \
857 pixman_composite_info_t *info) \
859 PIXMAN_COMPOSITE_ARGS (info); \
860 dst_type_t *dst_line; \
861 mask_type_t *mask_line; \
862 src_type_t *src_first_line; \
864 pixman_fixed_t max_vx = INT32_MAX; /* suppress uninitialized variable warning */ \
866 pixman_fixed_t vx, vy; \
867 pixman_fixed_t unit_x, unit_y; \
868 int32_t left_pad, left_tz, right_tz, right_pad; \
871 mask_type_t solid_mask; \
872 const mask_type_t *mask = &solid_mask; \
873 int src_stride, mask_stride, dst_stride; \
876 pixman_fixed_t src_width_fixed; \
878 pixman_bool_t need_src_extension; \
880 PIXMAN_IMAGE_GET_LINE (dest_image, dest_x, dest_y, dst_type_t, dst_stride, dst_line, 1); \
881 if (flags & FLAG_HAVE_SOLID_MASK) \
883 solid_mask = _pixman_image_get_solid (imp, mask_image, dest_image->bits.format); \
886 else if (flags & FLAG_HAVE_NON_SOLID_MASK) \
888 PIXMAN_IMAGE_GET_LINE (mask_image, mask_x, mask_y, mask_type_t, \
889 mask_stride, mask_line, 1); \
892 /* pass in 0 instead of src_x and src_y because src_x and src_y need to be \
893 * transformed from destination space to source space */ \
894 PIXMAN_IMAGE_GET_LINE (src_image, 0, 0, src_type_t, src_stride, src_first_line, 1); \
896 /* reference point is the center of the pixel */ \
897 v.vector[0] = pixman_int_to_fixed (src_x) + pixman_fixed_1 / 2; \
898 v.vector[1] = pixman_int_to_fixed (src_y) + pixman_fixed_1 / 2; \
899 v.vector[2] = pixman_fixed_1; \
901 if (!pixman_transform_point_3d (src_image->common.transform, &v)) \
904 unit_x = src_image->common.transform->matrix[0][0]; \
905 unit_y = src_image->common.transform->matrix[1][1]; \
907 v.vector[0] -= pixman_fixed_1 / 2; \
908 v.vector[1] -= pixman_fixed_1 / 2; \
912 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD || \
913 PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
915 bilinear_pad_repeat_get_scanline_bounds (src_image->bits.width, v.vector[0], unit_x, \
916 &left_pad, &left_tz, &width, &right_tz, &right_pad); \
917 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \
919 /* PAD repeat does not need special handling for 'transition zones' and */ \
920 /* they can be combined with 'padding zones' safely */ \
921 left_pad += left_tz; \
922 right_pad += right_tz; \
923 left_tz = right_tz = 0; \
925 v.vector[0] += left_pad * unit_x; \
928 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
931 repeat (PIXMAN_REPEAT_NORMAL, &vx, pixman_int_to_fixed(src_image->bits.width)); \
932 max_x = pixman_fixed_to_int (vx + (width - 1) * (int64_t)unit_x) + 1; \
934 if (src_image->bits.width < REPEAT_NORMAL_MIN_WIDTH) \
938 while (src_width < REPEAT_NORMAL_MIN_WIDTH && src_width <= max_x) \
939 src_width += src_image->bits.width; \
941 need_src_extension = TRUE; \
945 src_width = src_image->bits.width; \
946 need_src_extension = FALSE; \
949 src_width_fixed = pixman_int_to_fixed (src_width); \
952 while (--height >= 0) \
954 int weight1, weight2; \
956 dst_line += dst_stride; \
958 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
961 mask_line += mask_stride; \
964 y1 = pixman_fixed_to_int (vy); \
965 weight2 = pixman_fixed_to_bilinear_weight (vy); \
968 /* both weight1 and weight2 are smaller than BILINEAR_INTERPOLATION_RANGE */ \
970 weight1 = BILINEAR_INTERPOLATION_RANGE - weight2; \
974 /* set both top and bottom row to the same scanline and tweak weights */ \
976 weight1 = weight2 = BILINEAR_INTERPOLATION_RANGE / 2; \
979 if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_PAD) \
981 src_type_t *src1, *src2; \
982 src_type_t buf1[2]; \
983 src_type_t buf2[2]; \
984 repeat (PIXMAN_REPEAT_PAD, &y1, src_image->bits.height); \
985 repeat (PIXMAN_REPEAT_PAD, &y2, src_image->bits.height); \
986 src1 = src_first_line + src_stride * y1; \
987 src2 = src_first_line + src_stride * y2; \
991 buf1[0] = buf1[1] = src1[0]; \
992 buf2[0] = buf2[1] = src2[0]; \
993 scanline_func (dst, mask, \
994 buf1, buf2, left_pad, weight1, weight2, 0, 0, 0, FALSE); \
996 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1001 scanline_func (dst, mask, \
1002 src1, src2, width, weight1, weight2, vx, unit_x, 0, FALSE); \
1004 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1007 if (right_pad > 0) \
1009 buf1[0] = buf1[1] = src1[src_image->bits.width - 1]; \
1010 buf2[0] = buf2[1] = src2[src_image->bits.width - 1]; \
1011 scanline_func (dst, mask, \
1012 buf1, buf2, right_pad, weight1, weight2, 0, 0, 0, FALSE); \
1015 else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NONE) \
1017 src_type_t *src1, *src2; \
1018 src_type_t buf1[2]; \
1019 src_type_t buf2[2]; \
1020 /* handle top/bottom zero padding by just setting weights to 0 if needed */ \
1026 if (y1 >= src_image->bits.height) \
1029 y1 = src_image->bits.height - 1; \
1036 if (y2 >= src_image->bits.height) \
1039 y2 = src_image->bits.height - 1; \
1041 src1 = src_first_line + src_stride * y1; \
1042 src2 = src_first_line + src_stride * y2; \
1046 buf1[0] = buf1[1] = 0; \
1047 buf2[0] = buf2[1] = 0; \
1048 scanline_func (dst, mask, \
1049 buf1, buf2, left_pad, weight1, weight2, 0, 0, 0, TRUE); \
1051 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1057 buf1[1] = src1[0]; \
1059 buf2[1] = src2[0]; \
1060 scanline_func (dst, mask, \
1061 buf1, buf2, left_tz, weight1, weight2, \
1062 pixman_fixed_frac (vx), unit_x, 0, FALSE); \
1064 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1066 vx += left_tz * unit_x; \
1070 scanline_func (dst, mask, \
1071 src1, src2, width, weight1, weight2, vx, unit_x, 0, FALSE); \
1073 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1075 vx += width * unit_x; \
1079 buf1[0] = src1[src_image->bits.width - 1]; \
1081 buf2[0] = src2[src_image->bits.width - 1]; \
1083 scanline_func (dst, mask, \
1084 buf1, buf2, right_tz, weight1, weight2, \
1085 pixman_fixed_frac (vx), unit_x, 0, FALSE); \
1087 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1090 if (right_pad > 0) \
1092 buf1[0] = buf1[1] = 0; \
1093 buf2[0] = buf2[1] = 0; \
1094 scanline_func (dst, mask, \
1095 buf1, buf2, right_pad, weight1, weight2, 0, 0, 0, TRUE); \
1098 else if (PIXMAN_REPEAT_ ## repeat_mode == PIXMAN_REPEAT_NORMAL) \
1100 int32_t num_pixels; \
1101 int32_t width_remain; \
1102 src_type_t * src_line_top; \
1103 src_type_t * src_line_bottom; \
1104 src_type_t buf1[2]; \
1105 src_type_t buf2[2]; \
1106 src_type_t extended_src_line0[REPEAT_NORMAL_MIN_WIDTH*2]; \
1107 src_type_t extended_src_line1[REPEAT_NORMAL_MIN_WIDTH*2]; \
1110 repeat (PIXMAN_REPEAT_NORMAL, &y1, src_image->bits.height); \
1111 repeat (PIXMAN_REPEAT_NORMAL, &y2, src_image->bits.height); \
1112 src_line_top = src_first_line + src_stride * y1; \
1113 src_line_bottom = src_first_line + src_stride * y2; \
1115 if (need_src_extension) \
1117 for (i=0; i<src_width;) \
1119 for (j=0; j<src_image->bits.width; j++, i++) \
1121 extended_src_line0[i] = src_line_top[j]; \
1122 extended_src_line1[i] = src_line_bottom[j]; \
1126 src_line_top = &extended_src_line0[0]; \
1127 src_line_bottom = &extended_src_line1[0]; \
1130 /* Top & Bottom wrap around buffer */ \
1131 buf1[0] = src_line_top[src_width - 1]; \
1132 buf1[1] = src_line_top[0]; \
1133 buf2[0] = src_line_bottom[src_width - 1]; \
1134 buf2[1] = src_line_bottom[0]; \
1136 width_remain = width; \
1138 while (width_remain > 0) \
1140 /* We use src_width_fixed because it can make vx in original source range */ \
1141 repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \
1143 /* Wrap around part */ \
1144 if (pixman_fixed_to_int (vx) == src_width - 1) \
1146 /* for positive unit_x \
1147 * num_pixels = max(n) + 1, where vx + n*unit_x < src_width_fixed \
1149 * vx is in range [0, src_width_fixed - pixman_fixed_e] \
1150 * So we are safe from overflow. \
1152 num_pixels = ((src_width_fixed - vx - pixman_fixed_e) / unit_x) + 1; \
1154 if (num_pixels > width_remain) \
1155 num_pixels = width_remain; \
1157 scanline_func (dst, mask, buf1, buf2, num_pixels, \
1158 weight1, weight2, pixman_fixed_frac(vx), \
1159 unit_x, src_width_fixed, FALSE); \
1161 width_remain -= num_pixels; \
1162 vx += num_pixels * unit_x; \
1163 dst += num_pixels; \
1165 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1166 mask += num_pixels; \
1168 repeat (PIXMAN_REPEAT_NORMAL, &vx, src_width_fixed); \
1171 /* Normal scanline composite */ \
1172 if (pixman_fixed_to_int (vx) != src_width - 1 && width_remain > 0) \
1174 /* for positive unit_x \
1175 * num_pixels = max(n) + 1, where vx + n*unit_x < (src_width_fixed - 1) \
1177 * vx is in range [0, src_width_fixed - pixman_fixed_e] \
1178 * So we are safe from overflow here. \
1180 num_pixels = ((src_width_fixed - pixman_fixed_1 - vx - pixman_fixed_e) \
1183 if (num_pixels > width_remain) \
1184 num_pixels = width_remain; \
1186 scanline_func (dst, mask, src_line_top, src_line_bottom, num_pixels, \
1187 weight1, weight2, vx, unit_x, src_width_fixed, FALSE); \
1189 width_remain -= num_pixels; \
1190 vx += num_pixels * unit_x; \
1191 dst += num_pixels; \
1193 if (flags & FLAG_HAVE_NON_SOLID_MASK) \
1194 mask += num_pixels; \
1200 scanline_func (dst, mask, src_first_line + src_stride * y1, \
1201 src_first_line + src_stride * y2, width, \
1202 weight1, weight2, vx, unit_x, max_vx, FALSE); \
/* A workaround for old sun studio, see: https://bugs.freedesktop.org/show_bug.cgi?id=32764 */
/* The extra level of indirection (plus the '_' prefixed onto the generated
 * function name) forces scale_func_name to be fully macro-expanded before it
 * is token-pasted inside FAST_BILINEAR_MAINLOOP_INT.
 */
#define FAST_BILINEAR_MAINLOOP_COMMON(scale_func_name, scanline_func, src_type_t, mask_type_t,	\
				      dst_type_t, repeat_mode, flags)				\
	FAST_BILINEAR_MAINLOOP_INT(_ ## scale_func_name, scanline_func, src_type_t, mask_type_t,\
				   dst_type_t, repeat_mode, flags)
/* Source-image flags shared by every scaled-bilinear fast path entry below:
 * a scale-only transform, bilinear filtering, and none of the features
 * (alpha map, accessors, wide format) the templates cannot handle.
 */
#define SCALED_BILINEAR_FLAGS						\
    (FAST_PATH_SCALE_TRANSFORM	|					\
     FAST_PATH_NO_ALPHA_MAP	|					\
     FAST_PATH_BILINEAR_FILTER	|					\
     FAST_PATH_NO_ACCESSORS	|					\
     FAST_PATH_NARROW_FORMAT)
/* Fast path table entry: bilinear scaling with PAD repeat and a positive
 * x step; dispatches to fast_composite_scaled_bilinear_<func>_pad_<op>.
 */
#define SIMPLE_BILINEAR_FAST_PATH_PAD(op,s,d,func)			\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_PAD_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op,	\
/* Fast path table entry: bilinear scaling with NONE repeat and a positive
 * x step; dispatches to fast_composite_scaled_bilinear_<func>_none_<op>.
 */
#define SIMPLE_BILINEAR_FAST_PATH_NONE(op,s,d,func)			\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NONE_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op,	\
/* Fast path table entry: bilinear scaling where the samples are fully
 * covered by the source clip (FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR), so
 * no repeat handling is required; dispatches to
 * fast_composite_scaled_bilinear_<func>_cover_<op>.
 */
#define SIMPLE_BILINEAR_FAST_PATH_COVER(op,s,d,func)			\
    { PIXMAN_OP_ ## op,							\
	SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR,	\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op,	\
/* Fast path table entry: bilinear scaling with NORMAL (tiling) repeat and
 * a positive x step; dispatches to
 * fast_composite_scaled_bilinear_<func>_normal_<op>.
 */
#define SIMPLE_BILINEAR_FAST_PATH_NORMAL(op,s,d,func)			\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NORMAL_REPEAT	|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_PAD, but composited through an a8 bits mask
 * (unified alpha); dispatches to
 * fast_composite_scaled_bilinear_<func>_pad_<op>.
 */
#define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_PAD_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA),		\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_NONE, but composited through an a8 bits
 * mask (unified alpha).
 */
#define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NONE_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA),		\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_COVER, but composited through an a8 bits
 * mask (unified alpha).
 */
#define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR,	\
	PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA),		\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_NORMAL, but composited through an a8 bits
 * mask (unified alpha).
 */
#define SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NORMAL_REPEAT	|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_a8, MASK_FLAGS (a8, FAST_PATH_UNIFIED_ALPHA),		\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_PAD, but composited through a solid mask
 * (unified alpha).
 */
#define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_PAD_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA),	\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _pad ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_NONE, but composited through a solid mask
 * (unified alpha).
 */
#define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NONE_REPEAT		|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA),	\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _none ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_COVER, but composited through a solid mask
 * (unified alpha).
 */
#define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER(op,s,d,func)		\
    { PIXMAN_OP_ ## op,							\
	SCALED_BILINEAR_FLAGS | FAST_PATH_SAMPLES_COVER_CLIP_BILINEAR,	\
	PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA),	\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _cover ## _ ## op,	\
/* As SIMPLE_BILINEAR_FAST_PATH_NORMAL, but composited through a solid mask
 * (unified alpha).
 */
#define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL(op,s,d,func)	\
    { PIXMAN_OP_ ## op,							\
	(SCALED_BILINEAR_FLAGS		|				\
	 FAST_PATH_NORMAL_REPEAT	|				\
	 FAST_PATH_X_UNIT_POSITIVE),					\
	PIXMAN_solid, MASK_FLAGS (solid, FAST_PATH_UNIFIED_ALPHA),	\
	PIXMAN_ ## d, FAST_PATH_STD_DEST_FLAGS,				\
	fast_composite_scaled_bilinear_ ## func ## _normal ## _ ## op,	\
/* Prefer the use of 'cover' variant, because it is faster */
/* Convenience macro: expands to all four repeat variants of the unmasked
 * bilinear fast path for one (op, src, dst, func) combination.
 */
#define SIMPLE_BILINEAR_FAST_PATH(op,s,d,func)				\
    SIMPLE_BILINEAR_FAST_PATH_COVER (op,s,d,func),			\
    SIMPLE_BILINEAR_FAST_PATH_NONE (op,s,d,func),			\
    SIMPLE_BILINEAR_FAST_PATH_PAD (op,s,d,func),			\
    SIMPLE_BILINEAR_FAST_PATH_NORMAL (op,s,d,func)
/* Convenience macro: all four repeat variants of the a8-masked bilinear
 * fast path; 'cover' is listed first because it is the fastest.
 */
#define SIMPLE_BILINEAR_A8_MASK_FAST_PATH(op,s,d,func)			\
    SIMPLE_BILINEAR_A8_MASK_FAST_PATH_COVER (op,s,d,func),		\
    SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NONE (op,s,d,func),		\
    SIMPLE_BILINEAR_A8_MASK_FAST_PATH_PAD (op,s,d,func),		\
    SIMPLE_BILINEAR_A8_MASK_FAST_PATH_NORMAL (op,s,d,func)
/* Convenience macro: all four repeat variants of the solid-masked bilinear
 * fast path; 'cover' is listed first because it is the fastest.
 */
#define SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH(op,s,d,func)		\
    SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_COVER (op,s,d,func),		\
    SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NONE (op,s,d,func),		\
    SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_PAD (op,s,d,func),		\
    SIMPLE_BILINEAR_SOLID_MASK_FAST_PATH_NORMAL (op,s,d,func)