2 * Copyright © 2000 SuSE, Inc.
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that
7 * copyright notice and this permission notice appear in supporting
8 * documentation, and that the name of SuSE not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. SuSE makes no representations about the
11 * suitability of this software for any purpose. It is provided "as is"
12 * without express or implied warranty.
14 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
16 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
18 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
19 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 * Author: Keith Packard, SuSE, Inc.
30 #include "pixman-private.h"
31 #include "pixman-mmx.h"
32 #include "pixman-sse2.h"
/*
 * pixman_fill8:
 * Fill a rectangle of an 8 bpp image with the low byte of @xor.
 *
 * @bits:   base pointer of the destination image
 * @stride: image stride in uint32_t units (converted to bytes below)
 * @x, @y:  top-left corner of the rectangle, in pixels
 * @width:  rectangle width in pixels
 * @height: rectangle height in pixels
 * @xor:    pixel value; only the low 8 bits are used
 */
static void
pixman_fill8 (uint32_t *bits,
              int       stride,
              int       x,
              int       y,
              int       width,
              int       height,
              uint32_t  xor)
{
    int      byte_stride = stride * (int) sizeof (uint32_t);
    uint8_t *dst = (uint8_t *) bits;
    uint8_t  v = xor & 0xff;
    int      i;

    /* Seek to the first pixel of the rectangle. */
    dst = dst + y * byte_stride + x;

    while (height--)
    {
        for (i = 0; i < width; ++i)
            dst[i] = v;

        /* Advance one row; byte_stride accounts for any row padding. */
        dst += byte_stride;
    }
}
/*
 * pixman_fill16:
 * Fill a rectangle of a 16 bpp image with the low 16 bits of @xor.
 *
 * @bits:   base pointer of the destination image
 * @stride: image stride in uint32_t units (converted to uint16_t units)
 * @x, @y:  top-left corner of the rectangle, in pixels
 * @width:  rectangle width in pixels
 * @height: rectangle height in pixels
 * @xor:    pixel value; only the low 16 bits are used
 */
static void
pixman_fill16 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    int       short_stride =
        (stride * (int) sizeof (uint32_t)) / (int) sizeof (uint16_t);
    uint16_t *dst = (uint16_t *) bits;
    uint16_t  v = xor & 0xffff;
    int       i;

    /* Seek to the first pixel of the rectangle. */
    dst = dst + y * short_stride + x;

    while (height--)
    {
        for (i = 0; i < width; ++i)
            dst[i] = v;

        dst += short_stride;
    }
}
/*
 * pixman_fill32:
 * Fill a rectangle of a 32 bpp image with @xor.
 *
 * @bits:   base pointer of the destination image
 * @stride: image stride in uint32_t units
 * @x, @y:  top-left corner of the rectangle, in pixels
 * @width:  rectangle width in pixels
 * @height: rectangle height in pixels
 * @xor:    the 32-bit pixel value to store
 */
static void
pixman_fill32 (uint32_t *bits,
               int       stride,
               int       x,
               int       y,
               int       width,
               int       height,
               uint32_t  xor)
{
    int i;

    /* Seek to the first pixel of the rectangle. */
    bits = bits + y * stride + x;

    while (height--)
    {
        for (i = 0; i < width; ++i)
            bits[i] = xor;

        bits += stride;
    }
}
106 #if defined(USE_SSE2) && defined(__GNUC__) && !defined(__x86_64__) && !defined(__amd64__)
107 __attribute__((__force_align_arg_pointer__))
109 PIXMAN_EXPORT pixman_bool_t
110 pixman_fill (uint32_t *bits,
120 printf ("filling: %d %d %d %d (stride: %d, bpp: %d) pixel: %x\n",
121 x, y, width, height, stride, bpp, xor);
125 if (pixman_have_sse2() && pixmanFillsse2 (bits, stride, bpp, x, y, width, height, xor))
130 if (pixman_have_mmx() && pixman_fill_mmx (bits, stride, bpp, x, y, width, height, xor))
137 pixman_fill8 (bits, stride, x, y, width, height, xor);
141 pixman_fill16 (bits, stride, x, y, width, height, xor);
145 pixman_fill32 (bits, stride, x, y, width, height, xor);
158 * Compute the smallest value no less than y which is on a
162 PIXMAN_EXPORT pixman_fixed_t
163 pixman_sample_ceil_y (pixman_fixed_t y, int n)
165 pixman_fixed_t f = pixman_fixed_frac(y);
166 pixman_fixed_t i = pixman_fixed_floor(y);
168 f = ((f + Y_FRAC_FIRST(n)) / STEP_Y_SMALL(n)) * STEP_Y_SMALL(n) + Y_FRAC_FIRST(n);
169 if (f > Y_FRAC_LAST(n))
171 if (pixman_fixed_to_int(i) == 0x7fff)
173 f = 0xffff; /* saturate */
182 #define _div(a,b) ((a) >= 0 ? (a) / (b) : -((-(a) + (b) - 1) / (b)))
185 * Compute the largest value no greater than y which is on a
188 PIXMAN_EXPORT pixman_fixed_t
189 pixman_sample_floor_y (pixman_fixed_t y, int n)
191 pixman_fixed_t f = pixman_fixed_frac(y);
192 pixman_fixed_t i = pixman_fixed_floor (y);
194 f = _div(f - Y_FRAC_FIRST(n), STEP_Y_SMALL(n)) * STEP_Y_SMALL(n) + Y_FRAC_FIRST(n);
195 if (f < Y_FRAC_FIRST(n))
197 if (pixman_fixed_to_int(i) == 0x8000)
199 f = 0; /* saturate */
209 * Step an edge by any amount (including negative values)
212 pixman_edge_step (pixman_edge_t *e, int n)
214 pixman_fixed_48_16_t ne;
216 e->x += n * e->stepx;
218 ne = e->e + n * (pixman_fixed_48_16_t) e->dx;
224 int nx = (ne + e->dy - 1) / e->dy;
225 e->e = ne - nx * (pixman_fixed_48_16_t) e->dy;
226 e->x += nx * e->signdx;
233 int nx = (-ne) / e->dy;
234 e->e = ne + nx * (pixman_fixed_48_16_t) e->dy;
235 e->x -= nx * e->signdx;
241 * A private routine to initialize the multi-step
242 * elements of an edge structure
245 _pixman_edge_multi_init (pixman_edge_t *e, int n, pixman_fixed_t *stepx_p, pixman_fixed_t *dx_p)
247 pixman_fixed_t stepx;
248 pixman_fixed_48_16_t ne;
250 ne = n * (pixman_fixed_48_16_t) e->dx;
251 stepx = n * e->stepx;
256 stepx += nx * e->signdx;
263 * Initialize one edge structure given the line endpoints and a
267 pixman_edge_init (pixman_edge_t *e,
269 pixman_fixed_t y_start,
270 pixman_fixed_t x_top,
271 pixman_fixed_t y_top,
272 pixman_fixed_t x_bot,
273 pixman_fixed_t y_bot)
275 pixman_fixed_t dx, dy;
295 e->stepx = -(-dx / dy);
300 _pixman_edge_multi_init (e, STEP_Y_SMALL(n), &e->stepx_small, &e->dx_small);
301 _pixman_edge_multi_init (e, STEP_Y_BIG(n), &e->stepx_big, &e->dx_big);
303 pixman_edge_step (e, y_start - y_top);
307 * Initialize one edge structure given a line, starting y value
308 * and a pixel offset for the line
311 pixman_line_fixed_edge_init (pixman_edge_t *e,
314 const pixman_line_fixed_t *line,
318 pixman_fixed_t x_off_fixed = pixman_int_to_fixed(x_off);
319 pixman_fixed_t y_off_fixed = pixman_int_to_fixed(y_off);
320 const pixman_point_fixed_t *top, *bot;
322 if (line->p1.y <= line->p2.y)
332 pixman_edge_init (e, n, y,
333 top->x + x_off_fixed,
334 top->y + y_off_fixed,
335 bot->x + x_off_fixed,
336 bot->y + y_off_fixed);
340 pixman_multiply_overflows_int (unsigned int a,
343 return a >= INT32_MAX / b;
347 pixman_addition_overflows_int (unsigned int a,
350 return a > INT32_MAX - b;
354 pixman_malloc_ab(unsigned int a,
357 if (a >= INT32_MAX / b)
360 return malloc (a * b);
364 pixman_malloc_abc (unsigned int a,
368 if (a >= INT32_MAX / b)
370 else if (a * b >= INT32_MAX / c)
373 return malloc (a * b * c);
380 * Returns the version of the pixman library encoded in a single
381 * integer as per %PIXMAN_VERSION_ENCODE. The encoding ensures that
382 * later versions compare greater than earlier versions.
384 * A run-time comparison to check that pixman's version is greater than
385 * or equal to version X.Y.Z could be performed as follows:
387 * <informalexample><programlisting>
388 * if (pixman_version() >= PIXMAN_VERSION_ENCODE(X,Y,Z)) {...}
389 * </programlisting></informalexample>
391 * See also pixman_version_string() as well as the compile-time
392 * equivalents %PIXMAN_VERSION and %PIXMAN_VERSION_STRING.
394 * Return value: the encoded version.
397 pixman_version (void)
399 return PIXMAN_VERSION;
403 * pixman_version_string:
405 * Returns the version of the pixman library as a human-readable string
406 * of the form "X.Y.Z".
408 * See also pixman_version() as well as the compile-time equivalents
409 * %PIXMAN_VERSION_STRING and %PIXMAN_VERSION.
411 * Return value: a string containing the version.
413 PIXMAN_EXPORT const char*
414 pixman_version_string (void)
416 return PIXMAN_VERSION_STRING;
420 * pixman_format_supported_destination:
421 * @format: A pixman_format_code_t format
423 * Return value: whether the provided format code is a supported
424 * format for a pixman surface used as a destination in
427 * Currently, all pixman_format_code_t values are supported
428 * except for the YUV formats.
430 PIXMAN_EXPORT pixman_bool_t
431 pixman_format_supported_destination (pixman_format_code_t format)
435 case PIXMAN_a2b10g10r10:
436 case PIXMAN_x2b10g10r10:
437 case PIXMAN_a8r8g8b8:
438 case PIXMAN_x8r8g8b8:
439 case PIXMAN_a8b8g8r8:
440 case PIXMAN_x8b8g8r8:
441 case PIXMAN_b8g8r8a8:
442 case PIXMAN_b8g8r8x8:
448 case PIXMAN_a1r5g5b5:
449 case PIXMAN_x1r5g5b5:
450 case PIXMAN_a1b5g5r5:
451 case PIXMAN_x1b5g5r5:
452 case PIXMAN_a4r4g4b4:
453 case PIXMAN_x4r4g4b4:
454 case PIXMAN_a4b4g4r4:
455 case PIXMAN_x4b4g4r4:
460 case PIXMAN_a2r2g2b2:
461 case PIXMAN_a2b2g2r2:
465 /* Collides with PIXMAN_c8
468 /* Collides with PIXMAN_g8
475 case PIXMAN_a1r1g1b1:
476 case PIXMAN_a1b1g1r1:
493 * pixman_format_supported_source:
494 * @format: A pixman_format_code_t format
496 * Return value: whether the provided format code is a supported
497 * format for a pixman surface used as a source in
500 * Currently, all pixman_format_code_t values are supported.
502 PIXMAN_EXPORT pixman_bool_t
503 pixman_format_supported_source (pixman_format_code_t format)
507 case PIXMAN_a2b10g10r10:
508 case PIXMAN_x2b10g10r10:
509 case PIXMAN_a8r8g8b8:
510 case PIXMAN_x8r8g8b8:
511 case PIXMAN_a8b8g8r8:
512 case PIXMAN_x8b8g8r8:
513 case PIXMAN_b8g8r8a8:
514 case PIXMAN_b8g8r8x8:
520 case PIXMAN_a1r5g5b5:
521 case PIXMAN_x1r5g5b5:
522 case PIXMAN_a1b5g5r5:
523 case PIXMAN_x1b5g5r5:
524 case PIXMAN_a4r4g4b4:
525 case PIXMAN_x4r4g4b4:
526 case PIXMAN_a4b4g4r4:
527 case PIXMAN_x4b4g4r4:
532 case PIXMAN_a2r2g2b2:
533 case PIXMAN_a2b2g2r2:
537 /* Collides with PIXMAN_c8
540 /* Collides with PIXMAN_g8
547 case PIXMAN_a1r1g1b1:
548 case PIXMAN_a1b1g1r1:
565 _pixman_walk_composite_region (pixman_implementation_t *imp,
567 pixman_image_t * pSrc,
568 pixman_image_t * pMask,
569 pixman_image_t * pDst,
578 pixman_bool_t srcRepeat,
579 pixman_bool_t maskRepeat,
580 pixman_composite_func_t compositeRect)
583 const pixman_box32_t *pbox;
584 int w, h, w_this, h_this;
585 int x_msk, y_msk, x_src, y_src, x_dst, y_dst;
586 pixman_region32_t reg;
587 pixman_region32_t *region;
589 pixman_region32_init (®);
590 if (!pixman_compute_composite_region32 (®, pSrc, pMask, pDst,
591 xSrc, ySrc, xMask, yMask, xDst, yDst, width, height))
598 pbox = pixman_region32_rectangles (region, &n);
601 h = pbox->y2 - pbox->y1;
602 y_src = pbox->y1 - yDst + ySrc;
603 y_msk = pbox->y1 - yDst + yMask;
608 w = pbox->x2 - pbox->x1;
609 x_src = pbox->x1 - xDst + xSrc;
610 x_msk = pbox->x1 - xDst + xMask;
614 y_msk = MOD (y_msk, pMask->bits.height);
615 if (h_this > pMask->bits.height - y_msk)
616 h_this = pMask->bits.height - y_msk;
620 y_src = MOD (y_src, pSrc->bits.height);
621 if (h_this > pSrc->bits.height - y_src)
622 h_this = pSrc->bits.height - y_src;
629 x_msk = MOD (x_msk, pMask->bits.width);
630 if (w_this > pMask->bits.width - x_msk)
631 w_this = pMask->bits.width - x_msk;
635 x_src = MOD (x_src, pSrc->bits.width);
636 if (w_this > pSrc->bits.width - x_src)
637 w_this = pSrc->bits.width - x_src;
639 (*compositeRect) (imp,
640 op, pSrc, pMask, pDst,
641 x_src, y_src, x_msk, y_msk, x_dst, y_dst,
655 pixman_region32_fini (®);
659 mask_is_solid (pixman_image_t *mask)
661 if (mask->type == SOLID)
664 if (mask->type == BITS &&
665 mask->common.repeat == PIXMAN_REPEAT_NORMAL &&
666 mask->bits.width == 1 &&
667 mask->bits.height == 1)
675 static const FastPathInfo *
676 get_fast_path (const FastPathInfo *fast_paths,
678 pixman_image_t *pSrc,
679 pixman_image_t *pMask,
680 pixman_image_t *pDst,
681 pixman_bool_t is_pixbuf)
683 const FastPathInfo *info;
685 for (info = fast_paths; info->op != PIXMAN_OP_NONE; info++)
687 pixman_bool_t valid_src = FALSE;
688 pixman_bool_t valid_mask = FALSE;
693 if ((info->src_format == PIXMAN_solid && pixman_image_can_get_solid (pSrc)) ||
694 (pSrc->type == BITS && info->src_format == pSrc->bits.format))
702 if ((info->mask_format == PIXMAN_null && !pMask) ||
703 (pMask && pMask->type == BITS && info->mask_format == pMask->bits.format))
707 if (info->flags & NEED_SOLID_MASK)
709 if (!pMask || !mask_is_solid (pMask))
713 if (info->flags & NEED_COMPONENT_ALPHA)
715 if (!pMask || !pMask->common.component_alpha)
723 if (info->dest_format != pDst->bits.format)
726 if ((info->flags & NEED_PIXBUF) && !is_pixbuf)
/*
 * _pixman_run_fast_path:
 * Try to execute the composite described by the arguments via one of
 * the table-driven fast paths in @paths; falls through to
 * _pixman_walk_composite_region with the selected function when a
 * path qualifies.
 *
 * NOTE(review): this listing is fragmentary (original line numbers
 * are embedded and many lines are missing) and the function continues
 * past the end of the visible chunk; code left untouched.
 */
736 _pixman_run_fast_path (const FastPathInfo *paths,
737 pixman_implementation_t *imp,
740 pixman_image_t *mask,
741 pixman_image_t *dest,
751 pixman_composite_func_t func = NULL;
/* Repeat handling below only understands PIXMAN_REPEAT_NORMAL. */
752 pixman_bool_t src_repeat = src->common.repeat == PIXMAN_REPEAT_NORMAL;
753 pixman_bool_t mask_repeat = mask && mask->common.repeat == PIXMAN_REPEAT_NORMAL;
/* Gate: fast paths require untransformed, unfiltered, non-PAD/REFLECT
 * images with no alpha maps and no custom accessors. */
755 if ((src->type == BITS || pixman_image_can_get_solid (src)) &&
756 (!mask || mask->type == BITS)
757 && !src->common.transform && !(mask && mask->common.transform)
758 && !(mask && mask->common.alpha_map) && !src->common.alpha_map && !dest->common.alpha_map
759 && (src->common.filter != PIXMAN_FILTER_CONVOLUTION)
760 && (src->common.repeat != PIXMAN_REPEAT_PAD)
761 && (src->common.repeat != PIXMAN_REPEAT_REFLECT)
762 && (!mask || (mask->common.filter != PIXMAN_FILTER_CONVOLUTION &&
763 mask->common.repeat != PIXMAN_REPEAT_PAD &&
764 mask->common.repeat != PIXMAN_REPEAT_REFLECT))
765 && !src->common.read_func && !src->common.write_func
766 && !(mask && mask->common.read_func)
767 && !(mask && mask->common.write_func)
768 && !dest->common.read_func
769 && !dest->common.write_func)
771 const FastPathInfo *info;
772 pixman_bool_t pixbuf;
/* pixbuf: src and mask share the same bits (interleaved ARGB). */
775 src && src->type == BITS &&
776 mask && mask->type == BITS &&
777 src->bits.bits == mask->bits.bits &&
780 !mask->common.component_alpha &&
783 info = get_fast_path (paths, op, src, mask, dest, pixbuf);
/* Solid src/mask paths handle the repeat themselves. */
789 if (info->src_format == PIXMAN_solid)
792 if (info->mask_format == PIXMAN_solid || info->flags & NEED_SOLID_MASK)
796 src->bits.width == 1 &&
797 src->bits.height == 1) ||
799 mask->bits.width == 1 &&
800 mask->bits.height == 1))
802 /* If src or mask are repeating 1x1 images and src_repeat or
803 * mask_repeat are still TRUE, it means the fast path we
804 * selected does not actually handle repeating images.
806 * So rather than call the "fast path" with a zillion
807 * 1x1 requests, we just fall back to the general code (which
808 * does do something sensible with 1x1 repeating images).
817 _pixman_walk_composite_region (imp, op,
819 src_x, src_y, mask_x, mask_y,
822 src_repeat, mask_repeat,