2 * Copyright © 2000 SuSE, Inc.
3 * Copyright © 1999 Keith Packard
5 * Permission to use, copy, modify, distribute, and sell this software and its
6 * documentation for any purpose is hereby granted without fee, provided that
7 * the above copyright notice appear in all copies and that both that
8 * copyright notice and this permission notice appear in supporting
9 * documentation, and that the name of SuSE not be used in advertising or
10 * publicity pertaining to distribution of the software without specific,
11 * written prior permission. SuSE makes no representations about the
12 * suitability of this software for any purpose. It is provided "as is"
13 * without express or implied warranty.
15 * SuSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL SuSE
17 * BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
18 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
19 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
20 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 * Author: Keith Packard, SuSE, Inc.
31 #include "pixman-private.h"
33 #define N_CACHED_FAST_PATHS 8
/* Per-thread cache entry pairing an implementation with one of its fast
 * paths.  Slot 0 is the most recently used entry (move-to-front ordering,
 * see _pixman_lookup_composite_function below).
 * NOTE(review): the enclosing "typedef struct { struct {" lines are not
 * visible in this chunk.
 */
39 pixman_implementation_t * imp;
40 pixman_fast_path_t fast_path;
41 } cache [N_CACHED_FAST_PATHS];

/* One fast-path cache per thread, so lookups need no locking. */
44 PIXMAN_DEFINE_THREAD_LOCAL (cache_t, fast_path_cache);
/* Resolves (op, formats, flags) to a composite function.
 *
 * First probes the per-thread MRU cache for an exact match; on a miss it
 * walks the implementation delegate chain and scans each implementation's
 * fast-path table for the first compatible entry.  On a hit (from either
 * source) the winning entry is moved to the front of the cache.
 *
 * Outputs: *out_imp receives the owning implementation, *out_func the
 * composite function.
 * NOTE(review): several lines of this function (the `op`/flags parameters,
 * the goto/label around the cache update, and closing braces) are not
 * visible in this chunk.
 */
47 _pixman_lookup_composite_function (pixman_implementation_t *toplevel,
49 pixman_format_code_t src_format,
51 pixman_format_code_t mask_format,
53 pixman_format_code_t dest_format,
55 pixman_implementation_t **out_imp,
56 pixman_composite_func_t *out_func)
58 pixman_implementation_t *imp;
62 /* Check cache for fast paths */
63 cache = PIXMAN_GET_THREAD_LOCAL (fast_path_cache);
65 for (i = 0; i < N_CACHED_FAST_PATHS; ++i)
67 const pixman_fast_path_t *info = &(cache->cache[i].fast_path);
69 /* Note that we check for equality here, not whether
70 * the cached fast path matches. This is to prevent
71 * us from selecting an overly general fast path
72 * when a more specific one would work.
75 info->src_format == src_format &&
76 info->mask_format == mask_format &&
77 info->dest_format == dest_format &&
78 info->src_flags == src_flags &&
79 info->mask_flags == mask_flags &&
80 info->dest_flags == dest_flags &&
/* Cache hit: return the cached implementation/function pair. */
83 *out_imp = cache->cache[i].imp;
84 *out_func = cache->cache[i].fast_path.func;
/* Cache miss: walk the delegate chain, most specific implementation
 * first, scanning each fast-path table until the PIXMAN_OP_NONE
 * sentinel. */
90 for (imp = toplevel; imp != NULL; imp = imp->delegate)
92 const pixman_fast_path_t *info = imp->fast_paths;
94 while (info->op != PIXMAN_OP_NONE)
/* A table entry matches if each of op/format is either exact or the
 * PIXMAN_*any wildcard ... */
96 if ((info->op == op || info->op == PIXMAN_OP_any) &&
98 ((info->src_format == src_format) ||
99 (info->src_format == PIXMAN_any)) &&
100 ((info->mask_format == mask_format) ||
101 (info->mask_format == PIXMAN_any)) &&
102 ((info->dest_format == dest_format) ||
103 (info->dest_format == PIXMAN_any)) &&
/* ... and every flag the entry requires is present in the caller's
 * flags (subset test via mask-and-compare). */
105 (info->src_flags & src_flags) == info->src_flags &&
106 (info->mask_flags & mask_flags) == info->mask_flags &&
107 (info->dest_flags & dest_flags) == info->dest_flags)
110 *out_func = info->func;
112 /* Set i to the last spot in the cache so that the
113 * move-to-front code below will work
115 i = N_CACHED_FAST_PATHS - 1;
/* Move-to-front: shift entries 0..i-1 down one slot, then write the
 * winning entry into slot 0. */
129 cache->cache[i + 1] = cache->cache[i];
131 cache->cache[0].imp = *out_imp;
132 cache->cache[0].fast_path.op = op;
133 cache->cache[0].fast_path.src_format = src_format;
134 cache->cache[0].fast_path.src_flags = src_flags;
135 cache->cache[0].fast_path.mask_format = mask_format;
136 cache->cache[0].fast_path.mask_flags = mask_flags;
137 cache->cache[0].fast_path.dest_format = dest_format;
138 cache->cache[0].fast_path.dest_flags = dest_flags;
139 cache->cache[0].fast_path.func = *out_func;
146 _pixman_multiply_overflows_size (size_t a, size_t b)
148 return a >= SIZE_MAX / b;
152 _pixman_multiply_overflows_int (unsigned int a, unsigned int b)
154 return a >= INT32_MAX / b;
158 _pixman_addition_overflows_int (unsigned int a, unsigned int b)
160 return a > INT32_MAX - b;
/*
 * Allocates a * b bytes, returning NULL if the multiplication would
 * exceed the 32-bit signed range.
 *
 * Fix: the original evaluated INT32_MAX / b unconditionally, which is
 * undefined behavior (division by zero) when b == 0.  When either factor
 * is zero the product is zero and cannot overflow, so we fall through to
 * malloc (0) (whose return value — NULL or a unique pointer — is
 * implementation-defined, as with any zero-size allocation).
 */
void *
pixman_malloc_ab (unsigned int a,
		  unsigned int b)
{
    if (b != 0 && a >= INT32_MAX / b)
	return NULL;

    return malloc (a * b);
}
/*
 * Allocates a * b * c bytes, returning NULL if either multiplication
 * would exceed the 32-bit signed range.
 *
 * Fix: the original divided by b and by c unconditionally, which is
 * undefined behavior (division by zero) when either is 0.  A zero factor
 * makes the product zero, which cannot overflow, so we skip the
 * corresponding check and let malloc (0) decide.
 */
void *
pixman_malloc_abc (unsigned int a,
		   unsigned int b,
		   unsigned int c)
{
    if (b != 0 && a >= INT32_MAX / b)
	return NULL;
    else if (c != 0 && a * b >= INT32_MAX / c)
	return NULL;
    else
	return malloc (a * b * c);
}
187 * This function expands images from ARGB8 format to ARGB16. To preserve
188 * precision, it needs to know the original source format. For example, if the
189 * source was PIXMAN_x1r5g5b5 and the red component contained bits 12345, then
190 * the expanded value is 12345123. To correctly expand this to 16 bits, it
191 * should be 1234512345123451 and not 1234512312345123.
/* Expands `width` ARGB8 pixels from src into 16-bit-per-channel pixels in
 * dst, replicating source bits per the header comment above so precision
 * is preserved for narrow formats.  Safe for in-place use when dst and
 * src overlap with dst >= src (the loop runs back-to-front).
 * NOTE(review): the `width` parameter and some declaration lines (the
 * type of a16/r16/g16/b16, and the braces) are not visible in this chunk.
 */
194 pixman_expand (uint64_t * dst,
195 const uint32_t * src,
196 pixman_format_code_t format,
200 * Determine the sizes of each component and the masks and shifts
201 * required to extract them from the source pixel.
203 const int a_size = PIXMAN_FORMAT_A (format),
204 r_size = PIXMAN_FORMAT_R (format),
205 g_size = PIXMAN_FORMAT_G (format),
206 b_size = PIXMAN_FORMAT_B (format);
/* Shifts locate each component within the 32-bit ARGB8 source pixel. */
207 const int a_shift = 32 - a_size,
208 r_shift = 24 - r_size,
209 g_shift = 16 - g_size,
210 b_shift = 8 - b_size;
/* NOTE(review): `~0` is a signed int, and left-shifting a negative value
 * is undefined behavior in C; `~(~0u << size)` would be well-defined.
 * Flagging rather than changing, since this is a byte-preserving pass. */
211 const uint8_t a_mask = ~(~0 << a_size),
212 r_mask = ~(~0 << r_size),
213 g_mask = ~(~0 << g_size),
214 b_mask = ~(~0 << b_size);
217 /* Start at the end so that we can do the expansion in place
220 for (i = width - 1; i >= 0; i--)
222 const uint32_t pixel = src[i];
/* Extract the raw (narrow) component values from the source pixel. */
223 const uint8_t a = (pixel >> a_shift) & a_mask,
224 r = (pixel >> r_shift) & r_mask,
225 g = (pixel >> g_shift) & g_mask,
226 b = (pixel >> b_shift) & b_mask;
/* Widen each component to 16 bits by bit replication; formats with no
 * alpha channel (a_size == 0) are treated as fully opaque. */
228 a16 = a_size ? unorm_to_unorm (a, a_size, 16) : 0xffff,
229 r16 = unorm_to_unorm (r, r_size, 16),
230 g16 = unorm_to_unorm (g, g_size, 16),
231 b16 = unorm_to_unorm (b, b_size, 16);
/* Pack as 64-bit ARGB16.  Presumably a16..b16 are declared as a 64-bit
 * type on the hidden declaration line — the << 48 / << 32 shifts would
 * otherwise be undefined; confirm. */
233 dst[i] = a16 << 48 | r16 << 32 | g16 << 16 | b16;
238 * Contracting is easier than expanding. We just need to truncate the
/* Truncates 16-bit-per-channel pixels back down to packed 32-bit ARGB8,
 * keeping the high 8 bits of each component.  Safe for in-place use when
 * src == dst (the loop runs front-to-back).
 * NOTE(review): the remaining parameters and the r/g/b extraction lines
 * are not visible in this chunk.
 */
242 pixman_contract (uint32_t * dst,
248 /* Start at the beginning so that we can do the contraction in
249 * place when src == dst
251 for (i = 0; i < width; i++)
/* Take the top byte of each 16-bit component. */
253 const uint8_t a = src[i] >> 56,
/* NOTE(review): `a` promotes to signed int here, so `a << 24` can set the
 * sign bit (e.g. a == 255), which is undefined behavior for signed left
 * shift; `(uint32_t) a << 24` would be well-defined.  Flagging only. */
258 dst[i] = a << 24 | r << 16 | g << 8 | b;
/* No-op scanline fetcher for iterators whose buffer is already valid.
 * NOTE(review): the function body is not visible in this chunk. */
263 _pixman_iter_get_scanline_noop (pixman_iter_t *iter, const uint32_t *mask)
268 #define N_TMP_BOXES (16)
/* Rebuilds the 16-bit region dst from the rectangles of the 32-bit
 * region src.  Returns the result of pixman_region_init_rects.
 * NOTE(review): the allocation-failure check and the free of boxes16 are
 * not visible in this chunk.  The 32->16 assignments below truncate
 * coordinates that do not fit in 16 bits — presumably callers guarantee
 * the range; confirm.
 */
271 pixman_region16_copy_from_region32 (pixman_region16_t *dst,
272 pixman_region32_t *src)
275 pixman_box32_t *boxes32;
276 pixman_box16_t *boxes16;
277 pixman_bool_t retval;
/* Fetch the source boxes and allocate a matching 16-bit array
 * (pixman_malloc_ab guards the n_boxes * sizeof multiplication). */
279 boxes32 = pixman_region32_rectangles (src, &n_boxes);
281 boxes16 = pixman_malloc_ab (n_boxes, sizeof (pixman_box16_t));
/* Narrow each box from 32-bit to 16-bit coordinates. */
286 for (i = 0; i < n_boxes; ++i)
288 boxes16[i].x1 = boxes32[i].x1;
289 boxes16[i].y1 = boxes32[i].y1;
290 boxes16[i].x2 = boxes32[i].x2;
291 boxes16[i].y2 = boxes32[i].y2;
/* Replace dst's contents with a region built from the converted boxes. */
294 pixman_region_fini (dst);
295 retval = pixman_region_init_rects (dst, boxes16, n_boxes);
/* Rebuilds the 32-bit region dst from the rectangles of the 16-bit
 * region src.  Widening 16 -> 32 is lossless, so no range concerns.
 * Small regions use the on-stack tmp_boxes buffer to avoid a heap
 * allocation; larger ones are heap-allocated and freed at the end.
 * NOTE(review): the `else` branch selecting tmp_boxes, the allocation
 * check, and the final free/return lines are not visible in this chunk.
 */
301 pixman_region32_copy_from_region16 (pixman_region32_t *dst,
302 pixman_region16_t *src)
305 pixman_box16_t *boxes16;
306 pixman_box32_t *boxes32;
307 pixman_box32_t tmp_boxes[N_TMP_BOXES];
308 pixman_bool_t retval;
310 boxes16 = pixman_region_rectangles (src, &n_boxes);
/* Heap-allocate only when the stack buffer is too small. */
312 if (n_boxes > N_TMP_BOXES)
313 boxes32 = pixman_malloc_ab (n_boxes, sizeof (pixman_box32_t));
/* Widen each box from 16-bit to 32-bit coordinates. */
320 for (i = 0; i < n_boxes; ++i)
322 boxes32[i].x1 = boxes16[i].x1;
323 boxes32[i].y1 = boxes16[i].y1;
324 boxes32[i].x2 = boxes16[i].x2;
325 boxes32[i].y2 = boxes16[i].y2;
/* Replace dst's contents with a region built from the converted boxes. */
328 pixman_region32_fini (dst);
329 retval = pixman_region32_init_rects (dst, boxes32, n_boxes);
/* Only free when the heap path was taken. */
331 if (boxes32 != tmp_boxes)
/* Reports incorrect use of the pixman API on behalf of `function`,
 * pointing the developer at this symbol as a breakpoint target.
 * NOTE(review): most of the body (the message cap check and the fprintf
 * call) is not visible in this chunk.  The static counter is presumably
 * used to rate-limit output; it is not atomic, so concurrent callers
 * could race on it — confirm whether that is acceptable here.
 */
340 _pixman_log_error (const char *function, const char *message)
342 static int n_messages = 0;
349 "Set a breakpoint on '_pixman_log_error' to debug\n\n",