2 * Copyright © 2009 Red Hat, Inc.
3 * Copyright © 2000 SuSE, Inc.
4 * Copyright © 2007 Red Hat, Inc.
5 * Copyright © 2000 Keith Packard, member of The XFree86 Project, Inc.
6 * 2005 Lars Knoll & Zack Rusin, Trolltech
7 * 2008 Aaron Plattner, NVIDIA Corporation
9 * Permission to use, copy, modify, distribute, and sell this software and its
10 * documentation for any purpose is hereby granted without fee, provided that
11 * the above copyright notice appear in all copies and that both that
12 * copyright notice and this permission notice appear in supporting
13 * documentation, and that the name of Red Hat not be used in advertising or
14 * publicity pertaining to distribution of the software without specific,
15 * written prior permission. Red Hat makes no representations about the
16 * suitability of this software for any purpose. It is provided "as is"
17 * without express or implied warranty.
19 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
20 * SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
21 * FITNESS, IN NO EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY
22 * SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
23 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
24 * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING
25 * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
38 #include "pixman-private.h"
39 #include "pixman-combine32.h"
40 #include "pixman-private.h"
/* Per-buffer byte capacity of the on-stack scanline scratch space; the
 * actual stack array below is 3x this size (src, mask and dest buffers).
 * Rows wider than this (in bytes) fall back to a heap allocation. */
42 #define SCANLINE_BUFFER_LENGTH 8192
/*
 * general_composite_rect - the universal (slow-path) compositor: fetch
 * source, mask and destination scanlines into temporary buffers, run the
 * combiner for the requested operator, and store the result back into
 * the destination image.
 *
 * NOTE(review): this excerpt elides a number of original lines
 * (parameters, braces, some statements and conditions are missing), so
 * the comments below describe only what the visible code shows; elided
 * control flow is flagged where it matters.
 */
45 general_composite_rect (pixman_implementation_t *imp,
48 pixman_image_t * mask,
49 pixman_image_t * dest,
/* Scratch space for one scanline each of src, mask and dest.  Starts on
 * the stack; replaced by a heap allocation below when the row is wider
 * than SCANLINE_BUFFER_LENGTH bytes. */
59 uint8_t stack_scanline_buffer[SCANLINE_BUFFER_LENGTH * 3];
60 uint8_t *scanline_buffer = stack_scanline_buffer;
61 uint8_t *src_buffer, *mask_buffer, *dest_buffer;
62 fetch_scanline_t fetch_src = NULL, fetch_mask = NULL, fetch_dest = NULL;
63 pixman_combine_32_func_t compose;
64 store_scanline_t store;
65 source_image_class_t src_class, mask_class;
66 pixman_bool_t component_alpha;
/* All three images must advertise a "narrow" (<= 32 bpp channel data)
 * format for the 8-bit-per-channel pipeline; otherwise the 64-bit wide
 * fetch/combine/store variants are selected further down.  The variable
 * receiving this expression is on an elided line. */
73 (src->common.flags & FAST_PATH_NARROW_FORMAT) &&
74 (!mask || mask->common.flags & FAST_PATH_NARROW_FORMAT) &&
75 (dest->common.flags & FAST_PATH_NARROW_FORMAT);
/* NOTE(review): `width * Bpp` could overflow int for extreme widths —
 * TODO confirm callers bound `width`.  Also, the NULL check on the
 * pixman_malloc_abc() result is not visible in this excerpt (original
 * lines 81-85 are elided) — verify it exists before the buffer is used. */
78 if (width * Bpp > SCANLINE_BUFFER_LENGTH)
80 scanline_buffer = pixman_malloc_abc (width, 3, Bpp);
/* Carve the single allocation into three equal-sized slices. */
86 src_buffer = scanline_buffer;
87 mask_buffer = src_buffer + width * Bpp;
88 dest_buffer = mask_buffer + width * Bpp;
/* Classify the source (and mask, when present) so constant/horizontal
 * images can be fetched once instead of per scanline in the loop below. */
90 src_class = _pixman_image_classify (src,
94 mask_class = SOURCE_IMAGE_CLASS_UNKNOWN;
98 mask_class = _pixman_image_classify (mask,
/* CLEAR ignores the source entirely, so no source fetcher is needed.
 * The 32- vs 64-bit fetcher choice presumably depends on the elided
 * narrow-format condition above — confirm against the full source. */
103 if (op == PIXMAN_OP_CLEAR)
106 fetch_src = _pixman_image_get_scanline_32;
108 fetch_src = _pixman_image_get_scanline_64;
/* No mask fetch when there is no mask, or when CLEAR makes it moot. */
110 if (!mask || op == PIXMAN_OP_CLEAR)
113 fetch_mask = _pixman_image_get_scanline_32;
115 fetch_mask = _pixman_image_get_scanline_64;
/* CLEAR and SRC overwrite the destination unconditionally, so the old
 * destination pixels never need to be read. */
117 if (op == PIXMAN_OP_CLEAR || op == PIXMAN_OP_SRC)
120 fetch_dest = _pixman_image_get_scanline_32;
122 fetch_dest = _pixman_image_get_scanline_64;
125 store = _pixman_image_store_scanline_32;
127 store = _pixman_image_store_scanline_64;
129 /* Skip the store step and composite directly into the
130 * destination if the output format of the compose func matches
131 * the destination format.
133 * If the destination format is a8r8g8b8 then we can always do
134 * this. If it is x8r8g8b8, then we can only do it if the
135 * operator doesn't make use of destination alpha.
137 if ((dest->bits.format == PIXMAN_a8r8g8b8) ||
138 (dest->bits.format == PIXMAN_x8r8g8b8 &&
139 (op == PIXMAN_OP_OVER ||
140 op == PIXMAN_OP_ADD ||
141 op == PIXMAN_OP_SRC ||
142 op == PIXMAN_OP_CLEAR ||
143 op == PIXMAN_OP_IN_REVERSE ||
144 op == PIXMAN_OP_OUT_REVERSE ||
145 op == PIXMAN_OP_DST)))
/* Direct writes are also ruled out by an alpha map or a write accessor,
 * since both require going through the store callback. */
148 !dest->common.alpha_map &&
149 !dest->bits.write_func)
/* Direct-write mode: remember the raw destination bits and row stride so
 * the combiner can write into the image in place. */
157 bits = dest->bits.bits;
158 stride = dest->bits.rowstride;
/* Component alpha applies only to RGB bits-type masks that request it;
 * it selects the per-channel (_ca) combiner variants below. */
170 mask->common.type == BITS &&
171 mask->common.component_alpha &&
172 PIXMAN_FORMAT_RGB (mask->bits.format);
177 compose = _pixman_implementation_combine_32_ca;
179 compose = _pixman_implementation_combine_32;
/* Wide (64-bit) combiners are cast to the common 32-bit function-pointer
 * type; dispatch relies on fetch/store having selected matching widths. */
184 compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64_ca;
186 compose = (pixman_combine_32_func_t)_pixman_implementation_combine_64;
/* Main loop: composite one scanline per iteration. */
195 for (i = 0; i < height; ++i)
197 /* fill first half of scanline with source */
202 /* fetch mask before source so that fetching of
203 source can be optimized */
204 fetch_mask (mask, mask_x, mask_y + i,
205 width, (void *)mask_buffer, 0);
/* HORIZONTAL images repeat the same scanline, so after the first fetch
 * the fetcher can be dropped (the NULLing statements are elided here). */
207 if (mask_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
211 if (src_class == SOURCE_IMAGE_CLASS_HORIZONTAL)
213 fetch_src (src, src_x, src_y + i,
214 width, (void *)src_buffer, 0);
/* The mask buffer is passed to the source fetch so the fetcher can skip
 * pixels the mask will zero out anyway. */
219 fetch_src (src, src_x, src_y + i,
220 width, (void *)src_buffer, (void *)mask_buffer);
225 fetch_mask (mask, mask_x, mask_y + i,
226 width, (void *)mask_buffer, 0);
231 /* fill dest into second half of scanline */
234 fetch_dest (dest, dest_x, dest_y + i,
235 width, (void *)dest_buffer, 0);
/* Buffered path: combine into dest_buffer, then store it back through
 * the format/accessor-aware store callback. */
239 compose (imp->toplevel, op,
246 store (&(dest->bits), dest_x, dest_y + i, width,
247 (void *)dest_buffer);
/* Direct path: combine straight into the destination pixels. */
252 compose (imp->toplevel, op,
253 bits + (dest_y + i) * stride + dest_x,
254 (void *)src_buffer, (void *)mask_buffer, width);
/* Release the scratch space only if it was heap-allocated. */
258 if (scanline_buffer != stack_scanline_buffer)
259 free (scanline_buffer);
/* Catch-all "fast path" table: a single wildcard entry that matches any
 * operator and any source/mask/dest format, routing everything to
 * general_composite_rect.  (The table's terminator entry is elided from
 * this excerpt.) */
262 static const pixman_fast_path_t general_fast_path[] =
264 { PIXMAN_OP_any, PIXMAN_any, 0, PIXMAN_any, 0, PIXMAN_any, 0, general_composite_rect },
/* general_blt - blit entry point for the general implementation.
 * NOTE(review): the body is almost entirely elided here; per the comment
 * below it presumably declines (returns FALSE) so a delegate with SIMD
 * support handles the blit — confirm against the full source. */
269 general_blt (pixman_implementation_t *imp,
283 /* We can't blit unless we have sse2 or mmx */
/* general_fill - fill entry point for the general implementation.
 * NOTE(review): body elided in this excerpt; presumably declines like
 * general_blt so a more capable delegate performs the fill — verify. */
289 general_fill (pixman_implementation_t *imp,
302 pixman_implementation_t *
303 _pixman_implementation_create_general (void)
305 pixman_implementation_t *imp = _pixman_implementation_create (NULL, general_fast_path);
307 _pixman_setup_combiner_functions_32 (imp);
308 _pixman_setup_combiner_functions_64 (imp);
310 imp->blt = general_blt;
311 imp->fill = general_fill;