/* -*- Mode: c; tab-width: 8; c-basic-offset: 4; indent-tabs-mode: t; -*- */
/* glitter-paths - polygon scan converter
 *
 * Copyright (c) 2008  M Joonas Pihlaja
 * Copyright (c) 2007  David Turner
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/* This is the Glitter paths scan converter incorporated into cairo.
 * The source is from commit 734c53237a867a773640bd5b64816249fa1730f8
 * of
 *
 *   http://gitweb.freedesktop.org/?p=users/joonas/glitter-paths
 */
/* Glitter-paths is a stand alone polygon rasteriser derived from
 * David Turner's reimplementation of Tor Andersson's 15x17
 * supersampling rasteriser from the Apparition graphics library. The
 * main new feature here is cheaply choosing per-scan line between
 * doing fully analytical coverage computation for an entire row at a
 * time vs. using a supersampling approach.
 *
 * David Turner's code can be found at
 *
 *   http://david.freetype.org/rasterizer-shootout/raster-comparison-20070813.tar.bz2
 *
 * In particular this file incorporates large parts of ftgrays_tor10.h
 * from raster-comparison-20070813.tar.bz2
 */
/* Overview
 *
 * A scan converter's basic purpose is to take polygon edges and
 * convert them into an RLE compressed A8 mask.  This one works in
 * two phases: gathering edges and generating spans.
 *
 * 1) As the user feeds the scan converter edges they are vertically
 * clipped and bucketed into a _polygon_ data structure.  The edges
 * are also snapped from the user's coordinates to the subpixel grid
 * coordinates used during scan conversion.
 *
 * 2) Generating spans works by performing a vertical sweep of pixel
 * rows from top to bottom and maintaining an _active_list_ of edges
 * that intersect the row.  From the active list the fill rule
 * determines which edges are the left and right edges of the start of
 * each span, and their contribution is then accumulated into a pixel
 * coverage list (_cell_list_) as coverage deltas.  Once the coverage
 * deltas of all edges are known we can form spans of constant pixel
 * coverage by summing the deltas during a traversal of the cell list.
 * At the end of a pixel row the cell list is sent to a coverage
 * blitter for rendering to some target surface.
 *
 * The pixel coverages are computed by either supersampling the row
 * and box filtering a mono rasterisation, or by computing the exact
 * coverages of edges in the active list.  The supersampling method is
 * used whenever some edge starts or stops within the row or there are
 * edge intersections in the row.
 *
 *   polygon bucket for      \
 *   current pixel row        |
 *      |                     |
 *      | activate new edges  |  Repeat GRID_Y times if we
 *      V                     \  are supersampling this row,
 *   active list              /  or just once if we're computing
 *      |                     |  analytical coverage.
 *      | coverage deltas     |
 *      V                     |
 *   pixel coverage list     /
 *      |
 *      V
 *   coverage blitter
 */

#include "cairoint.h"
#include "cairo-spans-private.h"
#include "cairo-error-private.h"

/*-------------------------------------------------------------------------
 * cairo specific config
 */
#define I static

/* Prefer cairo's status type. */
#define GLITTER_HAVE_STATUS_T 1
#define GLITTER_STATUS_SUCCESS CAIRO_STATUS_SUCCESS
#define GLITTER_STATUS_NO_MEMORY CAIRO_STATUS_NO_MEMORY
typedef cairo_status_t glitter_status_t;

/* The input coordinate scale and the rasterisation grid scales. */
#define GLITTER_INPUT_BITS CAIRO_FIXED_FRAC_BITS
//#define GRID_X_BITS CAIRO_FIXED_FRAC_BITS
#define GRID_X_BITS 2
#define GRID_Y_BITS 2

/* Set glitter up to use a cairo span renderer to do the coverage
 * blitting. */

/*-------------------------------------------------------------------------
 * glitter-paths.h: public interface
 */
132 /* "Input scaled" numbers are fixed precision reals with multiplier
133 * 2**GLITTER_INPUT_BITS. Input coordinates are given to glitter as
134 * pixel scaled numbers. These get converted to the internal grid
135 * scaled numbers as soon as possible. Internal overflow is possible
136 * if GRID_X/Y inside glitter-paths.c is larger than
137 * 1<<GLITTER_INPUT_BITS. */
138 #ifndef GLITTER_INPUT_BITS
139 # define GLITTER_INPUT_BITS 8
141 #define GLITTER_INPUT_SCALE (1<<GLITTER_INPUT_BITS)
142 typedef int glitter_input_scaled_t;

#if !GLITTER_HAVE_STATUS_T
typedef enum {
    GLITTER_STATUS_SUCCESS = 0,
    GLITTER_STATUS_NO_MEMORY
} glitter_status_t;
#endif

#ifndef I
# define I /*static*/
#endif

/* Opaque type for scan converting. */
typedef struct glitter_scan_converter glitter_scan_converter_t;

/* Reset a scan converter to accept polygon edges and set the clip box
 * in pixels.  Allocates O(ymax-ymin) bytes of memory.  The clip box
 * is set to integer pixel coordinates xmin <= x < xmax, ymin <= y <
 * ymax. */
I glitter_status_t
glitter_scan_converter_reset(
    glitter_scan_converter_t *converter,
    int xmin, int ymin,
    int xmax, int ymax);

/* Render the polygon in the scan converter to the given A8 format
 * image raster.  Only the pixels accessible as pixels[y*stride+x] for
 * x,y inside the clip box are written to, where xmin <= x < xmax,
 * ymin <= y < ymax.  The image is assumed to be clear on input.
 *
 * If nonzero_fill is true then the interior of the polygon is
 * computed with the non-zero fill rule.  Otherwise the even-odd fill
 * rule is used.
 *
 * The scan converter must be reset or destroyed after this call. */
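
/* Typical call sequence, for illustration only (this sketch is not
 * part of the original glitter-paths source; in cairo the converter
 * is embedded in the _cairo_tor22_scan_converter wrapper at the end
 * of this file):
 *
 *   glitter_scan_converter_reset (converter, xmin, ymin, xmax, ymax);
 *   for each edge:
 *       glitter_scan_converter_add_edge (converter, &edge);
 *   glitter_scan_converter_render (converter, winding_mask,
 *                                  antialias, renderer);
 *
 * After rendering, the converter must be reset before reuse, or
 * destroyed.
 */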

/*-------------------------------------------------------------------------
 * glitter-paths.c: Implementation internal types
 */
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <setjmp.h>

/* All polygon coordinates are snapped onto a subsample grid. "Grid
 * scaled" numbers are fixed precision reals with multiplier GRID_X or
 * GRID_Y. */
typedef int grid_scaled_t;
typedef int grid_scaled_x_t;
typedef int grid_scaled_y_t;

/* Default x/y scale factors.
 *  You can either define GRID_X/Y_BITS to get a power-of-two scale
 *  or define GRID_X/Y separately. */
#if !defined(GRID_X) && !defined(GRID_X_BITS)
# define GRID_X_BITS 8
#endif
#if !defined(GRID_Y) && !defined(GRID_Y_BITS)
# define GRID_Y_BITS 4
#endif

/* Use GRID_X/Y_BITS to define GRID_X/Y if they're available. */
#ifdef GRID_X_BITS
# define GRID_X (1 << GRID_X_BITS)
#endif
#ifdef GRID_Y_BITS
# define GRID_Y (1 << GRID_Y_BITS)
#endif

/* The GRID_X_TO_INT_FRAC macro splits a grid scaled coordinate into
 * integer and fractional parts. The integer part is floored. */
#if defined(GRID_X_TO_INT_FRAC)

#elif defined(GRID_X_BITS)
# define GRID_X_TO_INT_FRAC(x, i, f) \
	_GRID_TO_INT_FRAC_shift(x, i, f, GRID_X_BITS)
#else
# define GRID_X_TO_INT_FRAC(x, i, f) \
	_GRID_TO_INT_FRAC_general(x, i, f, GRID_X)
#endif

#define _GRID_TO_INT_FRAC_general(t, i, f, m) do { \
    (i) = (t) / (m);				   \
    (f) = (t) % (m);				   \
    if ((f) < 0) {				   \
	--(i);					   \
	(f) += (m);				   \
    }						   \
} while (0)

#define _GRID_TO_INT_FRAC_shift(t, i, f, b) do { \
    (f) = (t) & ((1 << (b)) - 1);		 \
    (i) = (t) >> (b);				 \
} while (0)
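
/* Example (illustrative, not in the original source; assumes the
 * usual arithmetic right shift of negative ints): with GRID_X_BITS ==
 * 2 the shift variant yields a floored split even for negative
 * coordinates:
 *
 *   GRID_X_TO_INT_FRAC(-5, i, f);   i == -2, f == 3   (-2*4 + 3 == -5)
 *   GRID_X_TO_INT_FRAC( 6, i, f);   i ==  1, f == 2   ( 1*4 + 2 ==  6)
 */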

/* A grid area is a real in [0,1] scaled by 2*GRID_X*GRID_Y.  We want
 * to be able to represent exactly areas of subpixel trapezoids whose
 * vertices are given in grid scaled coordinates.  The scale factor
 * comes from needing to accurately represent the area 0.5*dx*dy of a
 * triangle with base dx and height dy in grid scaled numbers. */
#define GRID_XY (2*GRID_X*GRID_Y) /* Unit area on the grid. */

/* GRID_AREA_TO_ALPHA(area): map [0,GRID_XY] to [0,255]. */
#if GRID_XY == 510
# define GRID_AREA_TO_ALPHA(c)	 (((c)+1) >> 1)
#elif GRID_XY == 255
# define GRID_AREA_TO_ALPHA(c)	 (c)
#elif GRID_XY == 64
# define GRID_AREA_TO_ALPHA(c)	 (((c) << 2) | -(((c) & 0x40) >> 6))
#elif GRID_XY == 128
# define GRID_AREA_TO_ALPHA(c)	 ((((c) << 1) | -((c) >> 7)) & 255)
#elif GRID_XY == 256
# define GRID_AREA_TO_ALPHA(c)	 (((c) | -((c) >> 8)) & 255)
#elif GRID_XY == 15
# define GRID_AREA_TO_ALPHA(c)	 (((c) << 4) + (c))
#elif GRID_XY == 2*256*15
# define GRID_AREA_TO_ALPHA(c)	 (((c) + ((c)<<4) + 256) >> 9)
#else
# define GRID_AREA_TO_ALPHA(c)	 (((c)*255 + GRID_XY/2) / GRID_XY)
#endif
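
/* Example (illustrative, not in the original source): with the
 * GRID_X_BITS == GRID_Y_BITS == 2 configuration above, GRID_XY is
 * 2*4*4 == 32, so the general rounding mapping applies:
 *
 *   GRID_AREA_TO_ALPHA(0)  == ( 0*255 + 16) / 32 ==   0
 *   GRID_AREA_TO_ALPHA(16) == (16*255 + 16) / 32 == 128   (half cover)
 *   GRID_AREA_TO_ALPHA(32) == (32*255 + 16) / 32 == 255   (full cover)
 */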

#define UNROLL3(x) x x x

struct quorem {
    int32_t quo;
    int32_t rem;
};

/* Header for a chunk of memory in a memory pool. */
struct _pool_chunk {
    /* # bytes used in this chunk. */
    size_t size;

    /* # bytes total in this chunk */
    size_t capacity;

    /* Pointer to the previous chunk or %NULL if this is the sentinel
     * chunk in the pool header. */
    struct _pool_chunk *prev_chunk;

    /* Actual data starts here.  Well aligned for pointers. */
};

/* A memory pool.  This is supposed to be embedded on the stack or
 * within some other structure.  It may optionally be followed by an
 * embedded array from which requests are fulfilled until
 * malloc needs to be called to allocate a first real chunk. */
struct pool {
    /* Chunk we're allocating from. */
    struct _pool_chunk *current;

    jmp_buf *jmp;

    /* Free list of previously allocated chunks.  All have >= default
     * capacity. */
    struct _pool_chunk *first_free;

    /* The default capacity of a chunk. */
    size_t default_capacity;

    /* Header for the sentinel chunk.  Directly following the pool
     * struct should be some space for embedded elements from which
     * the sentinel chunk allocates from. */
    struct _pool_chunk sentinel[1];
};

/* A polygon edge. */
struct edge {
    /* Next in y-bucket or active list. */
    struct edge *next, *prev;

    /* Number of subsample rows remaining to scan convert of this
     * edge. */
    grid_scaled_y_t height_left;

    /* Original sign of the edge: +1 for downwards, -1 for upwards
     * edges. */
    int dir;
    int vertical;

    /* Current x coordinate while the edge is on the active
     * list. Initialised to the x coordinate of the top of the
     * edge. The quotient is in grid_scaled_x_t units and the
     * remainder is mod dy in grid_scaled_y_t units.*/
    struct quorem x;

    /* Advance of the current x when moving down a subsample line. */
    struct quorem dxdy;

    /* The clipped y of the top of the edge. */
    grid_scaled_y_t ytop;

    /* y2-y1 after orienting the edge downwards. */
    grid_scaled_y_t dy;
};

#define EDGE_Y_BUCKET_INDEX(y, ymin) (((y) - (ymin))/GRID_Y)

/* A collection of sorted and vertically clipped edges of the polygon.
 * Edges are moved from the polygon to an active list while scan
 * converting. */
struct polygon {
    /* The vertical clip extents. */
    grid_scaled_y_t ymin, ymax;

    /* Array of edges all starting in the same bucket.  An edge is put
     * into bucket EDGE_Y_BUCKET_INDEX(edge->ytop, polygon->ymin) when
     * it is added to the polygon. */
    struct edge **y_buckets;
    struct edge *y_buckets_embedded[64];

    struct {
	struct pool base[1];
	struct edge embedded[32];
    } edge_pool;
};

/* A cell records the effect on pixel coverage of polygon edges
 * passing through a pixel.  It contains two accumulators of pixel
 * coverage.
 *
 * Consider the effects of a polygon edge on the coverage of a pixel
 * it intersects and that of the following one.  The coverage of the
 * following pixel is the height of the edge multiplied by the width
 * of the pixel, and the coverage of the pixel itself is the area of
 * the trapezoid formed by the edge and the right side of the pixel.
 *
 * +-----------------------+-----------------------+
 * |                       |                       |
 * |                       |                       |
 * |_______________________|_______________________|
 * |   \...................|.......................|\
 * |    \..................|.......................| |
 * |     \.................|.......................| |
 * |      \....covered.....|.......................| |
 * |       \....area.......|.......................| } covered height
 * |        \..............|.......................| |
 * |uncovered\.............|.......................| |
 * | area     \............|.......................| |
 * |___________\...........|.......................|/
 * |                       |                       |
 * |                       |                       |
 * |                       |                       |
 * +-----------------------+-----------------------+
 *
 * Since the coverage of the following pixel will always be a multiple
 * of the width of the pixel, we can store the height of the covered
 * area instead.  The coverage of the pixel itself is the total
 * coverage minus the area of the uncovered area to the left of the
 * edge.  As it's faster to compute the uncovered area we only store
 * that and subtract it from the total coverage later when forming
 * spans to blit.
 *
 * The heights and areas are signed, with left edges of the polygon
 * having positive sign and right edges having negative sign.  When
 * two edges intersect they swap their left/rightness so their
 * contribution above and below the intersection point must be
 * computed separately. */
struct cell {
    struct cell *next;
    int x;
    int16_t uncovered_area;
    int16_t covered_height;
};
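
/* Worked example (illustrative, not in the original source): a left
 * edge (sign +1) crossing a full pixel row at fractional x offset fx
 * adds GRID_Y to covered_height and 2*fx*GRID_Y to uncovered_area of
 * its cell.  When blitting, the running total is accumulated as
 *
 *   cover += cell->covered_height * GRID_X * 2;
 *   area   = cover - cell->uncovered_area;
 *
 * so with fx == 0 the edge's own pixel reaches area == GRID_XY (fully
 * covered), and every pixel to its right keeps that cover until a
 * right edge (sign -1) cancels it.
 */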

/* A cell list represents the scan line sparsely as cells ordered by
 * ascending x.  It is geared towards scanning the cells in order
 * using an internal cursor. */
struct cell_list {
    /* Sentinel nodes */
    struct cell head, tail;

    /* Cursor state for iterating through the cell list. */
    struct cell *cursor, *rewind;

    /* Cells in the cell list are owned by the cell list and are
     * allocated from this pool. */
    struct {
	struct pool base[1];
	struct cell embedded[32];
    } cell_pool;
};

struct cell_pair {
    struct cell *cell1;
    struct cell *cell2;
};

/* The active list contains edges in the current scan line ordered by
 * the x-coordinate of the intercept of the edge and the scan line. */
struct active_list {
    /* Leftmost edge on the current scan line. */
    struct edge head, tail;

    /* A lower bound on the height of the active edges is used to
     * estimate how soon some active edge ends.  We can't advance the
     * scan conversion by a full pixel row if an edge ends somewhere
     * within it. */
    grid_scaled_y_t min_height;
    int is_vertical;
};

struct glitter_scan_converter {
    struct polygon polygon[1];
    struct active_list active[1];
    struct cell_list coverages[1];

    cairo_half_open_span_t *spans;
    cairo_half_open_span_t spans_embedded[64];

    /* Clip box in grid scaled coordinates. */
    grid_scaled_x_t xmin, xmax;
    grid_scaled_y_t ymin, ymax;
};

/* Compute the floored division a/b. Assumes / and % perform symmetric
 * division. */
inline static struct quorem
floored_divrem(int a, int b)
{
    struct quorem qr;
    qr.quo = a/b;
    qr.rem = a%b;
    if ((a^b)<0 && qr.rem) {
	qr.quo -= 1;
	qr.rem += b;
    }
    return qr;
}

/* Compute the floored division (x*a)/b. Assumes / and % perform symmetric
 * division. */
static struct quorem
floored_muldivrem(int x, int a, int b)
{
    struct quorem qr;
    long long xa = (long long)x*a;
    qr.quo = xa/b;
    qr.rem = xa%b;
    if ((xa>=0) != (b>=0) && qr.rem) {
	qr.quo -= 1;
	qr.rem += b;
    }
    return qr;
}

static struct _pool_chunk *
_pool_chunk_init(
    struct _pool_chunk *p,
    struct _pool_chunk *prev_chunk,
    size_t capacity)
{
    p->prev_chunk = prev_chunk;
    p->size = 0;
    p->capacity = capacity;
    return p;
}

static struct _pool_chunk *
_pool_chunk_create(struct pool *pool, size_t size)
{
    struct _pool_chunk *p;

    p = malloc(size + sizeof(struct _pool_chunk));
    if (unlikely (NULL == p))
	longjmp (*pool->jmp, _cairo_error (CAIRO_STATUS_NO_MEMORY));

    return _pool_chunk_init(p, pool->current, size);
}

static void
pool_init(struct pool *pool,
	  jmp_buf *jmp,
	  size_t default_capacity,
	  size_t embedded_capacity)
{
    pool->jmp = jmp;
    pool->current = pool->sentinel;
    pool->first_free = NULL;
    pool->default_capacity = default_capacity;
    _pool_chunk_init(pool->sentinel, NULL, embedded_capacity);
}

static void
pool_fini(struct pool *pool)
{
    struct _pool_chunk *p = pool->current;
    do {
	while (NULL != p) {
	    struct _pool_chunk *prev = p->prev_chunk;
	    if (p != pool->sentinel)
		free(p);
	    p = prev;
	}
	p = pool->first_free;
	pool->first_free = NULL;
    } while (NULL != p);
}

/* Satisfy an allocation by first allocating a new large enough chunk
 * and adding it to the head of the pool's chunk list. This function
 * is called as a fallback if pool_alloc() couldn't do a quick
 * allocation from the current chunk in the pool. */
static void *
_pool_alloc_from_new_chunk(
    struct pool *pool,
    size_t size)
{
    struct _pool_chunk *chunk;
    void *obj;
    size_t capacity;

    /* If the allocation is smaller than the default chunk size then
     * try getting a chunk off the free list.  Force alloc of a new
     * chunk for large requests. */
    capacity = size;
    chunk = NULL;
    if (size < pool->default_capacity) {
	capacity = pool->default_capacity;
	chunk = pool->first_free;
	if (chunk) {
	    pool->first_free = chunk->prev_chunk;
	    _pool_chunk_init(chunk, pool->current, chunk->capacity);
	}
    }

    if (NULL == chunk)
	chunk = _pool_chunk_create (pool, capacity);
    pool->current = chunk;

    obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
    chunk->size += size;
    return obj;
}

/* Allocate size bytes from the pool.  The first allocated address
 * returned from a pool is aligned to sizeof(void*).  Subsequent
 * addresses will maintain alignment as long as multiples of void* are
 * allocated.  On allocation failure this longjmps out through the
 * pool's jmp_buf rather than returning.  The pool retains ownership
 * of the returned memory. */
inline static void *
pool_alloc (struct pool *pool, size_t size)
{
    struct _pool_chunk *chunk = pool->current;

    if (size <= chunk->capacity - chunk->size) {
	void *obj = ((unsigned char*)chunk + sizeof(*chunk) + chunk->size);
	chunk->size += size;
	return obj;
    } else {
	return _pool_alloc_from_new_chunk(pool, size);
    }
}

/* Relinquish all pool_alloced memory back to the pool. */
static void
pool_reset (struct pool *pool)
{
    /* Transfer all used chunks to the chunk free list. */
    struct _pool_chunk *chunk = pool->current;
    if (chunk != pool->sentinel) {
	while (chunk->prev_chunk != pool->sentinel) {
	    chunk = chunk->prev_chunk;
	}
	chunk->prev_chunk = pool->first_free;
	pool->first_free = pool->current;
    }
    /* Reset the sentinel as the current chunk. */
    pool->current = pool->sentinel;
    pool->sentinel->size = 0;
}
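
/* Usage sketch (illustrative; not part of the original source).  A
 * pool is embedded with trailing storage that backs its sentinel
 * chunk, as the cell and edge pools in this file do:
 *
 *   struct { struct pool base[1]; struct cell embedded[32]; } cell_pool;
 *
 *   pool_init (cell_pool.base, &jmp, 256*sizeof(struct cell),
 *              sizeof (cell_pool.embedded));
 *   cell = pool_alloc (cell_pool.base, sizeof (struct cell));
 *   pool_reset (cell_pool.base);    -- releases every allocation at once
 *   pool_fini (cell_pool.base);
 */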

/* Rewinds the cell list's cursor to the beginning.  After rewinding
 * we're good to cell_list_find() the cell at any x coordinate. */
inline static void
cell_list_rewind (struct cell_list *cells)
{
    cells->cursor = &cells->head;
}

inline static void
cell_list_maybe_rewind (struct cell_list *cells, int x)
{
    if (x < cells->cursor->x) {
	cells->cursor = cells->rewind;
	if (x < cells->cursor->x)
	    cells->cursor = &cells->head;
    }
}

inline static void
cell_list_set_rewind (struct cell_list *cells)
{
    cells->rewind = cells->cursor;
}

static void
cell_list_init(struct cell_list *cells, jmp_buf *jmp)
{
    pool_init(cells->cell_pool.base, jmp,
	      256*sizeof(struct cell),
	      sizeof(cells->cell_pool.embedded));
    cells->tail.next = NULL;
    cells->tail.x = INT_MAX;
    cells->head.x = INT_MIN;
    cells->head.next = &cells->tail;
    cell_list_rewind (cells);
}

static void
cell_list_fini(struct cell_list *cells)
{
    pool_fini (cells->cell_pool.base);
}

/* Empty the cell list.  This is called at the start of every pixel
 * row. */
inline static void
cell_list_reset (struct cell_list *cells)
{
    cell_list_rewind (cells);
    cells->head.next = &cells->tail;
    pool_reset (cells->cell_pool.base);
}

inline static struct cell *
cell_list_alloc (struct cell_list *cells,
		 struct cell *tail,
		 int x)
{
    struct cell *cell;

    cell = pool_alloc (cells->cell_pool.base, sizeof (struct cell));
    cell->next = tail->next;
    tail->next = cell;
    cell->x = x;
    /* Zero both adjacent int16_t counters (uncovered_area and
     * covered_height) with a single 32-bit store. */
    *(uint32_t *)&cell->uncovered_area = 0;
    return cell;
}

/* Find a cell at the given x-coordinate.  Returns %NULL if a new cell
 * needed to be allocated but couldn't be.  Cells must be found with
 * non-decreasing x-coordinate until the cell list is rewound using
 * cell_list_rewind().  Ownership of the returned cell is retained by
 * the cell list. */
inline static struct cell *
cell_list_find (struct cell_list *cells, int x)
{
    struct cell *tail = cells->cursor;

    if (tail->x == x)
	return tail;

    while (1) {
	UNROLL3({
		if (tail->next->x > x)
			break;
		tail = tail->next;
	});
    }

    if (tail->x != x)
	tail = cell_list_alloc (cells, tail, x);
    return cells->cursor = tail;
}

/* Find two cells at x1 and x2.  This is exactly equivalent
 * to
 *
 *   pair.cell1 = cell_list_find(cells, x1);
 *   pair.cell2 = cell_list_find(cells, x2);
 *
 * except with less function call overhead. */
inline static struct cell_pair
cell_list_find_pair(struct cell_list *cells, int x1, int x2)
{
    struct cell_pair pair;

    pair.cell1 = cells->cursor;
    while (1) {
	UNROLL3({
		if (pair.cell1->next->x > x1)
			break;
		pair.cell1 = pair.cell1->next;
	});
    }
    if (pair.cell1->x != x1)
	pair.cell1 = cell_list_alloc (cells, pair.cell1, x1);

    pair.cell2 = pair.cell1;
    while (1) {
	UNROLL3({
		if (pair.cell2->next->x > x2)
			break;
		pair.cell2 = pair.cell2->next;
	});
    }
    if (pair.cell2->x != x2)
	pair.cell2 = cell_list_alloc (cells, pair.cell2, x2);

    cells->cursor = pair.cell2;
    return pair;
}

/* Add a subpixel span covering [x1, x2) to the coverage cells. */
inline static void
cell_list_add_subspan(struct cell_list *cells,
		      grid_scaled_x_t x1,
		      grid_scaled_x_t x2)
{
    int ix1, fx1;
    int ix2, fx2;

    if (x1 == x2)
	return;

    GRID_X_TO_INT_FRAC(x1, ix1, fx1);
    GRID_X_TO_INT_FRAC(x2, ix2, fx2);

    if (ix1 != ix2) {
	struct cell_pair p;
	p = cell_list_find_pair(cells, ix1, ix2);
	p.cell1->uncovered_area += 2*fx1;
	++p.cell1->covered_height;
	p.cell2->uncovered_area -= 2*fx2;
	--p.cell2->covered_height;
    } else {
	struct cell *cell = cell_list_find(cells, ix1);
	cell->uncovered_area += 2*(fx1-fx2);
    }
}
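
/* Example (illustrative, not in the original source): with GRID_X ==
 * 4, the subspan [5, 14) in grid units splits into (ix1 = 1, fx1 = 1)
 * and (ix2 = 3, fx2 = 2).  Pixel 1 gets uncovered_area += 2 and
 * covered_height += 1, pixel 3 gets uncovered_area -= 4 and
 * covered_height -= 1; summing the deltas while blitting yields
 * partial coverage for pixels 1 and 3 and full coverage of this
 * subrow for pixel 2 in between.
 */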

/* Adds the analytical coverage of an edge crossing the current pixel
 * row to the coverage cells.
 *
 * This function is only called when we know that during this pixel row:
 *
 * 1) The relative order of all edges on the active list doesn't
 * change.  In particular, no edges intersect within this row to pixel
 * precision.
 *
 * 2) No new edges start in this row.
 *
 * 3) No existing edges end mid-row.
 *
 * This function depends on being called with all edges from the
 * active list in the order they appear on the list (i.e. with
 * non-decreasing x-coordinate.) */
static void
cell_list_render_edge(struct cell_list *cells,
		      struct edge *edge,
		      int sign)
{
    struct cell *cell;
    grid_scaled_x_t fx;
    int ix;

    GRID_X_TO_INT_FRAC(edge->x.quo, ix, fx);

    /* We always know that ix is >= the cell list cursor in this
     * case due to the no-intersections precondition. */
    cell = cell_list_find(cells, ix);
    cell->covered_height += sign*GRID_Y;
    cell->uncovered_area += sign*2*fx*GRID_Y;
}

static void
polygon_init (struct polygon *polygon, jmp_buf *jmp)
{
    polygon->ymin = polygon->ymax = 0;
    polygon->y_buckets = polygon->y_buckets_embedded;
    pool_init (polygon->edge_pool.base, jmp,
	       8192 - sizeof (struct _pool_chunk),
	       sizeof (polygon->edge_pool.embedded));
}

static void
polygon_fini (struct polygon *polygon)
{
    if (polygon->y_buckets != polygon->y_buckets_embedded)
	free (polygon->y_buckets);

    pool_fini (polygon->edge_pool.base);
}

/* Empties the polygon of all edges. The polygon is then prepared to
 * receive new edges and clip them to the vertical range
 * [ymin,ymax). */
static glitter_status_t
polygon_reset (struct polygon *polygon,
	       grid_scaled_y_t ymin,
	       grid_scaled_y_t ymax)
{
    unsigned h = ymax - ymin;
    unsigned num_buckets = EDGE_Y_BUCKET_INDEX(ymax + GRID_Y-1, ymin);

    pool_reset(polygon->edge_pool.base);

    if (unlikely (h > 0x7FFFFFFFU - GRID_Y))
	goto bail_no_mem; /* even if you could, you wouldn't want to. */

    if (polygon->y_buckets != polygon->y_buckets_embedded)
	free (polygon->y_buckets);

    polygon->y_buckets = polygon->y_buckets_embedded;
    if (num_buckets > ARRAY_LENGTH (polygon->y_buckets_embedded)) {
	polygon->y_buckets = _cairo_malloc_ab (num_buckets,
					       sizeof (struct edge *));
	if (unlikely (NULL == polygon->y_buckets))
	    goto bail_no_mem;
    }
    memset (polygon->y_buckets, 0, num_buckets * sizeof (struct edge *));

    polygon->ymin = ymin;
    polygon->ymax = ymax;
    return GLITTER_STATUS_SUCCESS;

 bail_no_mem:
    polygon->ymin = 0;
    polygon->ymax = 0;
    return GLITTER_STATUS_NO_MEMORY;
}

static void
_polygon_insert_edge_into_its_y_bucket(struct polygon *polygon,
				       struct edge *e)
{
    unsigned ix = EDGE_Y_BUCKET_INDEX(e->ytop, polygon->ymin);
    struct edge **ptail = &polygon->y_buckets[ix];
    e->next = *ptail;
    *ptail = e;
}

static void
polygon_add_edge (struct polygon *polygon,
		  const cairo_edge_t *edge)
{
    struct edge *e;
    grid_scaled_x_t dx;
    grid_scaled_y_t dy;
    grid_scaled_y_t ytop, ybot;
    grid_scaled_y_t ymin = polygon->ymin;
    grid_scaled_y_t ymax = polygon->ymax;

    if (unlikely (edge->top >= ymax || edge->bottom <= ymin))
	return;

    e = pool_alloc (polygon->edge_pool.base, sizeof (struct edge));

    dx = edge->line.p2.x - edge->line.p1.x;
    dy = edge->line.p2.y - edge->line.p1.y;
    e->dy = dy;
    e->dir = edge->dir;

    ytop = edge->top >= ymin ? edge->top : ymin;
    ybot = edge->bottom <= ymax ? edge->bottom : ymax;
    e->ytop = ytop;
    e->height_left = ybot - ytop;

    if (dx == 0) {
	e->vertical = TRUE;
	e->x.quo = edge->line.p1.x;
	e->x.rem = 0;
	e->dxdy.quo = 0;
	e->dxdy.rem = 0;
    } else {
	e->vertical = FALSE;
	e->dxdy = floored_divrem (dx, dy);
	if (ytop == edge->line.p1.y) {
	    e->x.quo = edge->line.p1.x;
	    e->x.rem = 0;
	} else {
	    e->x = floored_muldivrem (ytop - edge->line.p1.y, dx, dy);
	    e->x.quo += edge->line.p1.x;
	}
    }

    _polygon_insert_edge_into_its_y_bucket (polygon, e);

    e->x.rem -= dy;		/* Bias the remainder for faster
				 * edge advancement. */
}
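
/* Example (illustrative, not in the original source): an edge from
 * (0,0) to (5,8) in grid coordinates gets dxdy = floored_divrem(5, 8)
 * == {0, 5}, and x == {0, -8} after the remainder bias above.  Each
 * subsample row step in sub_row() then does
 *
 *   x.quo += dxdy.quo;  x.rem += dxdy.rem;
 *   if (x.rem >= 0) { ++x.quo; x.rem -= dy; }
 *
 * giving x.quo == 0 at y == 1 (rem -3) and x.quo == 1 at y == 2
 * (rem -6), i.e. floor(5*y/8) without any per-row division.
 */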

static void
active_list_reset (struct active_list *active)
{
    active->head.vertical = 1;
    active->head.height_left = INT_MAX;
    active->head.x.quo = INT_MIN;
    active->head.prev = NULL;
    active->head.next = &active->tail;
    active->tail.prev = &active->head;
    active->tail.next = NULL;
    active->tail.x.quo = INT_MAX;
    active->tail.height_left = INT_MAX;
    active->tail.vertical = 1;
    active->min_height = 0;
    active->is_vertical = 1;
}

static void
active_list_init(struct active_list *active)
{
    active_list_reset(active);
}

/*
 * Merge two sorted edge lists.
 * Input:
 *  - head_a: The head of the first list.
 *  - head_b: The head of the second list; head_b cannot be NULL.
 * Output:
 * Returns the head of the merged list.
 *
 * Implementation notes:
 * To make it fast (in particular, to reduce to an insertion sort whenever
 * one of the two input lists only has a single element) we iterate through
 * a list until its head becomes greater than the head of the other list,
 * then we switch their roles. As soon as one of the two lists is empty, we
 * just attach the other one to the current list and exit.
 * Writes to memory are only needed to "switch" lists (as it also requires
 * attaching to the output list the list which we will be iterating next) and
 * to attach the last non-empty list.
 */
static struct edge *
merge_sorted_edges (struct edge *head_a, struct edge *head_b)
{
    struct edge *head, **next, *prev;
    int32_t x;

    prev = head_a->prev;
    next = &head;
    if (head_a->x.quo <= head_b->x.quo) {
	head = head_a;
    } else {
	head = head_b;
	head_b->prev = prev;
	goto start_with_b;
    }

    do {
	x = head_b->x.quo;
	while (head_a != NULL && head_a->x.quo <= x) {
	    prev = head_a;
	    next = &head_a->next;
	    head_a = head_a->next;
	}

	head_b->prev = prev;
	*next = head_b;
	if (head_a == NULL)
	    return head;

start_with_b:
	x = head_a->x.quo;
	while (head_b != NULL && head_b->x.quo <= x) {
	    prev = head_b;
	    next = &head_b->next;
	    head_b = head_b->next;
	}

	head_a->prev = prev;
	*next = head_a;
	if (head_b == NULL)
	    return head;
    } while (1);
}

/*
 * Sort (part of) a list.
 * Input:
 *  - list: The list to be sorted; list cannot be NULL.
 *  - level: Recursion limit.
 * Output:
 *  - head_out: The head of the sorted list containing the first 2^(level+1) elements of the
 *    input list; if the input list has fewer elements, head_out will be a sorted list
 *    containing all the elements of the input list.
 * Returns the head of the list of unprocessed elements (NULL if the sorted list contains
 * all the elements of the input list).
 *
 * Implementation notes:
 * Special case single element list, unroll/inline the sorting of the first two elements.
 * Some tail recursion is used since we iterate on the bottom-up solution of the problem
 * (we start with a small sorted list and keep merging other lists of the same size to it).
 */
static struct edge *
sort_edges (struct edge *list,
	    unsigned int level,
	    struct edge **head_out)
{
    struct edge *head_other, *remaining;
    unsigned int i;

    head_other = list->next;

    if (head_other == NULL) {
	*head_out = list;
	return NULL;
    }

    remaining = head_other->next;
    if (list->x.quo <= head_other->x.quo) {
	*head_out = list;
	head_other->next = NULL;
    } else {
	*head_out = head_other;
	head_other->prev = list->prev;
	head_other->next = list;
	list->prev = head_other;
	list->next = NULL;
    }

    for (i = 0; i < level && remaining; i++) {
	remaining = sort_edges (remaining, i, &head_other);
	*head_out = merge_sorted_edges (*head_out, head_other);
    }

    return remaining;
}

static struct edge *
merge_unsorted_edges (struct edge *head, struct edge *unsorted)
{
    sort_edges (unsorted, UINT_MAX, &unsorted);
    return merge_sorted_edges (head, unsorted);
}

/* Test if the edges on the active list can be safely advanced by a
 * full row without intersections or any edges ending. */
inline static int
can_do_full_row (struct active_list *active)
{
    const struct edge *e;

    /* Recomputes the minimum height of all edges on the active
     * list if we have been dropping edges. */
    if (active->min_height <= 0) {
	int min_height = INT_MAX;
	int is_vertical = 1;

	e = active->head.next;
	while (&active->tail != e) {
	    if (e->height_left < min_height)
		min_height = e->height_left;
	    is_vertical &= e->vertical;
	    e = e->next;
	}

	active->is_vertical = is_vertical;
	active->min_height = min_height;
    }

    if (active->min_height < GRID_Y)
	return 0;

    return active->is_vertical;
}

/* Merges edges on the given subpixel row from the polygon to the
 * active_list. */
inline static void
active_list_merge_edges_from_bucket(struct active_list *active,
				    struct edge *edges)
{
    active->head.next = merge_unsorted_edges (active->head.next, edges);
}

inline static void
polygon_fill_buckets (struct active_list *active,
		      struct edge *edge,
		      int y,
		      struct edge **buckets)
{
    grid_scaled_y_t min_height = active->min_height;
    int is_vertical = active->is_vertical;

    while (edge) {
	struct edge *next = edge->next;
	int suby = edge->ytop - y;
	if (buckets[suby])
	    buckets[suby]->prev = edge;
	edge->next = buckets[suby];
	edge->prev = NULL;
	buckets[suby] = edge;
	if (edge->height_left < min_height)
	    min_height = edge->height_left;
	is_vertical &= edge->vertical;
	edge = next;
    }

    active->is_vertical = is_vertical;
    active->min_height = min_height;
}

inline static void
sub_row (struct active_list *active,
	 struct cell_list *coverages,
	 unsigned int mask)
{
    struct edge *edge = active->head.next;
    int xstart = INT_MIN, prev_x = INT_MIN;
    int winding = 0;

    cell_list_rewind (coverages);

    while (&active->tail != edge) {
	struct edge *next = edge->next;
	int xend = edge->x.quo;

	if (--edge->height_left) {
	    edge->x.quo += edge->dxdy.quo;
	    edge->x.rem += edge->dxdy.rem;
	    if (edge->x.rem >= 0) {
		++edge->x.quo;
		edge->x.rem -= edge->dy;
	    }

	    if (edge->x.quo < prev_x) {
		struct edge *pos = edge->prev;
		pos->next = next;
		next->prev = pos;
		do {
		    pos = pos->prev;
		} while (edge->x.quo < pos->x.quo);
		pos->next->prev = edge;
		edge->next = pos->next;
		edge->prev = pos;
		pos->next = edge;
	    } else
		prev_x = edge->x.quo;
	} else {
	    edge->prev->next = next;
	    next->prev = edge->prev;
	}

	winding += edge->dir;
	if ((winding & mask) == 0) {
	    if (next->x.quo != xend) {
		cell_list_add_subspan (coverages, xstart, xend);
		xstart = INT_MIN;
	    }
	} else if (xstart == INT_MIN)
	    xstart = xend;

	edge = next;
    }
}

inline static void dec (struct edge *e, int h)
{
    e->height_left -= h;
    if (e->height_left == 0) {
	e->prev->next = e->next;
	e->next->prev = e->prev;
    }
}

static void
full_row (struct active_list *active,
	  struct cell_list *coverages,
	  unsigned int mask)
{
    struct edge *left = active->head.next;

    while (&active->tail != left) {
	struct edge *right;
	int winding;

	dec (left, GRID_Y);

	winding = left->dir;
	right = left->next;
	do {
	    dec (right, GRID_Y);

	    winding += right->dir;
	    if ((winding & mask) == 0 && right->next->x.quo != right->x.quo)
		break;

	    right = right->next;
	} while (1);

	cell_list_set_rewind (coverages);
	cell_list_render_edge (coverages, left, +1);
	cell_list_render_edge (coverages, right, -1);

	left = right->next;
    }
}

static void
_glitter_scan_converter_init(glitter_scan_converter_t *converter, jmp_buf *jmp)
{
    polygon_init(converter->polygon, jmp);
    active_list_init(converter->active);
    cell_list_init(converter->coverages, jmp);
    converter->xmin = 0;
    converter->ymin = 0;
    converter->xmax = 0;
    converter->ymax = 0;
}

static void
_glitter_scan_converter_fini(glitter_scan_converter_t *self)
{
    if (self->spans != self->spans_embedded)
	free (self->spans);

    polygon_fini(self->polygon);
    cell_list_fini(self->coverages);

    self->xmin = 0;
    self->ymin = 0;
    self->xmax = 0;
    self->ymax = 0;
}

static grid_scaled_t
int_to_grid_scaled(int i, int scale)
{
    /* Clamp to max/min representable scaled number. */
    if (i >= 0) {
	if (i >= INT_MAX/scale)
	    i = INT_MAX/scale;
    }
    else {
	if (i <= INT_MIN/scale)
	    i = INT_MIN/scale;
    }
    return i*scale;
}

#define int_to_grid_scaled_x(x) int_to_grid_scaled((x), GRID_X)
#define int_to_grid_scaled_y(x) int_to_grid_scaled((x), GRID_Y)

I glitter_status_t
glitter_scan_converter_reset(
    glitter_scan_converter_t *converter,
    int xmin, int ymin,
    int xmax, int ymax)
{
    glitter_status_t status;

    converter->xmin = 0; converter->xmax = 0;
    converter->ymin = 0; converter->ymax = 0;

    if (xmax - xmin > ARRAY_LENGTH(converter->spans_embedded)) {
	converter->spans = _cairo_malloc_ab (xmax - xmin,
					     sizeof (cairo_half_open_span_t));
	if (unlikely (converter->spans == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);
    } else
	converter->spans = converter->spans_embedded;

    xmin = int_to_grid_scaled_x(xmin);
    ymin = int_to_grid_scaled_y(ymin);
    xmax = int_to_grid_scaled_x(xmax);
    ymax = int_to_grid_scaled_y(ymax);

    active_list_reset(converter->active);
    cell_list_reset(converter->coverages);
    status = polygon_reset(converter->polygon, ymin, ymax);
    if (status)
	return status;

    converter->xmin = xmin;
    converter->xmax = xmax;
    converter->ymin = ymin;
    converter->ymax = ymax;
    return GLITTER_STATUS_SUCCESS;
}

/* INPUT_TO_GRID_X/Y (in_coord, out_grid_scaled, grid_scale)
 *   These macros convert an input coordinate in the client's
 *   device space to the rasterisation grid.
 */
/* Gah.. this bit of ugly defines INPUT_TO_GRID_X/Y so as to use
 * shifts if possible, and something saneish if not.
 */
#if !defined(INPUT_TO_GRID_Y) && defined(GRID_Y_BITS) && GRID_Y_BITS <= GLITTER_INPUT_BITS
# define INPUT_TO_GRID_Y(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_Y_BITS)
#else
# define INPUT_TO_GRID_Y(in, out) INPUT_TO_GRID_general(in, out, GRID_Y)
#endif

#if !defined(INPUT_TO_GRID_X) && defined(GRID_X_BITS) && GRID_X_BITS <= GLITTER_INPUT_BITS
# define INPUT_TO_GRID_X(in, out) (out) = (in) >> (GLITTER_INPUT_BITS - GRID_X_BITS)
#else
# define INPUT_TO_GRID_X(in, out) INPUT_TO_GRID_general(in, out, GRID_X)
#endif

#define INPUT_TO_GRID_general(in, out, grid_scale) do { \
    long long tmp__ = (long long)(grid_scale) * (in);	\
    tmp__ >>= GLITTER_INPUT_BITS;			\
    (out) = tmp__;					\
} while (0)

/* Add a new polygon edge from pixel (x1,y1) to (x2,y2) to the scan
 * converter.  The coordinates represent pixel positions scaled by
 * 2**GLITTER_INPUT_BITS.  If this function fails then the scan
 * converter should be reset or destroyed.  Dir must be +1 or -1,
 * with the latter reversing the orientation of the edge. */
I void
glitter_scan_converter_add_edge (glitter_scan_converter_t *converter,
				 const cairo_edge_t *edge)
{
    cairo_edge_t e;

    INPUT_TO_GRID_Y (edge->top, e.top);
    INPUT_TO_GRID_Y (edge->bottom, e.bottom);
    if (e.top >= e.bottom)
	return;

    /* XXX: possible overflows if GRID_X/Y > 2**GLITTER_INPUT_BITS */
    INPUT_TO_GRID_Y (edge->line.p1.y, e.line.p1.y);
    INPUT_TO_GRID_Y (edge->line.p2.y, e.line.p2.y);
    if (e.line.p1.y == e.line.p2.y)
	e.line.p2.y++;	/* Fudge to prevent div-by-zero in the DDA setup. */

    INPUT_TO_GRID_X (edge->line.p1.x, e.line.p1.x);
    INPUT_TO_GRID_X (edge->line.p2.x, e.line.p2.x);

    e.dir = edge->dir;

    polygon_add_edge (converter->polygon, &e);
}

static void
step_edges (struct active_list *active, int count)
{
    struct edge *edge;

    count *= GRID_Y;
    for (edge = active->head.next; edge != &active->tail; edge = edge->next) {
	edge->height_left -= count;
	if (! edge->height_left) {
	    edge->prev->next = edge->next;
	    edge->next->prev = edge->prev;
	}
    }
}

static glitter_status_t
blit_a8 (struct cell_list *cells,
	 cairo_span_renderer_t *renderer,
	 cairo_half_open_span_t *spans,
	 int y, int height,
	 int xmin, int xmax)
{
    struct cell *cell = cells->head.next;
    int prev_x = xmin, last_x = -1;
    int16_t cover = 0, last_cover = 0;
    unsigned num_spans;

    if (cell == &cells->tail)
	return CAIRO_STATUS_SUCCESS;

    /* Skip cells to the left of the clip region. */
    while (cell->x < xmin) {
	cover += cell->covered_height;
	cell = cell->next;
    }
    cover *= GRID_X*2;

    /* Form the spans from the coverages and areas. */
    num_spans = 0;
    for (; cell->x < xmax; cell = cell->next) {
	int x = cell->x;
	int16_t area;

	if (x > prev_x && cover != last_cover) {
	    spans[num_spans].x = prev_x;
	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
	    last_cover = cover;
	    last_x = prev_x;
	    ++num_spans;
	}

	cover += cell->covered_height*GRID_X*2;
	area = cover - cell->uncovered_area;

	if (area != last_cover) {
	    spans[num_spans].x = x;
	    spans[num_spans].coverage = GRID_AREA_TO_ALPHA (area);
	    last_cover = area;
	    last_x = x;
	    ++num_spans;
	}

	prev_x = x+1;
    }

    if (prev_x <= xmax && cover != last_cover) {
	spans[num_spans].x = prev_x;
	spans[num_spans].coverage = GRID_AREA_TO_ALPHA (cover);
	last_cover = cover;
	last_x = prev_x;
	++num_spans;
    }

    if (last_x < xmax && last_cover) {
	spans[num_spans].x = xmax;
	spans[num_spans].coverage = 0;
	++num_spans;
    }

    /* Dump them into the renderer. */
    return renderer->render_rows (renderer, y, height, spans, num_spans);
}
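
/* Example (illustrative, not in the original source): a row filled by
 * a box covering pixels [2, 12) with vertical sides on pixel
 * boundaries leaves two cells, x == 2 with covered_height == +GRID_Y
 * and x == 12 with covered_height == -GRID_Y.  blit_a8 then emits
 * roughly
 *
 *   spans[0] == { .x =  2, .coverage = 255 }
 *   spans[1] == { .x = 12, .coverage =   0 }
 *
 * one half-open span per coverage change rather than one per pixel.
 */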

#define GRID_AREA_TO_A1(A) ((GRID_AREA_TO_ALPHA (A) > 127) ? 255 : 0)
static glitter_status_t
blit_a1 (struct cell_list *cells,
	 cairo_span_renderer_t *renderer,
	 cairo_half_open_span_t *spans,
	 int y, int height,
	 int xmin, int xmax)
{
    struct cell *cell = cells->head.next;
    int prev_x = xmin, last_x = -1;
    int16_t cover = 0;
    uint8_t coverage, last_cover = 0;
    unsigned num_spans;

    if (cell == &cells->tail)
	return CAIRO_STATUS_SUCCESS;

    /* Skip cells to the left of the clip region. */
    while (cell->x < xmin) {
	cover += cell->covered_height;
	cell = cell->next;
    }
    cover *= GRID_X*2;

    /* Form the spans from the coverages and areas. */
    num_spans = 0;
    for (; cell->x < xmax; cell = cell->next) {
	int x = cell->x;
	int16_t area;

	coverage = GRID_AREA_TO_A1 (cover);
	if (x > prev_x && coverage != last_cover) {
	    last_x = spans[num_spans].x = prev_x;
	    last_cover = spans[num_spans].coverage = coverage;
	    ++num_spans;
	}

	cover += cell->covered_height*GRID_X*2;
	area = cover - cell->uncovered_area;

	coverage = GRID_AREA_TO_A1 (area);
	if (coverage != last_cover) {
	    last_x = spans[num_spans].x = x;
	    last_cover = spans[num_spans].coverage = coverage;
	    ++num_spans;
	}

	prev_x = x+1;
    }

    coverage = GRID_AREA_TO_A1 (cover);
    if (prev_x <= xmax && coverage != last_cover) {
	last_x = spans[num_spans].x = prev_x;
	last_cover = spans[num_spans].coverage = coverage;
	++num_spans;
    }

    if (last_x < xmax && last_cover) {
	spans[num_spans].x = xmax;
	spans[num_spans].coverage = 0;
	++num_spans;
    }
    if (num_spans == 1)
	return CAIRO_STATUS_SUCCESS;

    /* Dump them into the renderer. */
    return renderer->render_rows (renderer, y, height, spans, num_spans);
}

I void
glitter_scan_converter_render(glitter_scan_converter_t *converter,
			      unsigned int winding_mask,
			      int antialias,
			      cairo_span_renderer_t *renderer)
{
    int i, j;
    int ymax_i = converter->ymax / GRID_Y;
    int ymin_i = converter->ymin / GRID_Y;
    int xmin_i, xmax_i;
    int h = ymax_i - ymin_i;
    struct polygon *polygon = converter->polygon;
    struct cell_list *coverages = converter->coverages;
    struct active_list *active = converter->active;
    struct edge *buckets[GRID_Y] = { 0 };

    xmin_i = converter->xmin / GRID_X;
    xmax_i = converter->xmax / GRID_X;
    if (xmin_i >= xmax_i)
	return;

    /* Render each pixel row. */
    for (i = 0; i < h; i = j) {
	int do_full_row = 0;

	j = i + 1;

	/* Determine if we can ignore this row or use the full pixel
	 * stepper. */
	if (! polygon->y_buckets[i]) {
	    if (active->head.next == &active->tail) {
		active->min_height = INT_MAX;
		active->is_vertical = 1;
		for (; j < h && ! polygon->y_buckets[j]; j++)
		    ;
		continue;
	    }

	    do_full_row = can_do_full_row (active);
	}

	if (do_full_row) {
	    /* Step by a full pixel row's worth. */
	    full_row (active, coverages, winding_mask);

	    if (active->is_vertical) {
		while (j < h &&
		       polygon->y_buckets[j] == NULL &&
		       active->min_height >= 2*GRID_Y)
		{
		    active->min_height -= GRID_Y;
		    j++;
		}
		if (j != i + 1)
		    step_edges (active, j - (i + 1));
	    }
	} else {
	    int sub;

	    polygon_fill_buckets (active,
				  polygon->y_buckets[i],
				  (i+ymin_i)*GRID_Y,
				  buckets);

	    /* Subsample this row. */
	    for (sub = 0; sub < GRID_Y; sub++) {
		if (buckets[sub]) {
		    active_list_merge_edges_from_bucket (active, buckets[sub]);
		    buckets[sub] = NULL;
		}

		sub_row (active, coverages, winding_mask);
	    }
	}

	if (antialias)
	    blit_a8 (coverages, renderer, converter->spans,
		     i+ymin_i, j-i, xmin_i, xmax_i);
	else
	    blit_a1 (coverages, renderer, converter->spans,
		     i+ymin_i, j-i, xmin_i, xmax_i);
	cell_list_reset (coverages);

	active->min_height -= GRID_Y;
    }
}

struct _cairo_tor22_scan_converter {
    cairo_scan_converter_t base;

    glitter_scan_converter_t converter[1];
    cairo_fill_rule_t fill_rule;
    cairo_antialias_t antialias;

    jmp_buf jmp;
};

typedef struct _cairo_tor22_scan_converter cairo_tor22_scan_converter_t;

static void
_cairo_tor22_scan_converter_destroy (void *converter)
{
    cairo_tor22_scan_converter_t *self = converter;
    if (self == NULL) {
	return;
    }
    _glitter_scan_converter_fini (self->converter);
    free(self);
}

cairo_status_t
_cairo_tor22_scan_converter_add_polygon (void *converter,
					 const cairo_polygon_t *polygon)
{
    cairo_tor22_scan_converter_t *self = converter;
    int i;

#if 0
    FILE *file = fopen ("polygon.txt", "w");
    _cairo_debug_print_polygon (file, polygon);
    fclose (file);
#endif

    for (i = 0; i < polygon->num_edges; i++)
	 glitter_scan_converter_add_edge (self->converter, &polygon->edges[i]);

    return CAIRO_STATUS_SUCCESS;
}

static cairo_status_t
_cairo_tor22_scan_converter_generate (void *converter,
				      cairo_span_renderer_t *renderer)
{
    cairo_tor22_scan_converter_t *self = converter;
    cairo_status_t status;

    if ((status = setjmp (self->jmp)))
	return _cairo_scan_converter_set_error (self, _cairo_error (status));

    glitter_scan_converter_render (self->converter,
				   self->fill_rule == CAIRO_FILL_RULE_WINDING ? ~0 : 1,
				   self->antialias != CAIRO_ANTIALIAS_NONE,
				   renderer);
    return CAIRO_STATUS_SUCCESS;
}

cairo_scan_converter_t *
_cairo_tor22_scan_converter_create (int xmin,
				    int ymin,
				    int xmax,
				    int ymax,
				    cairo_fill_rule_t fill_rule,
				    cairo_antialias_t antialias)
{
    cairo_tor22_scan_converter_t *self;
    cairo_status_t status;

    self = malloc (sizeof(struct _cairo_tor22_scan_converter));
    if (unlikely (self == NULL)) {
	status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	goto bail_nomem;
    }

    self->base.destroy = _cairo_tor22_scan_converter_destroy;
    self->base.generate = _cairo_tor22_scan_converter_generate;

    _glitter_scan_converter_init (self->converter, &self->jmp);
    status = glitter_scan_converter_reset (self->converter,
					   xmin, ymin, xmax, ymax);
    if (unlikely (status))
	goto bail;

    self->fill_rule = fill_rule;
    self->antialias = antialias;

    return &self->base;

 bail:
    self->base.destroy(&self->base);
 bail_nomem:
    return _cairo_scan_converter_create_in_error (status);
}