2 * Copyright © 2004 Carl Worth
3 * Copyright © 2006 Red Hat, Inc.
4 * Copyright © 2007 David Turner
5 * Copyright © 2008 M Joonas Pihlaja
6 * Copyright © 2008 Chris Wilson
7 * Copyright © 2009 Intel Corporation
9 * This library is free software; you can redistribute it and/or
10 * modify it either under the terms of the GNU Lesser General Public
11 * License version 2.1 as published by the Free Software Foundation
12 * (the "LGPL") or, at your option, under the terms of the Mozilla
13 * Public License Version 1.1 (the "MPL"). If you do not alter this
14 * notice, a recipient may use your version of this file under either
15 * the MPL or the LGPL.
17 * You should have received a copy of the LGPL along with this library
18 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
20 * You should have received a copy of the MPL along with this library
21 * in the file COPYING-MPL-1.1
23 * The contents of this file are subject to the Mozilla Public License
24 * Version 1.1 (the "License"); you may not use this file except in
25 * compliance with the License. You may obtain a copy of the License at
26 * http://www.mozilla.org/MPL/
28 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
29 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
30 * the specific language governing rights and limitations.
32 * The Original Code is the cairo graphics library.
34 * The Initial Developer of the Original Code is Carl Worth
37 * Carl D. Worth <cworth@cworth.org>
38 * M Joonas Pihlaja <jpihlaja@cc.helsinki.fi>
39 * Chris Wilson <chris@chris-wilson.co.uk>
42 /* Provide definitions for standalone compilation */
45 #include "cairo-error-private.h"
46 #include "cairo-list-inline.h"
47 #include "cairo-freelist-private.h"
48 #include "cairo-combsort-inline.h"
/* Fixed-point sample-step sizes: one full cairo_fixed_t unit in each axis. */
52 #define STEP_X CAIRO_FIXED_ONE
53 #define STEP_Y CAIRO_FIXED_ONE
/* Textually repeat the statement x three times (manual loop unrolling). */
54 #define UNROLL3(x) x x x
56 #define STEP_XY (2*STEP_X*STEP_Y) /* Unit area in the step. */
/* Convert an accumulated signed coverage area c into an alpha value in
 * [0, 255]; the + STEP_XY/2 term rounds to nearest rather than truncating. */
57 #define AREA_TO_ALPHA(c) (((c)*255 + STEP_XY/2) / STEP_XY)
/* One ordinate of a computed line-line intersection, tagged with whether
 * the division that produced it was exact or had to be rounded.
 * NOTE(review): the numeric ordinate member (used elsewhere as .ordinate)
 * is not visible in this excerpt — source appears elided here. */
59 typedef struct _cairo_bo_intersect_ordinate {
61 enum { EXACT, INEXACT } exactness;
62 } cairo_bo_intersect_ordinate_t;
/* An intersection point: an (x, y) pair of possibly-inexact ordinates. */
64 typedef struct _cairo_bo_intersect_point {
65 cairo_bo_intersect_ordinate_t x;
66 cairo_bo_intersect_ordinate_t y;
67 } cairo_bo_intersect_point_t;
/* NOTE(review): fragment of the sweep-line edge structure (edge_t);
 * several members visible elsewhere in the file (x, dxdy, dy,
 * current_sign, runs, link) are elided from this excerpt. */
85 /* Current x coordinate and advancement.
86 * Initialised to the x coordinate of the top of the
87 * edge. The quotient is in cairo_fixed_t units and the
88 * remainder is mod dy in cairo_fixed_t units.
/* x advance over a full STEP_Y row, as quotient + remainder
 * (consumed by full_inc_edge below). */
93 struct quorem dxdy_full;
/* TRUE when the edge is exactly vertical, i.e. x never advances. */
95 cairo_bool_t vertical;
/* Binary-heap index arithmetic for the priority queue: the heap array is
 * 1-based (slot 0 unused) so parent/child indices are simple shifts. */
107 /* the parent is always given by index/2 */
108 #define PQ_PARENT_INDEX(i) ((i) >> 1)
109 #define PQ_FIRST_ENTRY 1
111 /* left and right children are index * 2 and (index * 2) +1 respectively */
112 #define PQ_LEFT_CHILD_INDEX(i) ((i) << 1)
/* NOTE(review): heavily elided declarations of the event/queue/sweep-line
 * types; most members are missing from this excerpt.  Commentary below is
 * limited to what the visible lines establish. */
116 EVENT_TYPE_INTERSECTION,
120 typedef struct _event {
125 typedef struct _start_event {
131 typedef struct _queue_event {
138 typedef struct _pqueue {
/* Embedded storage so small queues avoid any heap allocation; pqueue_grow
 * switches to a heap-allocated array when this overflows. */
142 event_t *elements_embedded[1024];
153 typedef struct _sweep_line {
155 cairo_list_t stopped;
/* Hint for sweep_line_insert: list position of the last insertion. */
156 cairo_list_t *insert_cursor;
157 cairo_bool_t is_vertical;
/* Current integer row and the sub-row (fractional) position within it. */
159 cairo_fixed_t current_row;
160 cairo_fixed_t current_subrow;
169 cairo_freepool_t pool;
/* Sorted array of start events, merged against the pqueue in event_next. */
174 event_t **start_events;
176 cairo_freepool_t pool;
179 cairo_freepool_t runs;
/* floor(a/b) with remainder: the (a^b)<0 test detects operands of opposite
 * sign so the truncated C quotient can be corrected toward negative
 * infinity.  NOTE(review): bodies are partially elided in this excerpt. */
184 cairo_always_inline static struct quorem
185 floored_divrem (int a, int b)
190 if ((a^b)<0 && qr.rem) {
/* As floored_divrem, but computes floor((x*a)/b) using a 64-bit
 * intermediate product so x*a cannot overflow 32 bits. */
198 floored_muldivrem(int x, int a, int b)
201 long long xa = (long long)x*a;
204 if ((xa>=0) != (b>=0) && qr.rem) {
/* Solve for the x ordinate of `line` at height y:
 * x = p1.x + (y - p1.y) * dx / dy, using a floored fixed-point
 * multiply-divide to keep full precision.
 * NOTE(review): parts of the body are elided in this excerpt. */
212 line_compute_intersection_x_for_y (const cairo_line_t *line,
223 dy = line->p2.y - line->p1.y;
225 x += _cairo_fixed_mul_div_floor (y - line->p1.y,
226 line->p2.x - line->p1.x,
/* NOTE(review): the function below is partially elided (several case
 * labels, the bbox computations and some braces are missing from this
 * excerpt); the original comment block documents the full derivation. */
234 * We need to compare the x-coordinates of a pair of lines for a particular y,
235 * without loss of precision.
237 * The x-coordinate along an edge for a given y is:
238 * X = A_x + (Y - A_y) * A_dx / A_dy
240 * So the inequality we wish to test is:
241 * A_x + (Y - A_y) * A_dx / A_dy ∘ B_x + (Y - B_y) * B_dx / B_dy,
242 * where ∘ is our inequality operator.
244 * By construction, we know that A_dy and B_dy (and (Y - A_y), (Y - B_y)) are
245 * all positive, so we can rearrange it thus without causing a sign change:
246 * A_dy * B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx * A_dy
247 * - (Y - A_y) * A_dx * B_dy
249 * Given the assumption that all the deltas fit within 32 bits, we can compute
250 * this comparison directly using 128 bit arithmetic. For certain, but common,
251 * input we can reduce this down to a single 32 bit compare by inspecting the
254 * (And put the burden of the work on developing fast 128 bit ops, which are
255 * required throughout the tessellator.)
257 * See the similar discussion for _slope_compare().
260 edges_compare_x_for_y_general (const cairo_edge_t *a,
261 const cairo_edge_t *b,
264 /* XXX: We're assuming here that dx and dy will still fit in 32
265 * bits. That's not true in general as there could be overflow. We
266 * should prevent that before the tessellation algorithm
/* Bitmask recording which of the three terms (dx, adx, bdx) is non-zero;
 * cleared below as each term is found to vanish. */
276 HAVE_DX_ADX = HAVE_DX | HAVE_ADX,
278 HAVE_DX_BDX = HAVE_DX | HAVE_BDX,
279 HAVE_ADX_BDX = HAVE_ADX | HAVE_BDX,
280 HAVE_ALL = HAVE_DX | HAVE_ADX | HAVE_BDX
281 } have_dx_adx_bdx = HAVE_ALL;
283 /* don't bother solving for abscissa if the edges' bounding boxes
284 * can be used to order them. */
288 if (a->line.p1.x < a->line.p2.x) {
295 if (b->line.p1.x < b->line.p2.x) {
302 if (amax < bmin) return -1;
303 if (amin > bmax) return +1;
306 ady = a->line.p2.y - a->line.p1.y;
307 adx = a->line.p2.x - a->line.p1.x;
309 have_dx_adx_bdx &= ~HAVE_ADX;
311 bdy = b->line.p2.y - b->line.p1.y;
312 bdx = b->line.p2.x - b->line.p1.x;
314 have_dx_adx_bdx &= ~HAVE_BDX;
316 dx = a->line.p1.x - b->line.p1.x;
318 have_dx_adx_bdx &= ~HAVE_DX;
/* The three 128-bit terms of the rearranged inequality, computed lazily
 * per switch case below. */
320 #define L _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (ady, bdy), dx)
321 #define A _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (adx, bdy), y - a->line.p1.y)
322 #define B _cairo_int64x32_128_mul (_cairo_int32x32_64_mul (bdx, ady), y - b->line.p1.y)
323 switch (have_dx_adx_bdx) {
328 /* A_dy * B_dy * (A_x - B_x) ∘ 0 */
329 return dx; /* ady * bdy is positive definite */
331 /* 0 ∘ - (Y - A_y) * A_dx * B_dy */
332 return adx; /* bdy * (y - a->top.y) is positive definite */
334 /* 0 ∘ (Y - B_y) * B_dx * A_dy */
335 return -bdx; /* ady * (y - b->top.y) is positive definite */
337 /* 0 ∘ (Y - B_y) * B_dx * A_dy - (Y - A_y) * A_dx * B_dy */
338 if ((adx ^ bdx) < 0) {
340 } else if (a->line.p1.y == b->line.p1.y) { /* common origin */
341 cairo_int64_t adx_bdy, bdx_ady;
343 /* ∴ A_dx * B_dy ∘ B_dx * A_dy */
345 adx_bdy = _cairo_int32x32_64_mul (adx, bdy);
346 bdx_ady = _cairo_int32x32_64_mul (bdx, ady);
348 return _cairo_int64_cmp (adx_bdy, bdx_ady);
350 return _cairo_int128_cmp (A, B);
352 /* A_dy * (A_x - B_x) ∘ - (Y - A_y) * A_dx */
353 if ((-adx ^ dx) < 0) {
356 cairo_int64_t ady_dx, dy_adx;
358 ady_dx = _cairo_int32x32_64_mul (ady, dx);
359 dy_adx = _cairo_int32x32_64_mul (a->line.p1.y - y, adx);
361 return _cairo_int64_cmp (ady_dx, dy_adx);
364 /* B_dy * (A_x - B_x) ∘ (Y - B_y) * B_dx */
365 if ((bdx ^ dx) < 0) {
368 cairo_int64_t bdy_dx, dy_bdx;
370 bdy_dx = _cairo_int32x32_64_mul (bdy, dx);
371 dy_bdx = _cairo_int32x32_64_mul (y - b->line.p1.y, bdx);
373 return _cairo_int64_cmp (bdy_dx, dy_bdx);
/* General case: full 128-bit comparison of all three terms. */
376 /* XXX try comparing (a->line.p2.x - b->line.p2.x) et al */
377 return _cairo_int128_cmp (L, _cairo_int128_sub (B, A));
/* NOTE(review): partially elided — the return statements of the early
 * bounding-box rejections and some braces are missing from this excerpt. */
385 * We need to compare the x-coordinate of a line for a particular y wrt to a
386 * given x, without loss of precision.
388 * The x-coordinate along an edge for a given y is:
389 * X = A_x + (Y - A_y) * A_dx / A_dy
391 * So the inequality we wish to test is:
392 * A_x + (Y - A_y) * A_dx / A_dy ∘ X
393 * where ∘ is our inequality operator.
395 * By construction, we know that A_dy (and (Y - A_y)) are
396 * all positive, so we can rearrange it thus without causing a sign change:
397 * (Y - A_y) * A_dx ∘ (X - A_x) * A_dy
399 * Given the assumption that all the deltas fit within 32 bits, we can compute
400 * this comparison directly using 64 bit arithmetic.
402 * See the similar discussion for _slope_compare() and
403 * edges_compare_x_for_y_general().
406 edge_compare_for_y_against_x (const cairo_edge_t *a,
/* Cheap rejection: x outside the edge's x-extent decides the comparison. */
414 if (a->line.p1.x <= a->line.p2.x) {
415 if (x < a->line.p1.x)
417 if (x > a->line.p2.x)
420 if (x < a->line.p2.x)
422 if (x > a->line.p1.x)
426 adx = a->line.p2.x - a->line.p1.x;
427 dx = x - a->line.p1.x;
/* Sign disagreement (or dx == 0) settles the comparison without multiplying. */
431 if (dx == 0 || (adx ^ dx) < 0)
434 dy = y - a->line.p1.y;
435 ady = a->line.p2.y - a->line.p1.y;
/* Cross-multiply: (y - A_y)*adx ∘ (x - A_x)*ady, exactly in 64 bits. */
437 L = _cairo_int32x32_64_mul (dy, adx);
438 R = _cairo_int32x32_64_mul (dx, ady);
440 return _cairo_int64_cmp (L, R);
/* Compare the x ordinates of edges a and b at sweep height y, dispatching
 * to cheaper comparisons when y coincides with either edge's endpoints
 * (so its x is known exactly).
 * NOTE(review): some case labels, the ax/bx assignments' right-hand sides,
 * and the HAVE_NEITHER branch are elided in this excerpt. */
444 edges_compare_x_for_y (const cairo_edge_t *a,
445 const cairo_edge_t *b,
448 /* If the sweep-line is currently on an end-point of a line,
449 * then we know its precise x value (and considering that we often need to
450 * compare events at end-points, this happens frequently enough to warrant
457 HAVE_BOTH = HAVE_AX | HAVE_BX
458 } have_ax_bx = HAVE_BOTH;
461 /* XXX given we have x and dx? */
463 if (y == a->line.p1.y)
465 else if (y == a->line.p2.y)
468 have_ax_bx &= ~HAVE_AX;
470 if (y == b->line.p1.y)
472 else if (y == b->line.p2.y)
475 have_ax_bx &= ~HAVE_BX;
477 switch (have_ax_bx) {
480 return edges_compare_x_for_y_general (a, b, y);
/* Exactly one endpoint x known: compare the other edge against it;
 * the sign flips when it is b's x that is known. */
482 return -edge_compare_for_y_against_x (b, y, ax);
484 return edge_compare_for_y_against_x (a, y, bx);
/* Order two edges by slope: compare the dx/dy quotients first, then break
 * ties by cross-multiplying the remainders against the opposing dy.
 * NOTE(review): the early-return on a non-zero quotient difference and the
 * zero-remainder returns are elided in this excerpt. */
491 slope_compare (const edge_t *a,
497 cmp = a->dxdy.quo - b->dxdy.quo;
501 if (a->dxdy.rem == 0)
503 if (b->dxdy.rem == 0)
506 L = _cairo_int32x32_64_mul (b->dy, a->dxdy.rem);
507 R = _cairo_int32x32_64_mul (a->dy, b->dxdy.rem);
508 return _cairo_int64_cmp (L, R);
/* TRUE iff both endpoints of a and b coincide exactly. */
512 line_equal (const cairo_line_t *a, const cairo_line_t *b)
514 return a->p1.x == b->p1.x && a->p1.y == b->p1.y &&
515 a->p2.x == b->p2.x && a->p2.y == b->p2.y;
/* Total order for the active-edge list at height y: identical lines sort
 * equal, otherwise by x at y, with slope as the tie-breaker.
 * NOTE(review): the early return of cmp when non-zero is elided here. */
519 sweep_line_compare_edges (const edge_t *a,
525 if (line_equal (&a->edge.line, &b->edge.line))
528 cmp = edges_compare_x_for_y (&a->edge, &b->edge, y);
532 return slope_compare (a, b);
/* 2x2 determinant a*d - b*c of 32-bit inputs, exact in 64 bits. */
535 static inline cairo_int64_t
536 det32_64 (int32_t a, int32_t b,
537 int32_t c, int32_t d)
539 /* det = a * d - b * c */
540 return _cairo_int64_sub (_cairo_int32x32_64_mul (a, d),
541 _cairo_int32x32_64_mul (b, c));
/* 2x2 determinant a*d - b*c with 64-bit a and c, widened into 128 bits. */
544 static inline cairo_int128_t
545 det64x32_128 (cairo_int64_t a, int32_t b,
546 cairo_int64_t c, int32_t d)
548 /* det = a * d - b * c */
549 return _cairo_int128_sub (_cairo_int64x32_128_mul (a, d),
550 _cairo_int64x32_128_mul (c, b));
/* NOTE(review): partially elided — the parallel-line early return when
 * den_det is zero, several braces, and parts of the divrem call arguments
 * are missing from this excerpt. */
553 /* Compute the intersection of two lines as defined by two edges. The
554 * result is provided as a coordinate pair of 128-bit integers.
556 * Returns %CAIRO_BO_STATUS_INTERSECTION if there is an intersection or
557 * %CAIRO_BO_STATUS_PARALLEL if the two lines are exactly parallel.
560 intersect_lines (const edge_t *a, const edge_t *b,
561 cairo_bo_intersect_point_t *intersection)
563 cairo_int64_t a_det, b_det;
565 /* XXX: We're assuming here that dx and dy will still fit in 32
566 * bits. That's not true in general as there could be overflow. We
567 * should prevent that before the tessellation algorithm begins.
568 * What we're doing to mitigate this is to perform clamping in
569 * cairo_bo_tessellate_polygon().
571 int32_t dx1 = a->edge.line.p1.x - a->edge.line.p2.x;
572 int32_t dy1 = a->edge.line.p1.y - a->edge.line.p2.y;
574 int32_t dx2 = b->edge.line.p1.x - b->edge.line.p2.x;
575 int32_t dy2 = b->edge.line.p1.y - b->edge.line.p2.y;
577 cairo_int64_t den_det;
/* Denominator of the parametric solution; zero means parallel lines. */
581 den_det = det32_64 (dx1, dy1, dx2, dy2);
583 /* Q: Can we determine that the lines do not intersect (within range)
584 * much more cheaply than computing the intersection point i.e. by
585 * avoiding the division?
587 * X = ax + t * adx = bx + s * bdx;
588 * Y = ay + t * ady = by + s * bdy;
589 * ∴ t * (ady*bdx - bdy*adx) = bdx * (by - ay) + bdy * (ax - bx)
592 * Therefore we can reject any intersection (under the criteria for
593 * valid intersection events) if:
594 * L^R < 0 => t < 0, or
597 * (where top/bottom must at least extend to the line endpoints).
599 * A similar substitution can be performed for s, yielding:
600 * s * (ady*bdx - bdy*adx) = ady * (ax - bx) - adx * (ay - by)
/* Reject out-of-range t (then s) by comparing numerator against the
 * denominator with matching signs, avoiding the division entirely. */
602 R = det32_64 (dx2, dy2,
603 b->edge.line.p1.x - a->edge.line.p1.x,
604 b->edge.line.p1.y - a->edge.line.p1.y);
605 if (_cairo_int64_negative (den_det)) {
606 if (_cairo_int64_ge (den_det, R))
609 if (_cairo_int64_le (den_det, R))
613 R = det32_64 (dy1, dx1,
614 a->edge.line.p1.y - b->edge.line.p1.y,
615 a->edge.line.p1.x - b->edge.line.p1.x);
616 if (_cairo_int64_negative (den_det)) {
617 if (_cairo_int64_ge (den_det, R))
620 if (_cairo_int64_le (den_det, R))
624 /* We now know that the two lines should intersect within range. */
626 a_det = det32_64 (a->edge.line.p1.x, a->edge.line.p1.y,
627 a->edge.line.p2.x, a->edge.line.p2.y);
628 b_det = det32_64 (b->edge.line.p1.x, b->edge.line.p1.y,
629 b->edge.line.p2.x, b->edge.line.p2.y);
631 /* x = det (a_det, dx1, b_det, dx2) / den_det */
632 qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dx1,
635 if (_cairo_int64_eq (qr.rem, den_det))
638 intersection->x.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
640 intersection->x.exactness = EXACT;
641 if (! _cairo_int64_is_zero (qr.rem)) {
/* Round x to nearest: double the remainder (with sign normalised to the
 * denominator) and bump the quotient away from zero on a half or more. */
642 if (_cairo_int64_negative (den_det) ^ _cairo_int64_negative (qr.rem))
643 qr.rem = _cairo_int64_negate (qr.rem);
644 qr.rem = _cairo_int64_mul (qr.rem, _cairo_int32_to_int64 (2));
645 if (_cairo_int64_ge (qr.rem, den_det)) {
646 qr.quo = _cairo_int64_add (qr.quo,
647 _cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
649 intersection->x.exactness = INEXACT;
652 intersection->x.ordinate = _cairo_int64_to_int32 (qr.quo);
654 /* y = det (a_det, dy1, b_det, dy2) / den_det */
655 qr = _cairo_int_96by64_32x64_divrem (det64x32_128 (a_det, dy1,
658 if (_cairo_int64_eq (qr.rem, den_det))
661 intersection->y.exactness = _cairo_int64_is_zero (qr.rem) ? EXACT : INEXACT;
663 intersection->y.exactness = EXACT;
664 if (! _cairo_int64_is_zero (qr.rem)) {
665 /* compute ceiling away from zero */
666 qr.quo = _cairo_int64_add (qr.quo,
667 _cairo_int32_to_int64 (_cairo_int64_negative (qr.quo) ? -1 : 1));
668 intersection->y.exactness = INEXACT;
671 intersection->y.ordinate = _cairo_int64_to_int32 (qr.quo);
/* Compare exact integer b against possibly-inexact ordinate a; an INEXACT
 * a with equal quotient sorts strictly greater than b, hence the negated
 * boolean return (-1 when inexact, 0 when exact).
 * NOTE(review): the quotient comparison itself is elided in this excerpt. */
677 bo_intersect_ordinate_32_compare (int32_t a, int32_t b, int exactness)
681 /* First compare the quotient */
686 /* With quotient identical, if remainder is 0 then compare equal */
687 /* Otherwise, the non-zero remainder makes a > b */
688 return -(INEXACT == exactness);
/* NOTE(review): partially elided — the cmp_top/cmp_bottom early returns
 * and the second arguments of the ordinate comparisons are missing from
 * this excerpt. */
691 /* Does the given edge contain the given point. The point must already
692 * be known to be contained within the line determined by the edge,
693 * (most likely the point results from an intersection of this edge
696 * If we had exact arithmetic, then this function would simply be a
697 * matter of examining whether the y value of the point lies within
698 * the range of y values of the edge. But since intersection points
699 * are not exact due to being rounded to the nearest integer within
700 * the available precision, we must also examine the x value of the
703 * The definition of "contains" here is that the given intersection
704 * point will be seen by the sweep line after the start event for the
705 * given edge and before the stop event for the edge. See the comments
706 * in the implementation for more details.
709 bo_edge_contains_intersect_point (const edge_t *edge,
710 cairo_bo_intersect_point_t *point)
712 int cmp_top, cmp_bottom;
714 /* XXX: When running the actual algorithm, we don't actually need to
715 * compare against edge->top at all here, since any intersection above
716 * top is eliminated early via a slope comparison. We're leaving these
717 * here for now only for the sake of the quadratic-time intersection
718 * finder which needs them.
721 cmp_top = bo_intersect_ordinate_32_compare (point->y.ordinate,
727 cmp_bottom = bo_intersect_ordinate_32_compare (point->y.ordinate,
/* Strictly between top and bottom: contained regardless of x. */
733 if (cmp_top > 0 && cmp_bottom < 0)
736 /* At this stage, the point lies on the same y value as either
737 * edge->top or edge->bottom, so we have to examine the x value in
738 * order to properly determine containment. */
740 /* If the y value of the point is the same as the y value of the
741 * top of the edge, then the x value of the point must be greater
742 * to be considered as inside the edge. Similarly, if the y value
743 * of the point is the same as the y value of the bottom of the
744 * edge, then the x value of the point must be less to be
745 * considered as inside. */
750 top_x = line_compute_intersection_x_for_y (&edge->edge.line,
752 return bo_intersect_ordinate_32_compare (top_x, point->x.ordinate, point->x.exactness) < 0;
753 } else { /* cmp_bottom == 0 */
756 bot_x = line_compute_intersection_x_for_y (&edge->edge.line,
758 return bo_intersect_ordinate_32_compare (point->x.ordinate, bot_x, point->x.exactness) < 0;
/* Compute the rounded intersection of edges a and b, validating that it
 * lies within both edges' active extents; on success stores the 32-bit
 * point and (presumably) returns TRUE — the return statements are elided
 * in this excerpt. */
763 edge_intersect (const edge_t *a,
765 cairo_point_t *intersection)
767 cairo_bo_intersect_point_t quorem;
769 if (! intersect_lines (a, b, &quorem))
/* Containment need only be re-checked when the edge was clipped, i.e. its
 * top/bottom no longer coincide with the line's endpoints. */
772 if (a->edge.top != a->edge.line.p1.y || a->edge.bottom != a->edge.line.p2.y) {
773 if (! bo_edge_contains_intersect_point (a, &quorem))
777 if (b->edge.top != b->edge.line.p1.y || b->edge.bottom != b->edge.line.p2.y) {
778 if (! bo_edge_contains_intersect_point (b, &quorem))
782 /* Now that we've correctly compared the intersection point and
783 * determined that it lies within the edge, then we know that we
784 * no longer need any more bits of storage for the intersection
785 * than we do for our edge coordinates. We also no longer need the
786 * remainder from the division. */
787 intersection->x = quorem.x.ordinate;
788 intersection->y = quorem.y.ordinate;
/* Event ordering predicate for the queue (body elided in this excerpt). */
794 event_compare (const event_t *a, const event_t *b)
/* Initialise the queue to use its embedded, stack-allocated storage. */
800 pqueue_init (pqueue_t *pq)
802 pq->max_size = ARRAY_LENGTH (pq->elements_embedded);
805 pq->elements = pq->elements_embedded;
/* Release heap storage only if we ever outgrew the embedded array. */
809 pqueue_fini (pqueue_t *pq)
811 if (pq->elements != pq->elements_embedded)
/* Grow the element array, migrating off the embedded storage on first
 * growth; returns FALSE on allocation failure (return paths elided). */
816 pqueue_grow (pqueue_t *pq)
818 event_t **new_elements;
821 if (pq->elements == pq->elements_embedded) {
822 new_elements = _cairo_malloc_ab (pq->max_size,
824 if (unlikely (new_elements == NULL))
827 memcpy (new_elements, pq->elements_embedded,
828 sizeof (pq->elements_embedded));
830 new_elements = _cairo_realloc_ab (pq->elements,
833 if (unlikely (new_elements == NULL))
837 pq->elements = new_elements;
/* Insert an event, sifting it up the binary heap; unwinds via longjmp on
 * OOM rather than returning an error. */
842 pqueue_push (sweep_line_t *sweep_line, event_t *event)
847 if (unlikely (sweep_line->queue.pq.size + 1 == sweep_line->queue.pq.max_size)) {
848 if (unlikely (! pqueue_grow (&sweep_line->queue.pq))) {
849 longjmp (sweep_line->unwind,
850 _cairo_error (CAIRO_STATUS_NO_MEMORY));
854 elements = sweep_line->queue.pq.elements;
855 for (i = ++sweep_line->queue.pq.size;
856 i != PQ_FIRST_ENTRY &&
857 event_compare (event,
858 elements[parent = PQ_PARENT_INDEX (i)]) < 0;
861 elements[i] = elements[parent];
/* Remove the minimum event, sifting the tail element down the heap. */
868 pqueue_pop (pqueue_t *pq)
870 event_t **elements = pq->elements;
874 tail = elements[pq->size--];
876 elements[PQ_FIRST_ENTRY] = NULL;
880 for (i = PQ_FIRST_ENTRY;
881 (child = PQ_LEFT_CHILD_INDEX (i)) <= pq->size;
/* Pick the smaller of the two children when a right child exists. */
884 if (child != pq->size &&
885 event_compare (elements[child+1],
886 elements[child]) < 0)
891 if (event_compare (elements[child], tail) >= 0)
894 elements[i] = elements[child];
/* Allocate a queue event from the freepool, populate it (fields elided in
 * this excerpt) and push it onto the priority queue; longjmps on OOM. */
900 event_insert (sweep_line_t *sweep_line,
906 queue_event_t *event;
908 event = _cairo_freepool_alloc (&sweep_line->queue.pool);
909 if (unlikely (event == NULL)) {
910 longjmp (sweep_line->unwind,
911 _cairo_error (CAIRO_STATUS_NO_MEMORY));
919 pqueue_push (sweep_line, (event_t *) event);
/* Return a queue event to the freepool. */
923 event_delete (sweep_line_t *sweep_line,
926 _cairo_freepool_free (&sweep_line->queue.pool, event);
/* Pull the next event in sweep order, merging the pre-sorted start-event
 * array with the dynamic priority queue; whichever compares smaller wins
 * and its source is advanced. */
929 static inline event_t *
930 event_next (sweep_line_t *sweep_line)
932 event_t *event, *cmp;
934 event = sweep_line->queue.pq.elements[PQ_FIRST_ENTRY];
935 cmp = *sweep_line->queue.start_events;
937 (cmp != NULL && event_compare (cmp, event) < 0))
940 sweep_line->queue.start_events++;
944 pqueue_pop (&sweep_line->queue.pq);
/* Generate start_event_sort(): a combsort over event_t* using
 * event_compare, for pre-sorting the start-event array. */
950 CAIRO_COMBSORT_DECLARE (start_event_sort, event_t *, event_compare)
/* Queue a stop event for an edge (arguments elided in this excerpt). */
953 event_insert_stop (sweep_line_t *sweep_line,
956 event_insert (sweep_line,
/* Queue an intersection event for a newly adjacent left/right pair, but
 * only when one is genuinely possible: coincident start points cannot
 * re-intersect, coincident end points are handled by DELETE ordering, and
 * a left edge with slope <= right cannot cross it below the sweep line. */
963 event_insert_if_intersect_below_current_y (sweep_line_t *sweep_line,
967 cairo_point_t intersection;
969 /* start points intersect */
970 if (left->edge.line.p1.x == right->edge.line.p1.x &&
971 left->edge.line.p1.y == right->edge.line.p1.y)
976 /* end points intersect, process DELETE events first */
977 if (left->edge.line.p2.x == right->edge.line.p2.x &&
978 left->edge.line.p2.y == right->edge.line.p2.y)
983 if (slope_compare (left, right) <= 0)
986 if (! edge_intersect (left, right, &intersection))
989 event_insert (sweep_line,
990 EVENT_TYPE_INTERSECTION,
/* The list link is the first member of edge_t, so a plain cast recovers
 * the containing edge from its list node. */
995 static inline edge_t *
996 link_to_edge (cairo_list_t *link)
998 return (edge_t *) link;
/* Insert an edge into the sorted active list at the current sub-row,
 * scanning forwards or backwards from the cached insert_cursor to exploit
 * locality between successive insertions.
 * NOTE(review): portions of the forward/backward scan are elided here. */
1002 sweep_line_insert (sweep_line_t *sweep_line,
1006 cairo_fixed_t y = sweep_line->current_subrow;
1008 pos = sweep_line->insert_cursor;
1009 if (pos == &sweep_line->active)
1010 pos = sweep_line->active.next;
1011 if (pos != &sweep_line->active) {
1014 cmp = sweep_line_compare_edges (link_to_edge (pos),
1018 while (pos->next != &sweep_line->active &&
1019 sweep_line_compare_edges (link_to_edge (pos->next),
1025 } else if (cmp > 0) {
1028 } while (pos != &sweep_line->active &&
1029 sweep_line_compare_edges (link_to_edge (pos),
1034 cairo_list_add (&edge->link, pos);
/* Remember where we inserted to seed the next search. */
1035 sweep_line->insert_cursor = &edge->link;
/* Reset the cell-walk cursor to the sentinel head of the list. */
1039 coverage_rewind (struct coverage *cells)
1041 cells->cursor = &cells->head;
/* Initialise the coverage cell list: a freepool for cell allocation plus
 * head/tail sentinels at INT_MIN/INT_MAX so real cells always have
 * neighbours on both sides. */
1045 coverage_init (struct coverage *cells)
1047 _cairo_freepool_init (&cells->pool,
1048 sizeof (struct cell));
1049 cells->head.prev = NULL;
1050 cells->head.next = &cells->tail;
1051 cells->head.x = INT_MIN;
1052 cells->tail.prev = &cells->head;
1053 cells->tail.next = NULL;
1054 cells->tail.x = INT_MAX;
1056 coverage_rewind (cells);
/* Release the freepool backing the cell list. */
1060 coverage_fini (struct coverage *cells)
1062 _cairo_freepool_fini (&cells->pool);
/* Drop all cells back into the freepool and relink the empty sentinels. */
1066 coverage_reset (struct coverage *cells)
1068 cells->head.next = &cells->tail;
1069 cells->tail.prev = &cells->head;
1071 _cairo_freepool_reset (&cells->pool);
1072 coverage_rewind (cells);
/* Allocate a fresh cell from the freepool and splice it into the list
 * immediately before `tail`; longjmps on OOM.
 * NOTE(review): the remaining link/init assignments are elided here. */
1075 static struct cell *
1076 coverage_alloc (sweep_line_t *sweep_line,
1082 cell = _cairo_freepool_alloc (&sweep_line->coverage.pool);
1083 if (unlikely (NULL == cell)) {
1084 longjmp (sweep_line->unwind,
1085 _cairo_error (CAIRO_STATUS_NO_MEMORY));
1088 tail->prev->next = cell;
1089 cell->prev = tail->prev;
1093 cell->uncovered_area = 0;
1094 cell->covered_height = 0;
1095 sweep_line->coverage.count++;
/* Find (or create) the cell for column x, walking from the cached cursor
 * in whichever direction x lies; updates and returns the cursor.
 * NOTE(review): the forward-walk loop is elided in this excerpt. */
1099 inline static struct cell *
1100 coverage_find (sweep_line_t *sweep_line, int x)
1104 cell = sweep_line->coverage.cursor;
1105 if (unlikely (cell->x > x)) {
1107 if (cell->prev->x < x)
1125 cell = coverage_alloc (sweep_line, cell, x);
1127 return sweep_line->coverage.cursor = cell;
/* Accumulate signed coverage for an edge segment spanning columns
 * [ix1, ix2] between heights y1 and y2, distributing covered_height and
 * uncovered_area into each crossed cell via incremental floored division.
 * NOTE(review): substantial portions (dx/dy setup, the single-column fast
 * path, loop bodies) are elided in this excerpt. */
1131 coverage_render_cells (sweep_line_t *sweep_line,
1132 cairo_fixed_t left, cairo_fixed_t right,
1133 cairo_fixed_t y1, cairo_fixed_t y2,
1140 /* Orient the edge left-to-right. */
1143 ix1 = _cairo_fixed_integer_part (left);
1144 fx1 = _cairo_fixed_fractional_part (left);
1146 ix2 = _cairo_fixed_integer_part (right);
1147 fx2 = _cairo_fixed_fractional_part (right);
1151 ix1 = _cairo_fixed_integer_part (right);
1152 fx1 = _cairo_fixed_fractional_part (right);
1154 ix2 = _cairo_fixed_integer_part (left);
1155 fx2 = _cairo_fixed_fractional_part (left);
1164 /* Add coverage for all pixels [ix1,ix2] on this row crossed
/* Height consumed crossing the remainder of the first column. */
1167 struct quorem y = floored_divrem ((STEP_X - fx1)*dy, dx);
/* Locate the first cell, reusing the cursor as in coverage_find. */
1170 cell = sweep_line->coverage.cursor;
1171 if (cell->x != ix1) {
1172 if (unlikely (cell->x > ix1)) {
1174 if (cell->prev->x < ix1)
1187 cell = coverage_alloc (sweep_line, cell, ix1);
1190 cell->uncovered_area += sign * y.quo * (STEP_X + fx1);
1191 cell->covered_height += sign * y.quo;
1195 if (cell->x != ++ix1)
1196 cell = coverage_alloc (sweep_line, cell, ix1);
/* Height consumed per full interior column, advanced incrementally. */
1198 struct quorem dydx_full = floored_divrem (STEP_X*dy, dx);
1201 cairo_fixed_t y_skip = dydx_full.quo;
1202 y.rem += dydx_full.rem;
1211 cell->covered_height += y_skip;
1212 cell->uncovered_area += y_skip*STEP_X;
1215 if (cell->x != ++ix1)
1216 cell = coverage_alloc (sweep_line, cell, ix1);
1217 } while (ix1 != ix2);
/* Final partial column up to fx2. */
1219 cell->uncovered_area += sign*(y2 - y.quo)*fx2;
1220 cell->covered_height += sign*(y2 - y.quo);
1221 sweep_line->coverage.cursor = cell;
/* Advance an edge's x by one full row using its precomputed dxdy_full
 * quorem; the remainder carries into the quotient when it wraps past dy
 * (the quotient increment itself is elided in this excerpt). */
1226 full_inc_edge (edge_t *edge)
1228 edge->x.quo += edge->dxdy_full.quo;
1229 edge->x.rem += edge->dxdy_full.rem;
1230 if (edge->x.rem >= 0) {
1232 edge->x.rem -= edge->dy;
/* Add one full row of signed coverage for an edge: the vertical and
 * single-column cases update one cell directly, otherwise defer to
 * coverage_render_cells for the multi-column span.
 * NOTE(review): x1/x2 assignments and early returns are elided here. */
1237 full_add_edge (sweep_line_t *sweep_line, edge_t *edge, int sign)
1240 cairo_fixed_t x1, x2;
1244 edge->current_sign = sign;
1246 ix1 = _cairo_fixed_integer_part (edge->x.quo);
1248 if (edge->vertical) {
1249 frac = _cairo_fixed_fractional_part (edge->x.quo);
1250 cell = coverage_find (sweep_line, ix1);
1251 cell->covered_height += sign * STEP_Y;
1252 cell->uncovered_area += sign * 2 * frac * STEP_Y;
1257 full_inc_edge (edge);
1260 ix2 = _cairo_fixed_integer_part (edge->x.quo);
1262 /* Edge is entirely within a column? */
1263 if (likely (ix1 == ix2)) {
1264 frac = _cairo_fixed_fractional_part (x1) +
1265 _cairo_fixed_fractional_part (x2);
1266 cell = coverage_find (sweep_line, ix1);
1267 cell->covered_height += sign * STEP_Y;
1268 cell->uncovered_area += sign * frac * STEP_Y;
1272 coverage_render_cells (sweep_line, x1, x2, 0, STEP_Y, sign);
/* Walk the active list for one full row under the non-zero winding rule,
 * pairing each filled span's left/right edges and emitting +1/-1 coverage;
 * also tracks whether every active edge is vertical (is_vertical).
 * NOTE(review): the winding-termination test and several loop exits are
 * elided in this excerpt. */
1276 full_nonzero (sweep_line_t *sweep_line)
1280 sweep_line->is_vertical = TRUE;
1281 pos = sweep_line->active.next;
1283 edge_t *left = link_to_edge (pos), *right;
1284 int winding = left->edge.dir;
1286 sweep_line->is_vertical &= left->vertical;
1288 pos = left->link.next;
1290 if (unlikely (pos == &sweep_line->active)) {
/* Unbalanced left edge at the end of the list: emit it alone. */
1291 full_add_edge (sweep_line, left, +1);
1295 right = link_to_edge (pos);
1297 sweep_line->is_vertical &= right->vertical;
1299 winding += right->edge.dir;
/* Stop extending the span when the next edge is at a different x. */
1301 if (pos == &sweep_line->active ||
1302 link_to_edge (pos)->x.quo != right->x.quo)
1308 if (! right->vertical)
1309 full_inc_edge (right);
1312 full_add_edge (sweep_line, left, +1);
1313 full_add_edge (sweep_line, right, -1);
1314 } while (pos != &sweep_line->active);
/* Even-odd counterpart of full_nonzero: spans close when the parity of
 * the edge count flips rather than when the winding sum returns to zero.
 * NOTE(review): similarly elided. */
1318 full_evenodd (sweep_line_t *sweep_line)
1322 sweep_line->is_vertical = TRUE;
1323 pos = sweep_line->active.next;
1325 edge_t *left = link_to_edge (pos), *right;
1328 sweep_line->is_vertical &= left->vertical;
1330 pos = left->link.next;
1332 if (pos == &sweep_line->active) {
1333 full_add_edge (sweep_line, left, +1);
1337 right = link_to_edge (pos);
1339 sweep_line->is_vertical &= right->vertical;
1341 if (++winding & 1) {
1342 if (pos == &sweep_line->active ||
1343 link_to_edge (pos)->x.quo != right->x.quo)
1349 if (! right->vertical)
1350 full_inc_edge (right);
1353 full_add_edge (sweep_line, left, +1);
1354 full_add_edge (sweep_line, right, -1);
1355 } while (pos != &sweep_line->active);
/* Convert the accumulated coverage cells into half-open spans and hand
 * them to the renderer for `height` rows starting at `y`; errors unwind
 * via longjmp.  Uses a stack buffer for the common small-span case.
 * NOTE(review): span-count bookkeeping, the cell x/prev_x updates and the
 * free of a heap-allocated spans buffer are elided in this excerpt. */
1359 render_rows (cairo_botor_scan_converter_t *self,
1360 sweep_line_t *sweep_line,
1362 cairo_span_renderer_t *renderer)
1364 cairo_half_open_span_t spans_stack[CAIRO_STACK_ARRAY_LENGTH (cairo_half_open_span_t)];
1365 cairo_half_open_span_t *spans = spans_stack;
1369 cairo_status_t status;
/* No coverage at all: emit an empty row range and return early. */
1371 if (unlikely (sweep_line->coverage.count == 0)) {
1372 status = renderer->render_rows (renderer, y, height, NULL, 0);
1373 if (unlikely (status))
1374 longjmp (sweep_line->unwind, status);
1378 /* Allocate enough spans for the row. */
1380 num_spans = 2*sweep_line->coverage.count+2;
1381 if (unlikely (num_spans > ARRAY_LENGTH (spans_stack))) {
1382 spans = _cairo_malloc_ab (num_spans, sizeof (cairo_half_open_span_t));
1383 if (unlikely (spans == NULL)) {
1384 longjmp (sweep_line->unwind,
1385 _cairo_error (CAIRO_STATUS_NO_MEMORY));
1389 /* Form the spans from the coverage and areas. */
1391 prev_x = self->xmin;
1393 cell = sweep_line->coverage.head.next;
/* Span covering the gap before this cell at the running coverage. */
1399 spans[num_spans].x = prev_x;
1400 spans[num_spans].inverse = 0;
1401 spans[num_spans].coverage = AREA_TO_ALPHA (cover);
1405 cover += cell->covered_height*STEP_X*2;
1406 area = cover - cell->uncovered_area;
1408 spans[num_spans].x = x;
1409 spans[num_spans].coverage = AREA_TO_ALPHA (area);
1413 } while ((cell = cell->next) != &sweep_line->coverage.tail);
/* Trailing span to the right clip edge, plus an inverse terminator when
 * coverage is still non-zero at xmax. */
1415 if (prev_x <= self->xmax) {
1416 spans[num_spans].x = prev_x;
1417 spans[num_spans].inverse = 0;
1418 spans[num_spans].coverage = AREA_TO_ALPHA (cover);
1422 if (cover && prev_x < self->xmax) {
1423 spans[num_spans].x = self->xmax;
1424 spans[num_spans].inverse = 1;
1425 spans[num_spans].coverage = 0;
1429 status = renderer->render_rows (renderer, y, height, spans, num_spans);
1431 if (unlikely (spans != spans_stack))
1434 coverage_reset (&sweep_line->coverage);
1436 if (unlikely (status))
1437 longjmp (sweep_line->unwind, status);
/* Re-emit last row's coverage for every still-active edge: edges with a
 * recorded sign are re-added, others merely advance their x. */
1441 full_repeat (sweep_line_t *sweep)
1445 cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
1446 if (edge->current_sign)
1447 full_add_edge (sweep, edge, edge->current_sign);
1448 else if (! edge->vertical)
1449 full_inc_edge (edge);
/* Clear the recorded per-edge signs after a run of repeated rows. */
1454 full_reset (sweep_line_t *sweep)
1458 cairo_list_foreach_entry (edge, edge_t, &sweep->active, link)
1459 edge->current_sign = 0;
/* Advance the sweep over whole rows from current_row to `row`: empty
 * active lists emit blank rows in one call; all-vertical geometry renders
 * one coalesced block; otherwise each row is rendered via full_repeat.
 * NOTE(review): early returns and some flow between branches are elided
 * in this excerpt. */
1463 full_step (cairo_botor_scan_converter_t *self,
1464 sweep_line_t *sweep_line,
1466 cairo_span_renderer_t *renderer)
1470 top = _cairo_fixed_integer_part (sweep_line->current_row);
1471 bottom = _cairo_fixed_integer_part (row);
1472 if (cairo_list_is_empty (&sweep_line->active)) {
1473 cairo_status_t status;
1475 status = renderer->render_rows (renderer, top, bottom - top, NULL, 0);
1476 if (unlikely (status))
1477 longjmp (sweep_line->unwind, status);
1482 if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
1483 full_nonzero (sweep_line);
1485 full_evenodd (sweep_line);
1487 if (sweep_line->is_vertical || bottom == top + 1) {
1488 render_rows (self, sweep_line, top, bottom - top, renderer);
1489 full_reset (sweep_line);
1493 render_rows (self, sweep_line, top++, 1, renderer);
1495 full_repeat (sweep_line);
1496 render_rows (self, sweep_line, top, 1, renderer);
1497 } while (++top != bottom);
1499 full_reset (sweep_line);
/* Advance an edge's x by a fractional `height` of a row: the one-subrow
 * fast path adds dxdy once; the general path multiplies and normalises
 * the remainder back into [ -dy, 0 ) with an explicit carry. */
1502 cairo_always_inline static void
1503 sub_inc_edge (edge_t *edge,
1504 cairo_fixed_t height)
1507 edge->x.quo += edge->dxdy.quo;
1508 edge->x.rem += edge->dxdy.rem;
1509 if (edge->x.rem >= 0) {
1511 edge->x.rem -= edge->dy;
1514 edge->x.quo += height * edge->dxdy.quo;
1515 edge->x.rem += height * edge->dxdy.rem;
1516 if (edge->x.rem >= 0) {
1517 int carry = edge->x.rem / edge->dy + 1;
1518 edge->x.quo += carry;
1519 edge->x.rem -= carry * edge->dy;
/* Record a sign change for an edge at subrow y as a run node (allocated
 * from the runs freepool); longjmps on OOM.  The run fields y/sign are
 * elided in this excerpt. */
1525 sub_add_run (sweep_line_t *sweep_line, edge_t *edge, int y, int sign)
1529 run = _cairo_freepool_alloc (&sweep_line->runs);
1530 if (unlikely (run == NULL))
1531 longjmp (sweep_line->unwind, _cairo_error (CAIRO_STATUS_NO_MEMORY));
1535 run->next = edge->runs;
1538 edge->current_sign = sign;
/* Two edges are treated as coincident when their defining lines are
 * identical; the commented-out alternative would also accept x-order ties. */
1541 inline static cairo_bool_t
1542 edges_coincident (edge_t *left, edge_t *right, cairo_fixed_t y)
1544 /* XXX is compare_x_for_y() worth executing during sub steps? */
1545 return line_equal (&left->edge.line, &right->edge.line);
1546 //edges_compare_x_for_y (&left->edge, &right->edge, y) >= 0;
/*
 * One sub-row pass of the sweep under the non-zero winding rule:
 * walk the active list left-to-right, accumulate the winding number,
 * and emit +1/-1 runs at the left/right boundary edges of each
 * covered span (0-runs cancel stale boundaries).
 *
 * NOTE(review): listing has gaps (orig. 1551, 1554-1557, 1560, 1562,
 * 1566-1568, 1570-1573, 1576-1580, 1583-1584) -- presumably the
 * function's return type / opening brace, the `pos` declaration, the
 * outer do-loop opening, winding-zero tests and interior braces.
 * Verify against the complete source.
 */
1550 sub_nonzero (sweep_line_t *sweep_line)
1552 cairo_fixed_t y = sweep_line->current_subrow;
1553 cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
1556 pos = sweep_line->active.next;
1558 edge_t *left = link_to_edge (pos), *right;
1559 int winding = left->edge.dir;
1561 pos = left->link.next;
/* Last edge in the active list: close the span with a lone +1 run. */
1563 if (unlikely (pos == &sweep_line->active)) {
1564 if (left->current_sign != +1)
1565 sub_add_run (sweep_line, left, fy, +1);
/* Scan rightwards until the winding returns to zero. */
1569 right = link_to_edge (pos);
1572 winding += right->edge.dir;
/* Stop at the last edge, or where the next edge is not coincident
 * with this one (coincident edges share the same boundary). */
1574 if (pos == &sweep_line->active ||
1575 ! edges_coincident (right, link_to_edge (pos), y))
/* Interior edge whose previous run is now stale: cancel it. */
1581 if (right->current_sign)
1582 sub_add_run (sweep_line, right, fy, 0);
/* Emit the span boundaries: +1 on the left edge, -1 on the right. */
1585 if (left->current_sign != +1)
1586 sub_add_run (sweep_line, left, fy, +1);
1587 if (right->current_sign != -1)
1588 sub_add_run (sweep_line, right, fy, -1);
1589 } while (pos != &sweep_line->active);
/*
 * One sub-row pass of the sweep under the even-odd fill rule: the
 * parity of the number of crossed edges decides inside/outside, so
 * `++winding & 1` toggles span state at each edge. Structure mirrors
 * sub_nonzero() above.
 *
 * NOTE(review): listing has gaps (orig. 1594, 1597-1600, 1602-1603,
 * 1605, 1609-1611, 1613-1614, 1618-1622, 1625-1626) -- presumably the
 * return type / braces, `pos` and `winding` declarations and the
 * do-loop openings. In particular `winding`'s declaration is not
 * visible here. Verify against the complete source.
 */
1593 sub_evenodd (sweep_line_t *sweep_line)
1595 cairo_fixed_t y = sweep_line->current_subrow;
1596 cairo_fixed_t fy = _cairo_fixed_fractional_part (y);
1599 pos = sweep_line->active.next;
1601 edge_t *left = link_to_edge (pos), *right;
1604 pos = left->link.next;
/* Last edge in the active list: close the span with a lone +1 run. */
1606 if (unlikely (pos == &sweep_line->active)) {
1607 if (left->current_sign != +1)
1608 sub_add_run (sweep_line, left, fy, +1);
1612 right = link_to_edge (pos);
/* Odd parity -> this edge ends a covered span (unless coincident
 * with the next edge, in which case keep scanning). */
1615 if (++winding & 1) {
1616 if (pos == &sweep_line->active ||
1617 ! edges_coincident (right, link_to_edge (pos), y))
/* Interior edge whose previous run is now stale: cancel it. */
1623 if (right->current_sign)
1624 sub_add_run (sweep_line, right, fy, 0);
/* Emit the span boundaries: +1 on the left edge, -1 on the right. */
1627 if (left->current_sign != +1)
1628 sub_add_run (sweep_line, left, fy, +1);
1629 if (right->current_sign != -1)
1630 sub_add_run (sweep_line, right, fy, -1);
1631 } while (pos != &sweep_line->active);
/*
 * Perform one sub-row step: dispatch to the fill-rule-specific run
 * emitter. A no-op when there are no active edges.
 */
1634 cairo_always_inline static void
1635 sub_step (cairo_botor_scan_converter_t *self,
1636 sweep_line_t *sweep_line)
1638 if (cairo_list_is_empty (&sweep_line->active))
1641 if (self->fill_rule == CAIRO_FILL_RULE_WINDING)
1642 sub_nonzero (sweep_line);
1644 sub_evenodd (sweep_line);
/*
 * Convert an edge's accumulated runs (sign changes within [y1, y2))
 * into coverage cells. The runs are first re-sorted top-to-bottom,
 * then for each run the edge is advanced to the run's span and the
 * swept area is added to the coverage cells with the run's sign.
 *
 * NOTE(review): listing has large gaps (orig. 1650-1651, 1653-1656,
 * 1659-1661, 1663-1667, 1669-1670, 1672-1676, 1679, 1681-1685, 1688,
 * 1691-1693, 1699, 1701-1704, 1706) -- the `tail` sentinel setup, the
 * insertion-sort body, x1/x2 sampling and loop braces are not visible.
 * Verify against the complete source before editing.
 */
1648 coverage_render_runs (sweep_line_t *sweep, edge_t *edge,
1649 cairo_fixed_t y1, cairo_fixed_t y2)
1652 struct run *run = &tail;
1657 /* Order the runs top->bottom */
1658 while (edge->runs) {
1662 edge->runs = r->next;
/* Advance the edge's x intercept to the top of this run's span. */
1668 sub_inc_edge (edge, run->y - y1);
1671 cairo_fixed_t x1, x2;
/* A full STEP_Y span can use the cheaper full-row increment. */
1677 if (y2 - y1 == STEP_Y)
1678 full_inc_edge (edge);
1680 sub_inc_edge (edge, y2 - y1);
1686 ix1 = _cairo_fixed_integer_part (x1);
1687 ix2 = _cairo_fixed_integer_part (x2);
1689 /* Edge is entirely within a column? */
1690 if (likely (ix1 == ix2)) {
/* frac approximates the uncovered area of the trapezoid within
 * the single pixel column (x1+x2 fractional sum). */
1694 frac = _cairo_fixed_fractional_part (x1) +
1695 _cairo_fixed_fractional_part (x2);
1696 cell = coverage_find (sweep, ix1);
1697 cell->covered_height += run->sign * (y2 - y1);
1698 cell->uncovered_area += run->sign * (y2 - y1) * frac;
/* Edge crosses pixel columns: distribute area cell by cell. */
1700 coverage_render_cells (sweep, x1, x2, y1, y2, run->sign);
1705 } while (run->next != NULL);
/*
 * Fast path for vertical edges: x never changes, so all runs collapse
 * into a single signed height that is credited to one coverage cell.
 * The uncovered_area term accounts for the sub-pixel x position
 * (factor 2 matches the doubled-area convention used elsewhere).
 *
 * NOTE(review): gaps in this listing (orig. 1710-1714, 1716, 1718-1720,
 * 1724) -- presumably the return type, local declarations (`run`,
 * `height`, `cell`) and braces. Verify against the complete source.
 */
1709 coverage_render_vertical_runs (sweep_line_t *sweep, edge_t *edge, cairo_fixed_t y2)
1715 for (run = edge->runs; run != NULL; run = run->next) {
1717 height += run->sign * (y2 - run->y);
1721 cell = coverage_find (sweep, _cairo_fixed_integer_part (edge->x.quo));
1722 cell->covered_height += height;
1723 cell->uncovered_area += 2 * _cairo_fixed_fractional_part (edge->x.quo) * height;
/*
 * Finish the current pixel row: take the final sub-step, convert every
 * active and stopped edge's runs into coverage, then hand the completed
 * row to the span renderer via render_rows().
 *
 * NOTE(review): gaps in this listing (orig. 1730-1732, 1734, 1736,
 * 1741, 1744, 1746-1747, 1750-1751, 1755, 1757-1758, 1760-1762,
 * 1767-1768, 1772-1773, 1775, 1777, 1780-1781) -- local declarations
 * (`edge`, `y1`), else-branches and braces are not visible. Verify
 * against the complete source before editing.
 */
1726 cairo_always_inline static void
1727 sub_emit (cairo_botor_scan_converter_t *self,
1728 sweep_line_t *sweep,
1729 cairo_span_renderer_t *renderer)
1733 sub_step (self, sweep);
1735 /* convert the runs into coverages */
/* Pass 1: edges still active at the bottom of the row. */
1737 cairo_list_foreach_entry (edge, edge_t, &sweep->active, link) {
1738 if (edge->runs == NULL) {
/* No sign changes this row: just advance the intercept, taking
 * the partial first row into account for freshly-started edges. */
1739 if (! edge->vertical) {
1740 if (edge->flags & START) {
1742 STEP_Y - _cairo_fixed_fractional_part (edge->edge.top));
1743 edge->flags &= ~START;
1745 full_inc_edge (edge);
1748 if (edge->vertical) {
1749 coverage_render_vertical_runs (sweep, edge, STEP_Y);
/* Edges that start mid-row only cover from their top downwards. */
1752 if (edge->flags & START) {
1753 y1 = _cairo_fixed_fractional_part (edge->edge.top);
1754 edge->flags &= ~START;
1756 coverage_render_runs (sweep, edge, y1, STEP_Y);
1759 edge->current_sign = 0;
/* Pass 2: edges that stopped within this row -- their coverage ends at
 * the fractional bottom rather than at STEP_Y. */
1763 cairo_list_foreach_entry (edge, edge_t, &sweep->stopped, link) {
1764 int y2 = _cairo_fixed_fractional_part (edge->edge.bottom);
1765 if (edge->vertical) {
1766 coverage_render_vertical_runs (sweep, edge, y2);
1769 if (edge->flags & START)
1770 y1 = _cairo_fixed_fractional_part (edge->edge.top);
1771 coverage_render_runs (sweep, edge, y1, y2);
/* Stopped edges and their runs are no longer needed. */
1774 cairo_list_init (&sweep->stopped);
1776 _cairo_freepool_reset (&sweep->runs);
/* Emit the single completed row of spans. */
1778 render_rows (self, sweep,
1779 _cairo_fixed_integer_part (sweep->current_row), 1,
/*
 * Initialise the sweep-line state: empty active/stopped edge lists,
 * coverage accumulator, run freepool, and the event queue. The start
 * events are sorted once up front and consumed as a NULL-terminated
 * array; only stop/intersection events go through the priority queue.
 *
 * NOTE(review): gaps in this listing (orig. 1783, 1786-1787, 1791,
 * 1794, 1797, 1800, 1802, 1807-1808) -- presumably the return type,
 * the `num_events` parameter line and braces. Verify against full
 * source.
 */
1784 sweep_line_init (sweep_line_t *sweep_line,
1785 event_t **start_events,
1788 cairo_list_init (&sweep_line->active);
1789 cairo_list_init (&sweep_line->stopped);
/* Insert cursor caches the last insertion point to speed up the
 * (mostly sorted) edge insertions. */
1790 sweep_line->insert_cursor = &sweep_line->active;
/* Sentinel rows: no row processed yet. */
1792 sweep_line->current_row = INT32_MIN;
1793 sweep_line->current_subrow = INT32_MIN;
1795 coverage_init (&sweep_line->coverage);
1796 _cairo_freepool_init (&sweep_line->runs, sizeof (struct run));
/* Sort and NULL-terminate the start-event array for linear consumption. */
1798 start_event_sort (start_events, num_events);
1799 start_events[num_events] = NULL;
1801 sweep_line->queue.start_events = start_events;
1803 _cairo_freepool_init (&sweep_line->queue.pool,
1804 sizeof (queue_event_t));
1805 pqueue_init (&sweep_line->queue.pq);
/* Empty-queue sentinel for event_next(). */
1806 sweep_line->queue.pq.elements[PQ_FIRST_ENTRY] = NULL;
/*
 * Remove an edge from the active list when its stop event fires.
 * The edge is moved onto the `stopped` list (not freed) so sub_emit()
 * can still render its remaining coverage for this row; the STOP flag
 * marks it for later cleanup.
 *
 * NOTE(review): gaps (orig. 1811-1812, 1815, 1817, 1820) -- presumably
 * the return type, the `edge` parameter line and braces. Verify
 * against full source.
 */
1810 sweep_line_delete (sweep_line_t *sweep_line,
/* Keep the cached insertion cursor valid if it pointed at this edge. */
1813 if (sweep_line->insert_cursor == &edge->link)
1814 sweep_line->insert_cursor = edge->link.prev;
1816 cairo_list_del (&edge->link);
1818 cairo_list_add_tail (&edge->link, &sweep_line->stopped);
1819 edge->flags |= STOP;
/*
 * Swap two *adjacent* edges in the active list (left immediately
 * precedes right) -- used when an intersection event fires and the
 * edges exchange x-order. Pure doubly-linked-list pointer surgery; the
 * edges themselves are not modified.
 *
 * NOTE(review): gaps (orig. 1824-1826, 1833) -- the `left`/`right`
 * parameter lines and braces are not visible. Verify against full
 * source.
 */
1823 sweep_line_swap (sweep_line_t *sweep_line,
1827 right->link.prev = left->link.prev;
1828 left->link.next = right->link.next;
1829 right->link.next = &left->link;
1830 left->link.prev = &right->link;
/* Patch the outer neighbours to point at the swapped pair. */
1831 left->link.next->prev = &left->link;
1832 right->link.prev->next = &right->link;
/*
 * Release all sweep-line resources, mirroring sweep_line_init():
 * priority queue, event pool, coverage accumulator and run pool.
 *
 * NOTE(review): gaps (orig. 1835, 1837, 1842) -- presumably the return
 * type and braces.
 */
1836 sweep_line_fini (sweep_line_t *sweep_line)
1838 pqueue_fini (&sweep_line->queue.pq);
1839 _cairo_freepool_fini (&sweep_line->queue.pool);
1840 coverage_fini (&sweep_line->coverage);
1841 _cairo_freepool_fini (&sweep_line->runs);
/*
 * Main Bentley-Ottmann sweep loop. Consumes start/stop/intersection
 * events in y order, maintains the sorted active-edge list, and emits
 * coverage row by row (STEP_Y at a time, with sub-row steps for events
 * that fall inside a row). Out-of-memory conditions anywhere below
 * unwind here via setjmp/longjmp (see sub_add_run()).
 *
 * NOTE(review): this listing has many gaps (orig. 1848, 1851-1856,
 * 1859-1860, 1865, 1872-1874, 1878-1880, 1885, 1888, 1891, 1895-1896,
 * 1900-1903, 1907, 1910, 1912, 1915, 1919-1922, 1926, 1929, 1931-1932,
 * 1935-1936, 1939, 1941, 1946-1947, 1951-1955, 1957-1958, 1961,
 * 1966-1968, 1974, 1980-1983, 1985-1986) -- declarations (`ybot`,
 * `event`, `e1`, `e2`, `bottom`), loop openings, `goto unwind` targets,
 * `break`s and the unwind label are not visible. Verify against the
 * complete source before editing.
 */
1844 static cairo_status_t
1845 botor_generate (cairo_botor_scan_converter_t *self,
1846 event_t **start_events,
1847 cairo_span_renderer_t *renderer)
1849 cairo_status_t status;
1850 sweep_line_t sweep_line;
1853 cairo_list_t *left, *right;
1857 sweep_line_init (&sweep_line, start_events, self->num_edges);
/* Error trampoline: sub_add_run() et al. longjmp() here on OOM. */
1858 if ((status = setjmp (sweep_line.unwind)))
/* Start the sweep at the top of the clip extents. */
1861 ybot = self->extents.p2.y;
1862 sweep_line.current_subrow = self->extents.p1.y;
1863 sweep_line.current_row = _cairo_fixed_floor (self->extents.p1.y);
1864 event = *sweep_line.queue.start_events++;
1866 /* Can we process a full step in one go? */
1867 if (event->y >= sweep_line.current_row + STEP_Y) {
1868 bottom = _cairo_fixed_floor (event->y);
1869 full_step (self, &sweep_line, bottom, renderer);
1870 sweep_line.current_row = bottom;
1871 sweep_line.current_subrow = bottom;
/* Otherwise take a sub-row step up to the event's y. */
1875 if (event->y > sweep_line.current_subrow) {
1876 sub_step (self, &sweep_line);
1877 sweep_line.current_subrow = event->y;
1881 /* Update the active list using Bentley-Ottmann */
1882 switch (event->type) {
1883 case EVENT_TYPE_START:
1884 e1 = ((start_event_t *) event)->edge;
/* Insert the new edge and schedule its stop event; then check for
 * intersections with its new neighbours. */
1886 sweep_line_insert (&sweep_line, e1);
1887 event_insert_stop (&sweep_line, e1);
1889 left = e1->link.prev;
1890 right = e1->link.next;
1892 if (left != &sweep_line.active) {
1893 event_insert_if_intersect_below_current_y (&sweep_line,
1894 link_to_edge (left), e1);
1897 if (right != &sweep_line.active) {
1898 event_insert_if_intersect_below_current_y (&sweep_line,
1899 e1, link_to_edge (right));
1904 case EVENT_TYPE_STOP:
1905 e1 = ((queue_event_t *) event)->e1;
1906 event_delete (&sweep_line, event);
1908 left = e1->link.prev;
1909 right = e1->link.next;
1911 sweep_line_delete (&sweep_line, e1);
/* Removing an edge makes its neighbours adjacent: test them. */
1913 if (left != &sweep_line.active &&
1914 right != &sweep_line.active)
1916 event_insert_if_intersect_below_current_y (&sweep_line,
1917 link_to_edge (left),
1918 link_to_edge (right));
1923 case EVENT_TYPE_INTERSECTION:
1924 e1 = ((queue_event_t *) event)->e1;
1925 e2 = ((queue_event_t *) event)->e2;
1927 event_delete (&sweep_line, event);
/* Stale intersection: one of the edges already stopped. */
1928 if (e1->flags & STOP)
1930 if (e2->flags & STOP)
1933 /* skip this intersection if its edges are not adjacent */
1934 if (&e2->link != e1->link.next)
1937 left = e1->link.prev;
1938 right = e2->link.next;
1940 sweep_line_swap (&sweep_line, e1, e2);
1942 /* after the swap e2 is left of e1 */
1943 if (left != &sweep_line.active) {
1944 event_insert_if_intersect_below_current_y (&sweep_line,
1945 link_to_edge (left), e2);
1948 if (right != &sweep_line.active) {
1949 event_insert_if_intersect_below_current_y (&sweep_line,
1950 e1, link_to_edge (right));
1956 event = event_next (&sweep_line);
/* Inner loop: drain all events at this exact sub-row; middle loop:
 * all events within the current pixel row. */
1959 } while (event->y == sweep_line.current_subrow);
1960 } while (event->y < sweep_line.current_row + STEP_Y);
/* Row complete: emit its coverage and move to the next row. */
1962 bottom = sweep_line.current_row + STEP_Y;
1963 sub_emit (self, &sweep_line, renderer);
1964 sweep_line.current_subrow = bottom;
1965 sweep_line.current_row = sweep_line.current_subrow;
1969 /* flush any partial spans */
1970 if (sweep_line.current_subrow != sweep_line.current_row) {
1971 sub_emit (self, &sweep_line, renderer);
1972 sweep_line.current_row += STEP_Y;
1973 sweep_line.current_subrow = sweep_line.current_row;
1975 /* clear the rest */
/* Everything below the last edge down to the extents bottom is empty. */
1976 if (sweep_line.current_subrow < ybot) {
1977 bottom = _cairo_fixed_integer_part (sweep_line.current_row);
1978 status = renderer->render_rows (renderer,
1979 bottom, _cairo_fixed_integer_ceil (ybot) - bottom,
1984 sweep_line_fini (&sweep_line);
/*
 * Scan-converter entry point (cairo_scan_converter_t::generate).
 * Builds the array of start events -- on the stack for small edge
 * counts, heap-allocated in one block otherwise -- then runs the sweep
 * via botor_generate(). With no edges at all, just render the whole
 * extents as empty rows.
 *
 * NOTE(review): gaps in this listing (orig. 1992, 2000-2002, 2009-2011,
 * 2020, 2022-2024, 2026-2028, 2031, 2035-2039, 2041, 2043-2046) --
 * declarations (`num_events`, `i`, `j`, `edge`), the render_rows
 * spans/callback arguments, the inner `edge` advance/`j++`, and the
 * final free()/return are not visible. Verify against the complete
 * source before editing.
 */
1989 static cairo_status_t
1990 _cairo_botor_scan_converter_generate (void *converter,
1991 cairo_span_renderer_t *renderer)
1993 cairo_botor_scan_converter_t *self = converter;
1994 start_event_t stack_events[CAIRO_STACK_ARRAY_LENGTH (start_event_t)];
1995 start_event_t *events;
1996 event_t *stack_event_ptrs[ARRAY_LENGTH (stack_events) + 1];
1997 event_t **event_ptrs;
1998 struct _cairo_botor_scan_converter_chunk *chunk;
1999 cairo_status_t status;
2003 num_events = self->num_edges;
/* Nothing to rasterize: emit the extents as empty rows. */
2004 if (unlikely (0 == num_events)) {
2005 return renderer->render_rows (renderer,
2006 _cairo_fixed_integer_floor (self->extents.p1.y),
2007 _cairo_fixed_integer_ceil (self->extents.p2.y) -
2008 _cairo_fixed_integer_floor (self->extents.p1.y),
/* Use stack arrays when they fit; otherwise one allocation holds both
 * the events and the (num_events + 1) pointer array. */
2012 events = stack_events;
2013 event_ptrs = stack_event_ptrs;
2014 if (unlikely (num_events >= ARRAY_LENGTH (stack_events))) {
2015 events = _cairo_malloc_ab_plus_c (num_events,
2016 sizeof (start_event_t) + sizeof (event_t *),
2017 sizeof (event_t *));
2018 if (unlikely (events == NULL))
2019 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
2021 event_ptrs = (event_t **) (events + num_events);
/* One start event per edge, walking every chunk of the edge pool. */
2025 for (chunk = &self->chunks; chunk != NULL; chunk = chunk->next) {
2029 for (i = 0; i < chunk->count; i++) {
2030 event_ptrs[j] = (event_t *) &events[j];
2032 events[j].y = edge->edge.top;
2033 events[j].type = EVENT_TYPE_START;
2034 events[j].edge = edge;
2040 status = botor_generate (self, event_ptrs, renderer);
2042 if (events != stack_events)
/*
 * Allocate one edge_t from the converter's chunked pool, growing the
 * pool (doubling chunk size) when the tail chunk is full. Returns NULL
 * on allocation failure.
 *
 * NOTE(review): gaps (orig. 2048, 2050, 2052-2053, 2055-2056, 2059,
 * 2062-2063, 2065-2067, 2069-2071) -- the return type (presumably
 * edge_t *), `chunk = self->tail`, the `size` declaration,
 * chunk->next/size/count initialisation and self->tail update are not
 * visible. Verify against the complete source before editing.
 */
2049 botor_allocate_edge (cairo_botor_scan_converter_t *self)
2051 struct _cairo_botor_scan_converter_chunk *chunk;
2054 if (chunk->count == chunk->size) {
/* Grow geometrically; the chunk header and its edge storage are a
 * single allocation (edges live immediately after the header). */
2057 size = chunk->size * 2;
2058 chunk->next = _cairo_malloc_ab_plus_c (size,
2060 sizeof (struct _cairo_botor_scan_converter_chunk));
2061 if (unlikely (chunk->next == NULL))
2064 chunk = chunk->next;
2068 chunk->base = chunk + 1;
2072 return (edge_t *) chunk->base + chunk->count++;
/*
 * Add one polygon edge to the converter, precomputing the incremental
 * x-stepping state: x at the edge's top, the per-sub-row slope `dxdy`
 * as an exact quotient/remainder pair, and `dxdy_full` (the per-STEP_Y
 * increment) for edges spanning more than one pixel row. Vertical
 * edges (dx == 0) skip the slope computation entirely.
 *
 * NOTE(review): gaps in this listing (orig. 2078-2079, 2081, 2085,
 2087-2088, 2091-2094, 2096-2098, 2101, 2106-2107, 2109, 2111-2112,
 * 2115, 2118-2121, 2123-2127) -- the `e` declaration, e->edge
 * assignment, the vertical branch (e->vertical = TRUE, dy assignment),
 * the muldivrem dx/dy arguments, flags/runs initialisation and
 * self->num_edges++ are not visible. Verify against the complete
 * source before editing.
 */
2075 static cairo_status_t
2076 botor_add_edge (cairo_botor_scan_converter_t *self,
2077 const cairo_edge_t *edge)
2080 cairo_fixed_t dx, dy;
2082 e = botor_allocate_edge (self);
2083 if (unlikely (e == NULL))
2084 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
2086 cairo_list_init (&e->link);
2089 dx = edge->line.p2.x - edge->line.p1.x;
2090 dy = edge->line.p2.y - edge->line.p1.y;
/* Vertical edge: x is constant; no slope state needed. */
2095 e->x.quo = edge->line.p1.x;
2099 e->dxdy_full.quo = 0;
2100 e->dxdy_full.rem = 0;
/* Sloped edge: exact per-sub-row increment via floored division. */
2102 e->vertical = FALSE;
2103 e->dxdy = floored_divrem (dx, dy);
2104 if (edge->top == edge->line.p1.y) {
2105 e->x.quo = edge->line.p1.x;
/* Edge is clipped at the top: interpolate x at edge->top. */
2108 e->x = floored_muldivrem (edge->top - edge->line.p1.y,
2110 e->x.quo += edge->line.p1.x;
/* Only precompute the full-row increment when the edge actually spans
 * more than one pixel row. */
2113 if (_cairo_fixed_integer_part (edge->bottom) - _cairo_fixed_integer_part (edge->top) > 1) {
2114 e->dxdy_full = floored_muldivrem (STEP_Y, dx, dy);
2116 e->dxdy_full.quo = 0;
2117 e->dxdy_full.rem = 0;
2122 e->current_sign = 0;
2128 return CAIRO_STATUS_SUCCESS;
/*
 * Destructor (cairo_scan_converter_t::destroy): free every
 * heap-allocated edge chunk. The first chunk (self->chunks) lives
 * inside the converter's embedded buffer, so iteration starts at
 * self->chunks.next.
 *
 * NOTE(review): gaps (orig. 2131, 2133, 2136, 2138-2143) -- the return
 * type, braces and the loop body (presumably `next = chunk->next;
 * free (chunk);`) are not visible. Verify against the complete source.
 */
2132 _cairo_botor_scan_converter_destroy (void *converter)
2134 cairo_botor_scan_converter_t *self = converter;
2135 struct _cairo_botor_scan_converter_chunk *chunk, *next;
2137 for (chunk = self->chunks.next; chunk != NULL; chunk = next) {
2144 _cairo_botor_scan_converter_init (cairo_botor_scan_converter_t *self,
2145 const cairo_box_t *extents,
2146 cairo_fill_rule_t fill_rule)
2148 self->base.destroy = _cairo_botor_scan_converter_destroy;
2149 self->base.generate = _cairo_botor_scan_converter_generate;
2151 self->extents = *extents;
2152 self->fill_rule = fill_rule;
2154 self->xmin = _cairo_fixed_integer_floor (extents->p1.x);
2155 self->xmax = _cairo_fixed_integer_ceil (extents->p2.x);
2157 self->chunks.base = self->buf;
2158 self->chunks.next = NULL;
2159 self->chunks.count = 0;
2160 self->chunks.size = sizeof (self->buf) / sizeof (edge_t);
2161 self->tail = &self->chunks;
2163 self->num_edges = 0;