/* Cairo - a vector graphics library with display and print output
 *
 * Copyright © 2009 Kristian Høgsberg
 * Copyright © 2009 Chris Wilson
 * Copyright © 2009 Intel Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 * The Original Code is the cairo graphics library.
 *
 * The Initial Developer of the Original Code is Kristian Høgsberg.
 *
 * Based on the xf86-intel-driver i965 render acceleration code,
 * authored by:
 *    Wang Zhenyu <zhenyu.z.wang@intel.com>
 *    Eric Anholt <eric@anholt.net>
 *    Carl Worth <cworth@redhat.com>
 *    Keith Packard <keithp@keithp.com>
 */

/* XXX
 *
 * FIXME: Use brw_PLN for [DevCTG-B+]
 *
 */

#include "cairoint.h"

#include "cairo-drm-private.h"
#include "cairo-drm-ioctl-private.h"
#include "cairo-drm-intel-private.h"
#include "cairo-drm-intel-command-private.h"
#include "cairo-drm-intel-ioctl-private.h"
#include "cairo-drm-i965-private.h"

#include "cairo-boxes-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-default-context-private.h"
#include "cairo-error-private.h"
#include "cairo-region-private.h"
#include "cairo-surface-offset-private.h"

#include <sys/ioctl.h>
#include <errno.h>

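/* Largest 2D surface dimension handled by this backend; gen4 render
 * targets and sampler surfaces appear to be limited to 8192 pixels in
 * each dimension (an assumption based on the hardware generation this
 * file targets). */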
#define I965_MAX_SIZE 8192

static const cairo_surface_backend_t i965_surface_backend;

static void
i965_stream_init (i965_stream_t *stream,
                  uint8_t *data, uint32_t size,
                  struct i965_pending_relocation *pending, int max_pending,
                  struct drm_i915_gem_relocation_entry *relocations, int max_relocations)
{
    stream->used = stream->committed = 0;
    stream->data = data;
    stream->size = size;
    stream->serial = 1;

    stream->num_pending_relocations = 0;
    stream->max_pending_relocations = max_pending;
    stream->pending_relocations = pending;

    stream->num_relocations = 0;
    stream->max_relocations = max_relocations;
    stream->relocations = relocations;
}

static void
i965_add_relocation (i965_device_t *device,
                     intel_bo_t *bo,
                     uint32_t read_domains,
                     uint32_t write_domain)
{
    if (bo->exec == NULL) {
        int i;

        device->exec.gtt_size += bo->base.size;

        i = device->exec.count++;
        assert (i < ARRAY_LENGTH (device->exec.exec));

        device->exec.exec[i].handle = bo->base.handle;
        device->exec.exec[i].relocation_count = 0;
        device->exec.exec[i].relocs_ptr = 0;
        device->exec.exec[i].alignment  = 0;
        device->exec.exec[i].offset = 0;
        device->exec.exec[i].flags  = 0;
        device->exec.exec[i].rsvd1  = 0;
        device->exec.exec[i].rsvd2  = 0;

        device->exec.bo[i] = intel_bo_reference (bo);
        bo->exec = &device->exec.exec[i];
    }

    if (cairo_list_is_empty (&bo->link))
        cairo_list_add_tail (&device->flush, &bo->link);

    assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
    bo->batch_read_domains |= read_domains;
    bo->batch_write_domain |= write_domain;
}

void
i965_emit_relocation (i965_device_t *device,
                      i965_stream_t *stream,
                      intel_bo_t *target,
                      uint32_t target_offset,
                      uint32_t read_domains,
                      uint32_t write_domain,
                      uint32_t offset)
{
    int n;

    assert (target_offset < target->base.size);

    i965_add_relocation (device, target, read_domains, write_domain);

    n = stream->num_relocations++;
    assert (n < stream->max_relocations);

    stream->relocations[n].offset = offset;
    stream->relocations[n].delta  = target_offset;
    stream->relocations[n].target_handle   = target->base.handle;
    stream->relocations[n].read_domains    = read_domains;
    stream->relocations[n].write_domain    = write_domain;
    stream->relocations[n].presumed_offset = target->offset;
}
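
/* A note on the two relocation paths above (a sketch of the intended
 * flow, inferred from this file; the real call sites live in the
 * companion i965 compilation units): i965_add_relocation only
 * guarantees the target bo is on the execbuffer list, adding it at
 * most once, and accumulates its read/write domains, while
 * i965_emit_relocation additionally records an entry asking the
 * kernel to rewrite the dword at 'offset' in this stream with the
 * target's final GPU address plus 'target_offset'.  A caller would
 * look roughly like:
 *
 *     i965_emit_relocation (device, &device->batch, bo, 0,
 *                           I915_GEM_DOMAIN_RENDER,
 *                           I915_GEM_DOMAIN_RENDER,
 *                           device->batch.used);
 *     OUT_BATCH (bo->offset);  -- presumed address, fixed up on exec
 */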

static void
i965_stream_reset (i965_stream_t *stream)
{
    stream->used = stream->committed = 0;
    stream->num_relocations = 0;
    stream->num_pending_relocations = 0;
    if (++stream->serial == 0)
        stream->serial = 1;
}

void
i965_stream_commit (i965_device_t *device,
                    i965_stream_t *stream)
{
    intel_bo_t *bo;
    int n;

    assert (stream->used);

    bo = intel_bo_create (&device->intel,
                          stream->used, stream->used,
                          FALSE, I915_TILING_NONE, 0);

    /* apply pending relocations */
    for (n = 0; n < stream->num_pending_relocations; n++) {
        struct i965_pending_relocation *p = &stream->pending_relocations[n];

        i965_emit_relocation (device, &device->batch, bo,
                              p->delta,
                              p->read_domains,
                              p->write_domain,
                              p->offset);
        if (bo->offset)
            *(uint32_t *) (device->batch.data + p->offset) = bo->offset + p->delta;
    }

    intel_bo_write (&device->intel, bo, 0, stream->used, stream->data);

    if (stream->num_relocations) {
        assert (bo->exec != NULL);
        bo->exec->relocs_ptr = (uintptr_t) stream->relocations;
        bo->exec->relocation_count = stream->num_relocations;
    }

    intel_bo_destroy (&device->intel, bo);

    i965_stream_reset (stream);
}

static void
sf_states_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->sf_states, entry);
    _cairo_freelist_free (&device->sf_freelist, entry);
}

static void
cc_offsets_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->cc_states, entry);
    _cairo_freelist_free (&device->cc_freelist, entry);
}

static void
wm_kernels_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_kernels, entry);
    _cairo_freelist_free (&device->wm_kernel_freelist, entry);
}

static void
wm_states_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_states, entry);
    _cairo_freelist_free (&device->wm_state_freelist, entry);
}

static void
wm_bindings_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->wm_bindings, entry);
    _cairo_freelist_free (&device->wm_binding_freelist, entry);
}

static void
samplers_pluck (void *entry, void *closure)
{
    i965_device_t *device = closure;

    _cairo_hash_table_remove (device->samplers, entry);
    _cairo_freelist_free (&device->sampler_freelist, entry);
}

void
i965_general_state_reset (i965_device_t *device)
{
    _cairo_hash_table_foreach (device->sf_states,
                               sf_states_pluck,
                               device);

    _cairo_hash_table_foreach (device->cc_states,
                               cc_offsets_pluck,
                               device);

    _cairo_hash_table_foreach (device->wm_kernels,
                               wm_kernels_pluck,
                               device);

    _cairo_hash_table_foreach (device->wm_states,
                               wm_states_pluck,
                               device);

    _cairo_hash_table_foreach (device->wm_bindings,
                               wm_bindings_pluck,
                               device);

    _cairo_hash_table_foreach (device->samplers,
                               samplers_pluck,
                               device);

    device->vs_offset = (uint32_t) -1;
    device->border_color_offset = (uint32_t) -1;

    if (device->general_state != NULL) {
        intel_bo_destroy (&device->intel, device->general_state);
        device->general_state = NULL;
    }
}

static void
i965_device_reset (i965_device_t *device)
{
    device->exec.count = 0;
    device->exec.gtt_size = I965_VERTEX_SIZE +
                            I965_SURFACE_SIZE +
                            I965_GENERAL_SIZE +
                            I965_BATCH_SIZE;

    device->sf_state.entry.hash = (uint32_t) -1;
    device->wm_state.entry.hash = (uint32_t) -1;
    device->wm_binding.entry.hash = (uint32_t) -1;
    device->cc_state.entry.hash = (uint32_t) -1;

    device->target = NULL;
    device->source = NULL;
    device->mask = NULL;
    device->clip = NULL;

    device->draw_rectangle = (uint32_t) -1;

    device->vertex_type = (uint32_t) -1;
    device->vertex_size = 0;
    device->rectangle_size   = 0;
    device->last_vertex_size = 0;

    device->constants = NULL;
    device->constants_size = 0;

    device->have_urb_fences = FALSE;
}

static cairo_status_t
i965_exec (i965_device_t *device, uint32_t offset)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    cairo_status_t status = CAIRO_STATUS_SUCCESS;
    int ret, i;

    execbuf.buffers_ptr = (uintptr_t) device->exec.exec;
    execbuf.buffer_count = device->exec.count;
    execbuf.batch_start_offset = offset;
    execbuf.batch_len = device->batch.used;
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.num_cliprects = 0;
    execbuf.cliprects_ptr = 0;
    execbuf.flags = I915_GEM_3D_PIPELINE;
    execbuf.rsvd1 = 0;
    execbuf.rsvd2 = 0;

#if 0
    printf ("exec: offset=%d, length=%d, buffers=%d\n",
            offset, device->batch.used, device->exec.count);
    intel_dump_batchbuffer ((uint32_t *) device->batch.data,
                            device->batch.used,
                            device->intel.base.chip_id);
#endif

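    /* The execbuffer ioctl can be interrupted by a signal before the
     * request is queued, so retry on EINTR. */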
    ret = 0;
    do {
        ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    } while (ret != 0 && errno == EINTR);
    if (unlikely (ret)) {
        if (errno == ENOMEM)
            status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
        else
            status = _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

        fprintf (stderr, "Batch submission failed: %d\n", errno);
        fprintf (stderr, "   gtt size: %zd/%zd\n",
                 device->exec.gtt_size, device->intel.gtt_avail_size);

        fprintf (stderr, "   %d buffers:\n",
                 device->exec.count);
        for (i = 0; i < device->exec.count; i++) {
            fprintf (stderr, "     exec[%d] = %d\n",
                     i, device->exec.bo[i]->base.size);
        }

        intel_dump_batchbuffer ((uint32_t *) device->batch.data,
                                device->batch.used,
                                device->intel.base.chip_id);
    }

    /* XXX any write target within the batch should now be in error */
    for (i = 0; i < device->exec.count; i++) {
        intel_bo_t *bo = device->exec.bo[i];
        cairo_bool_t ret;

        bo->offset = device->exec.exec[i].offset;
        bo->exec = NULL;
        bo->batch_read_domains = 0;
        bo->batch_write_domain = 0;

        if (bo->virtual)
            intel_bo_unmap (bo);
        bo->cpu = FALSE;

        if (bo->purgeable)
            ret = intel_bo_madvise (&device->intel, bo, I915_MADV_DONTNEED);
            /* ignore immediate notification of purging */

        cairo_list_del (&bo->cache_list);
        cairo_list_init (&bo->link);
        intel_bo_destroy (&device->intel, bo);
    }
    cairo_list_init (&device->flush);

    device->exec.count = 0;

    return status;
}

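/* Round a byte count up to whole 8 KiB pages and then to the next
 * power-of-two number of pages, e.g. next_bo_size (20000) -> 3 pages
 * -> 4 pages -> 32768, while next_bo_size (8192) stays 8192.  This
 * presumably mirrors the power-of-two size buckets of the bo cache,
 * so the "aligned <= next_bo_size (max)" test in i965_device_flush
 * asks whether the combined streams would still come out of the same
 * bucket as the largest stream alone. */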
static inline uint32_t
next_bo_size (uint32_t v)
{
    v = (v + 8191) / 8192;

    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;

    return v * 8192;
}

static void
_copy_to_bo_and_apply_relocations (i965_device_t *device,
                                   intel_bo_t *bo,
                                   i965_stream_t *stream,
                                   uint32_t offset)
{
    int n;

    intel_bo_write (&device->intel, bo,
                    offset, stream->used,
                    stream->data);

    for (n = 0; n < stream->num_pending_relocations; n++) {
        struct i965_pending_relocation *p = &stream->pending_relocations[n];

        i965_emit_relocation (device, &device->batch, bo,
                              p->delta + offset,
                              p->read_domains,
                              p->write_domain,
                              p->offset);

        if (bo->offset) {
            *(uint32_t *) (device->batch.data + p->offset) =
                bo->offset + p->delta + offset;
        }
    }
}

cairo_status_t
i965_device_flush (i965_device_t *device)
{
    cairo_status_t status;
    uint32_t aligned, max;
    intel_bo_t *bo;
    int n;

    if (device->batch.used == 0)
        return CAIRO_STATUS_SUCCESS;

    i965_flush_vertices (device);

    OUT_BATCH (MI_BATCH_BUFFER_END);
    /* Emit a padding dword if we aren't going to be quad-word aligned. */
    if (device->batch.used & 4)
        OUT_BATCH (MI_NOOP);

#if 0
    printf ("device flush: vertex=%d, constant=%d, surface=%d, general=%d, batch=%d\n",
            device->vertex.used,
            device->constant.used,
            device->surface.used,
            device->general.used,
            device->batch.used);
#endif

    /* can we pack the surface state into the tail of the general state? */
    if (device->general.used == device->general.committed) {
        if (device->general.used) {
            assert (device->general.num_pending_relocations == 1);
            assert (device->general_state != NULL);
            i965_emit_relocation (device, &device->batch,
                                  device->general_state,
                                  device->general.pending_relocations[0].delta,
                                  device->general.pending_relocations[0].read_domains,
                                  device->general.pending_relocations[0].write_domain,
                                  device->general.pending_relocations[0].offset);

            if (device->general_state->offset) {
                *(uint32_t *) (device->batch.data +
                               device->general.pending_relocations[0].offset) =
                    device->general_state->offset +
                    device->general.pending_relocations[0].delta;
            }
        }
    } else {
        assert (device->general.num_pending_relocations == 1);
        if (device->general_state != NULL) {
            intel_bo_destroy (&device->intel, device->general_state);
            device->general_state = NULL;
        }

        bo = intel_bo_create (&device->intel,
                              device->general.used,
                              device->general.used,
                              FALSE, I915_TILING_NONE, 0);
        if (unlikely (bo == NULL))
            return _cairo_error (CAIRO_STATUS_NO_MEMORY);

        aligned = (device->general.used + 31) & -32;
        if (device->surface.used &&
            aligned + device->surface.used <= bo->base.size)
        {
            _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
            _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);

            if (device->surface.num_relocations) {
                for (n = 0; n < device->surface.num_relocations; n++)
                    device->surface.relocations[n].offset += aligned;

                assert (bo->exec != NULL);
                bo->exec->relocs_ptr = (uintptr_t) device->surface.relocations;
                bo->exec->relocation_count = device->surface.num_relocations;
            }

            i965_stream_reset (&device->surface);
        }
        else
        {
            _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
        }

        /* Note we don't reset the general state, just mark what data we've committed. */
        device->general.committed = device->general.used;
        device->general_state = bo;
    }
    device->general.num_pending_relocations = 0;

    /* Combine vertex+constant+surface+batch streams? */
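    /* Each stream is padded to a 64-byte boundary before the next is
     * appended: (x + 63) & -64 rounds up, e.g. 100 -> 128.  If the
     * padded total fits in the bo bucket that the largest stream would
     * occupy anyway, a single bo (and a single set of writes) can hold
     * the vertex, surface and batch data. */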
    max = aligned = device->vertex.used;
    if (device->surface.used) {
        aligned = (aligned + 63) & -64;
        aligned += device->surface.used;
        if (device->surface.used > max)
            max = device->surface.used;
    }
    aligned = (aligned + 63) & -64;
    aligned += device->batch.used;
    if (device->batch.used > max)
        max = device->batch.used;
    if (aligned <= next_bo_size (max)) {
        int batch_num_relocations;

        if (aligned <= 8192)
            max = aligned;

        bo = intel_bo_create (&device->intel,
                              max, max,
                              FALSE, I915_TILING_NONE, 0);
        if (unlikely (bo == NULL))
            return _cairo_error (CAIRO_STATUS_NO_MEMORY);

        assert (aligned <= bo->base.size);

        if (device->vertex.used)
            _copy_to_bo_and_apply_relocations (device, bo, &device->vertex, 0);

        aligned = device->vertex.used;

        batch_num_relocations = device->batch.num_relocations;
        if (device->surface.used) {
            aligned = (aligned + 63) & -64;
            _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);

            batch_num_relocations = device->batch.num_relocations;
            if (device->surface.num_relocations) {
                assert (device->batch.num_relocations + device->surface.num_relocations < device->batch.max_relocations);

                memcpy (device->batch.relocations + device->batch.num_relocations,
                        device->surface.relocations,
                        sizeof (device->surface.relocations[0]) * device->surface.num_relocations);

                for (n = 0; n < device->surface.num_relocations; n++)
                    device->batch.relocations[device->batch.num_relocations + n].offset += aligned;

                device->batch.num_relocations += device->surface.num_relocations;
            }

            aligned += device->surface.used;
        }

        aligned = (aligned + 63) & -64;
        intel_bo_write (&device->intel, bo,
                        aligned, device->batch.used,
                        device->batch.data);

        for (n = 0; n < batch_num_relocations; n++)
            device->batch.relocations[n].offset += aligned;

        if (device->exec.bo[device->exec.count-1] == bo) {
            assert (bo->exec == &device->exec.exec[device->exec.count-1]);

            bo->exec->relocation_count = device->batch.num_relocations;
            bo->exec->relocs_ptr = (uintptr_t) device->batch.relocations;
            intel_bo_destroy (&device->intel, bo);
        } else {
            assert (bo->exec == NULL);

            n = device->exec.count++;
            device->exec.exec[n].handle = bo->base.handle;
            device->exec.exec[n].relocation_count = device->batch.num_relocations;
            device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
            device->exec.exec[n].alignment = 0;
            device->exec.exec[n].offset = 0;
            device->exec.exec[n].flags = 0;
            device->exec.exec[n].rsvd1 = 0;
            device->exec.exec[n].rsvd2 = 0;

            /* transfer ownership to the exec */
            device->exec.bo[n] = bo;
        }
    } else {
        i965_stream_commit (device, &device->vertex);
        if (device->surface.used)
            i965_stream_commit (device, &device->surface);

        bo = intel_bo_create (&device->intel,
                              device->batch.used, device->batch.used,
                              FALSE, I915_TILING_NONE, 0);
        if (unlikely (bo == NULL))
            return _cairo_error (CAIRO_STATUS_NO_MEMORY);

        intel_bo_write (&device->intel, bo,
                        0, device->batch.used,
                        device->batch.data);

        n = device->exec.count++;
        device->exec.exec[n].handle = bo->base.handle;
        device->exec.exec[n].relocation_count = device->batch.num_relocations;
        device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
        device->exec.exec[n].alignment = 0;
        device->exec.exec[n].offset = 0;
        device->exec.exec[n].flags = 0;
        device->exec.exec[n].rsvd1 = 0;
        device->exec.exec[n].rsvd2 = 0;

        /* transfer ownership to the exec */
        device->exec.bo[n] = bo;
        aligned = 0;
    }

    status = i965_exec (device, aligned);

    i965_stream_reset (&device->vertex);
    i965_stream_reset (&device->surface);
    i965_stream_reset (&device->batch);

    intel_glyph_cache_unpin (&device->intel);
    intel_snapshot_cache_thaw (&device->intel);

    i965_device_reset (device);

    return status;
}

static cairo_surface_t *
i965_surface_create_similar (void *abstract_other,
                             cairo_content_t content,
                             int width, int height)
{
    i965_surface_t *other;
    cairo_format_t format;

    if (width > I965_MAX_SIZE || height > I965_MAX_SIZE)
        return NULL;

    other = abstract_other;
    if (content == other->intel.drm.base.content)
        format = other->intel.drm.format;
    else
        format = _cairo_format_from_content (content);

    return i965_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
                                         format,
                                         width, height,
                                         I965_TILING_DEFAULT, TRUE);
}

static cairo_status_t
i965_surface_finish (void *abstract_surface)
{
    i965_surface_t *surface = abstract_surface;

    return intel_surface_finish (&surface->intel);
}

static cairo_status_t
i965_surface_flush (void *abstract_surface, unsigned flags)
{
    i965_surface_t *surface = abstract_surface;
    cairo_status_t status = CAIRO_STATUS_SUCCESS;

    if (flags)
        return CAIRO_STATUS_SUCCESS;

    if (surface->intel.drm.fallback != NULL)
        return intel_surface_flush (abstract_surface);

    /* Forgo flushing on finish as the user cannot access the surface directly. */
    if (! surface->intel.drm.base.finished &&
        to_intel_bo (surface->intel.drm.bo)->exec != NULL)
    {
        status = cairo_device_acquire (surface->intel.drm.base.device);
        if (likely (status == CAIRO_STATUS_SUCCESS)) {
            i965_device_t *device;

            device = i965_device (surface);
            status = i965_device_flush (device);
            cairo_device_release (&device->intel.base.base);
        }
    }

    return status;
}

/* rasterisation */

static cairo_status_t
_composite_boxes_spans (void                            *closure,
                        cairo_span_renderer_t           *renderer,
                        const cairo_rectangle_int_t     *extents)
{
    cairo_boxes_t *boxes = closure;
    cairo_rectangular_scan_converter_t converter;
    struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;

    _cairo_rectangular_scan_converter_init (&converter, extents);
    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
        cairo_box_t *box = chunk->base;
        int i;

        for (i = 0; i < chunk->count; i++) {
            status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
            if (unlikely (status))
                goto CLEANUP;
        }
    }

    status = converter.base.generate (&converter.base, renderer);

  CLEANUP:
    converter.base.destroy (&converter.base);
    return status;
}

cairo_status_t
i965_fixup_unbounded (i965_surface_t *dst,
                      const cairo_composite_rectangles_t *extents,
                      cairo_clip_t *clip)
{
    i965_shader_t shader;
    i965_device_t *device;
    cairo_status_t status;

    i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);

    if (clip != NULL) {
        cairo_region_t *clip_region = NULL;

        status = _cairo_clip_get_region (clip, &clip_region);
        assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
        assert (clip_region == NULL);

        if (status == CAIRO_INT_STATUS_UNSUPPORTED)
            i965_shader_set_clip (&shader, clip);
    } else {
        if (extents->bounded.width  == extents->unbounded.width &&
            extents->bounded.height == extents->unbounded.height)
        {
            return CAIRO_STATUS_SUCCESS;
        }
    }

    status = i965_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          &_cairo_pattern_clear.base,
                                          &extents->unbounded);
    if (unlikely (status)) {
        i965_shader_fini (&shader);
        return status;
    }

    device = i965_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        return status;

    status = i965_shader_commit (&shader, device);
    if (unlikely (status)) {
        goto BAIL;
    }

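    /* Clear everything the operator touched outside the bounded
     * extents: if nothing bounded was drawn, clear the whole unbounded
     * area; otherwise clear the four strips (top, left, right, bottom)
     * framing the bounded region. */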
    if (extents->bounded.width == 0 || extents->bounded.height == 0) {
        i965_shader_add_rectangle (&shader,
                                   extents->unbounded.x,
                                   extents->unbounded.y,
                                   extents->unbounded.width,
                                   extents->unbounded.height);
    } else { /* top */
        if (extents->bounded.y != extents->unbounded.y) {
            cairo_rectangle_int_t rect;

            rect.x = extents->unbounded.x;
            rect.y = extents->unbounded.y;
            rect.width  = extents->unbounded.width;
            rect.height = extents->bounded.y - rect.y;

            i965_shader_add_rectangle (&shader,
                                       rect.x, rect.y,
                                       rect.width, rect.height);
        }

        /* left */
        if (extents->bounded.x != extents->unbounded.x) {
            cairo_rectangle_int_t rect;

            rect.x = extents->unbounded.x;
            rect.y = extents->bounded.y;
            rect.width  = extents->bounded.x - extents->unbounded.x;
            rect.height = extents->bounded.height;

            i965_shader_add_rectangle (&shader,
                                       rect.x, rect.y,
                                       rect.width, rect.height);
        }

        /* right */
        if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
            cairo_rectangle_int_t rect;

            rect.x = extents->bounded.x + extents->bounded.width;
            rect.y = extents->bounded.y;
            rect.width  = extents->unbounded.x + extents->unbounded.width - rect.x;
            rect.height = extents->bounded.height;

            i965_shader_add_rectangle (&shader,
                                       rect.x, rect.y,
                                       rect.width, rect.height);
        }

        /* bottom */
        if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
            cairo_rectangle_int_t rect;

            rect.x = extents->unbounded.x;
            rect.y = extents->bounded.y + extents->bounded.height;
            rect.width  = extents->unbounded.width;
            rect.height = extents->unbounded.y + extents->unbounded.height - rect.y;

            i965_shader_add_rectangle (&shader,
                                       rect.x, rect.y,
                                       rect.width, rect.height);
        }
    }

    i965_shader_fini (&shader);
  BAIL:
    cairo_device_release (&device->intel.base.base);
    return status;
}

static cairo_status_t
i965_fixup_unbounded_boxes (i965_surface_t *dst,
                            const cairo_composite_rectangles_t *extents,
                            cairo_clip_t *clip,
                            cairo_boxes_t *boxes)
{
    cairo_boxes_t clear;
    cairo_box_t box;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;
    struct _cairo_boxes_chunk *chunk;
    i965_shader_t shader;
    int i;

    if (boxes->num_boxes <= 1)
        return i965_fixup_unbounded (dst, extents, clip);

    i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);
    if (clip != NULL) {
        status = _cairo_clip_get_region (clip, &clip_region);
        assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
        if (status == CAIRO_INT_STATUS_UNSUPPORTED)
            i965_shader_set_clip (&shader, clip);
    }

    status = i965_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          &_cairo_pattern_clear.base,
                                          &extents->unbounded);
    if (unlikely (status)) {
        i965_shader_fini (&shader);
        return status;
    }

    _cairo_boxes_init (&clear);

    box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
    box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
    box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
    box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);

    if (clip_region == NULL) {
        cairo_boxes_t tmp;

        _cairo_boxes_init (&tmp);

        status = _cairo_boxes_add (&tmp, &box);
        assert (status == CAIRO_STATUS_SUCCESS);

        tmp.chunks.next = &boxes->chunks;
        tmp.num_boxes += boxes->num_boxes;

        status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
                                                          CAIRO_FILL_RULE_WINDING,
                                                          &clear);

        tmp.chunks.next = NULL;
    } else {
        pixman_box32_t *pbox;

        pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
        _cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);

        status = _cairo_boxes_add (&clear, &box);
        assert (status == CAIRO_STATUS_SUCCESS);

        for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
            for (i = 0; i < chunk->count; i++) {
                status = _cairo_boxes_add (&clear, &chunk->base[i]);
                if (unlikely (status)) {
                    _cairo_boxes_fini (&clear);
                    return status;
                }
            }
        }

        status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
                                                          CAIRO_FILL_RULE_WINDING,
                                                          &clear);
    }

    if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
        i965_device_t *device;

        device = i965_device (dst);
        status = cairo_device_acquire (&device->intel.base.base);
        if (unlikely (status))
            goto err_shader;

        status = i965_shader_commit (&shader, device);
        if (unlikely (status))
            goto err_device;

        for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
            for (i = 0; i < chunk->count; i++) {
                int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
                int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
                int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
                int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);

                i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
            }
        }

err_device:
        cairo_device_release (&device->intel.base.base);
err_shader:
        i965_shader_fini (&shader);
    }

    _cairo_boxes_fini (&clear);

    return status;
}

static cairo_status_t
_composite_boxes (i965_surface_t *dst,
                  cairo_operator_t op,
                  const cairo_pattern_t *pattern,
                  cairo_boxes_t *boxes,
                  cairo_antialias_t antialias,
                  cairo_clip_t *clip,
                  const cairo_composite_rectangles_t *extents)
{
    cairo_bool_t need_clip_surface = FALSE;
    cairo_region_t *clip_region = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    i965_shader_t shader;
    i965_device_t *device;
    int i;

    /* If the boxes are not pixel-aligned, we will need to compute a real mask */
    if (antialias != CAIRO_ANTIALIAS_NONE) {
        if (! boxes->is_pixel_aligned)
            return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    i965_shader_init (&shader, dst, op);

    status = i965_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          pattern,
                                          &extents->bounded);
    if (unlikely (status))
        return status;

    if (clip != NULL) {
        status = _cairo_clip_get_region (clip, &clip_region);
        assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
        need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
        if (need_clip_surface)
            i965_shader_set_clip (&shader, clip);
    }

    device = i965_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        goto err_shader;

    status = i965_shader_commit (&shader, device);
    if (unlikely (status))
        goto err_device;

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
        cairo_box_t *box = chunk->base;
        for (i = 0; i < chunk->count; i++) {
            int x1 = _cairo_fixed_integer_round (box[i].p1.x);
            int y1 = _cairo_fixed_integer_round (box[i].p1.y);
            int x2 = _cairo_fixed_integer_round (box[i].p2.x);
            int y2 = _cairo_fixed_integer_round (box[i].p2.y);

            if (x2 > x1 && y2 > y1)
                i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
        }
    }

    if (! extents->is_bounded)
        status = i965_fixup_unbounded_boxes (dst, extents, clip, boxes);

  err_device:
    cairo_device_release (&device->intel.base.base);
  err_shader:
    i965_shader_fini (&shader);

    return status;
}

static cairo_status_t
_clip_and_composite_boxes (i965_surface_t *dst,
                           cairo_operator_t op,
                           const cairo_pattern_t *src,
                           cairo_boxes_t *boxes,
                           cairo_antialias_t antialias,
                           const cairo_composite_rectangles_t *extents,
                           cairo_clip_t *clip)
{
    cairo_status_t status;

    if (boxes->num_boxes == 0) {
        if (extents->is_bounded)
            return CAIRO_STATUS_SUCCESS;

        return i965_fixup_unbounded (dst, extents, clip);
    }

    /* Use a fast path if the boxes are pixel aligned */
    status = _composite_boxes (dst, op, src, boxes, antialias, clip, extents);
    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
        return status;

    /* Otherwise render the boxes via an implicit mask and composite in the usual
     * fashion.
     */
    return i965_clip_and_composite_spans (dst, op, src, antialias,
                                          _composite_boxes_spans, boxes,
                                          extents, clip);
}

static cairo_int_status_t
i965_surface_paint (void                        *abstract_dst,
                    cairo_operator_t             op,
                    const cairo_pattern_t       *source,
                    cairo_clip_t                *clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    cairo_boxes_t boxes;
    cairo_box_t *clip_boxes = boxes.boxes_embedded;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
    cairo_status_t status;

    /* XXX unsupported operators? use pixel shader blending, eventually */

    status = _cairo_composite_rectangles_init_for_paint (&extents,
                                                         dst->intel.drm.width,
                                                         dst->intel.drm.height,
                                                         op, source,
                                                         clip);
    if (unlikely (status))
        return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
        if (have_clip)
            _cairo_clip_fini (&local_clip);

        return status;
    }

    _cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
    status = _clip_and_composite_boxes (dst, op, source,
                                        &boxes, CAIRO_ANTIALIAS_DEFAULT,
                                        &extents, clip);
    if (clip_boxes != boxes.boxes_embedded)
        free (clip_boxes);

    if (have_clip)
        _cairo_clip_fini (&local_clip);

    return status;
}

static cairo_int_status_t
i965_surface_mask (void                         *abstract_dst,
                   cairo_operator_t              op,
                   const cairo_pattern_t        *source,
                   const cairo_pattern_t        *mask,
                   cairo_clip_t                 *clip)
{
    i965_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    i965_shader_t shader;
    i965_device_t *device;
    cairo_clip_t local_clip;
    cairo_region_t *clip_region = NULL;
    cairo_bool_t need_clip_surface = FALSE;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_mask (&extents,
                                                        dst->intel.drm.width,
                                                        dst->intel.drm.height,
                                                        op, source, mask, clip);
    if (unlikely (status))
        return status;

    if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL && extents.is_bounded) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        status = _cairo_clip_rectangle (clip, &extents.bounded);
        if (unlikely (status)) {
            _cairo_clip_fini (&local_clip);
            return status;
        }

        have_clip = TRUE;
    }

    i965_shader_init (&shader, dst, op);

    status = i965_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          source,
                                          &extents.bounded);
    if (unlikely (status))
        goto err_shader;

    status = i965_shader_acquire_pattern (&shader,
                                          &shader.mask,
                                          mask,
                                          &extents.bounded);
    if (unlikely (status))
        goto err_shader;

    if (clip != NULL) {
        status = _cairo_clip_get_region (clip, &clip_region);
        assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
1203         need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
1204         if (need_clip_surface)
1205             i965_shader_set_clip (&shader, clip);
1206     }
1207
1208     device = i965_device (dst);
1209     status = cairo_device_acquire (&device->intel.base.base);
1210     if (unlikely (status))
1211         goto err_shader;
1212
1213     status = i965_shader_commit (&shader, device);
1214     if (unlikely (status))
1215         goto err_device;
1216
1217     if (clip_region != NULL) {
1218         unsigned int n, num_rectangles;
1219
1220         num_rectangles = cairo_region_num_rectangles (clip_region);
1221         for (n = 0; n < num_rectangles; n++) {
1222             cairo_rectangle_int_t rect;
1223
1224             cairo_region_get_rectangle (clip_region, n, &rect);
1225
1226             i965_shader_add_rectangle (&shader,
1227                                        rect.x, rect.y,
1228                                        rect.width, rect.height);
1229         }
1230     } else {
1231         i965_shader_add_rectangle (&shader,
1232                                    extents.bounded.x,
1233                                    extents.bounded.y,
1234                                    extents.bounded.width,
1235                                    extents.bounded.height);
1236     }
1237
1238     if (! extents.is_bounded)
1239         status = i965_fixup_unbounded (dst, &extents, clip);
1240
1241   err_device:
1242     cairo_device_release (&device->intel.base.base);
1243   err_shader:
1244     i965_shader_fini (&shader);
1245     if (have_clip)
1246         _cairo_clip_fini (&local_clip);
1247
1248     return status;
1249 }
1250
1251 typedef struct {
1252     cairo_polygon_t             polygon;
1253     cairo_fill_rule_t            fill_rule;
1254     cairo_antialias_t            antialias;
1255 } composite_polygon_info_t;
1256
1257 static cairo_status_t
1258 _composite_polygon_spans (void                          *closure,
1259                           cairo_span_renderer_t         *renderer,
1260                           const cairo_rectangle_int_t   *extents)
1261 {
1262     composite_polygon_info_t *info = closure;
1263     cairo_botor_scan_converter_t converter;
1264     cairo_status_t status;
1265     cairo_box_t box;
1266
1267     box.p1.x = _cairo_fixed_from_int (extents->x);
1268     box.p1.y = _cairo_fixed_from_int (extents->y);
1269     box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
1270     box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);
1271
1272     _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);
1273
1274     status = converter.base.add_polygon (&converter.base, &info->polygon);
1275     if (likely (status == CAIRO_STATUS_SUCCESS))
1276         status = converter.base.generate (&converter.base, renderer);
1277
1278     converter.base.destroy (&converter.base);
1279
1280     return status;
1281 }
1282
1283 static cairo_int_status_t
1284 i965_surface_stroke (void                       *abstract_dst,
1285                      cairo_operator_t            op,
1286                      const cairo_pattern_t      *source,
1287                      cairo_path_fixed_t         *path,
1288                      const cairo_stroke_style_t *stroke_style,
1289                      const cairo_matrix_t       *ctm,
1290                      const cairo_matrix_t       *ctm_inverse,
1291                      double                      tolerance,
1292                      cairo_antialias_t           antialias,
1293                      cairo_clip_t               *clip)
1294 {
1295     i965_surface_t *dst = abstract_dst;
1296     cairo_composite_rectangles_t extents;
1297     composite_polygon_info_t info;
1298     cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
1299     int num_boxes = ARRAY_LENGTH (boxes_stack);
1300     cairo_clip_t local_clip;
1301     cairo_bool_t have_clip = FALSE;
1302     cairo_status_t status;
1303
1304     status = _cairo_composite_rectangles_init_for_stroke (&extents,
1305                                                           dst->intel.drm.width,
1306                                                           dst->intel.drm.height,
1307                                                           op, source,
1308                                                           path, stroke_style, ctm,
1309                                                           clip);
1310     if (unlikely (status))
1311         return status;
1312
1313     if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
1314         clip = NULL;
1315
1316     if (clip != NULL) {
1317         clip = _cairo_clip_init_copy (&local_clip, clip);
1318         have_clip = TRUE;
1319     }
1320
1321     status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
1322     if (unlikely (status)) {
1323         if (have_clip)
1324             _cairo_clip_fini (&local_clip);
1325
1326         return status;
1327     }
1328
1329     if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
1330         cairo_boxes_t boxes;
1331
1332         _cairo_boxes_init (&boxes);
1333         _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
1334         status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
1335                                                                 stroke_style,
1336                                                                 ctm,
1337                                                                 &boxes);
1338         if (likely (status == CAIRO_STATUS_SUCCESS)) {
1339             status = _clip_and_composite_boxes (dst, op, source,
1340                                                 &boxes, antialias,
1341                                                 &extents, clip);
1342         }
1343
1344         _cairo_boxes_fini (&boxes);
1345
1346         if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1347             goto CLEANUP_BOXES;
1348     }
1349
1350     _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);
1351
1352     status = _cairo_path_fixed_stroke_to_polygon (path,
1353                                                   stroke_style,
1354                                                   ctm, ctm_inverse,
1355                                                   tolerance,
1356                                                   &info.polygon);
1357     if (unlikely (status))
1358         goto CLEANUP_POLYGON;
1359
1360     if (extents.is_bounded) {
1361         cairo_rectangle_int_t rect;
1362
1363         _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
1364         if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
1365             goto CLEANUP_POLYGON;
1366     }
1367
1368     if (info.polygon.num_edges == 0) {
1369         if (! extents.is_bounded)
1370             status = i965_fixup_unbounded (dst, &extents, clip);
1371     } else {
1372         info.fill_rule = CAIRO_FILL_RULE_WINDING;
1373         info.antialias = antialias;
1374         status = i965_clip_and_composite_spans (dst, op, source, antialias,
1375                                                 _composite_polygon_spans, &info,
1376                                                 &extents, clip);
1377     }
1378
1379 CLEANUP_POLYGON:
1380     _cairo_polygon_fini (&info.polygon);
1381
1382 CLEANUP_BOXES:
1383     if (clip_boxes != boxes_stack)
1384         free (clip_boxes);
1385
1386     if (have_clip)
1387         _cairo_clip_fini (&local_clip);
1388
1389     return status;
1390 }
1391
1392 static cairo_int_status_t
1393 i965_surface_fill (void                 *abstract_dst,
1394                    cairo_operator_t      op,
1395                    const cairo_pattern_t*source,
1396                    cairo_path_fixed_t   *path,
1397                    cairo_fill_rule_t     fill_rule,
1398                    double                tolerance,
1399                    cairo_antialias_t     antialias,
1400                    cairo_clip_t         *clip)
1401 {
1402     i965_surface_t *dst = abstract_dst;
1403     cairo_composite_rectangles_t extents;
1404     composite_polygon_info_t info;
1405     cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
1406     cairo_clip_t local_clip;
1407     cairo_bool_t have_clip = FALSE;
1408     int num_boxes = ARRAY_LENGTH (boxes_stack);
1409     cairo_status_t status;
1410
1411     status = _cairo_composite_rectangles_init_for_fill (&extents,
1412                                                         dst->intel.drm.width,
1413                                                         dst->intel.drm.height,
1414                                                         op, source, path,
1415                                                         clip);
1416     if (unlikely (status))
1417         return status;
1418
1419     if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
1420         clip = NULL;
1421
1422     if (clip != NULL) {
1423         clip = _cairo_clip_init_copy (&local_clip, clip);
1424         have_clip = TRUE;
1425     }
1426
1427     status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
1428     if (unlikely (status)) {
1429         if (have_clip)
1430             _cairo_clip_fini (&local_clip);
1431
1432         return status;
1433     }
1434
1435     assert (! _cairo_path_fixed_fill_is_empty (path));
1436
1437     if (_cairo_path_fixed_fill_is_rectilinear (path)) {
1438         cairo_boxes_t boxes;
1439
1440         _cairo_boxes_init (&boxes);
1441         _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
1442         status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
1443                                                               fill_rule,
1444                                                               &boxes);
1445         if (likely (status == CAIRO_STATUS_SUCCESS)) {
1446             status = _clip_and_composite_boxes (dst, op, source,
1447                                                 &boxes, antialias,
1448                                                 &extents, clip);
1449         }
1450
1451         _cairo_boxes_fini (&boxes);
1452
1453         if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1454             goto CLEANUP_BOXES;
1455     }
1456
1457     _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);
1458
1459     status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
1460     if (unlikely (status))
1461         goto CLEANUP_POLYGON;
1462
1463     if (extents.is_bounded) {
1464         cairo_rectangle_int_t rect;
1465
1466         _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
1467         if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
1468             goto CLEANUP_POLYGON;
1469     }
1470
1471     if (info.polygon.num_edges == 0) {
1472         if (! extents.is_bounded)
1473             status = i965_fixup_unbounded (dst, &extents, clip);
1474     } else {
1475         info.fill_rule = fill_rule;
1476         info.antialias = antialias;
1477         status = i965_clip_and_composite_spans (dst, op, source, antialias,
1478                                                 _composite_polygon_spans, &info,
1479                                                 &extents, clip);
1480     }
1481
1482 CLEANUP_POLYGON:
1483     _cairo_polygon_fini (&info.polygon);
1484
1485 CLEANUP_BOXES:
1486     if (clip_boxes != boxes_stack)
1487         free (clip_boxes);
1488
1489     if (have_clip)
1490         _cairo_clip_fini (&local_clip);
1491
1492     return status;
1493 }
1494
static const cairo_surface_backend_t i965_surface_backend = {
    CAIRO_SURFACE_TYPE_DRM,
    _cairo_default_context_create,

    i965_surface_create_similar,
    i965_surface_finish,

    NULL,
    intel_surface_acquire_source_image,
    intel_surface_release_source_image,

    NULL, NULL, NULL,
    NULL, /* composite */
    NULL, /* fill */
    NULL, /* trapezoids */
    NULL, /* span */
    NULL, /* check-span */

    NULL, /* copy_page */
    NULL, /* show_page */
    _cairo_drm_surface_get_extents,
    NULL, /* old-glyphs */
    _cairo_drm_surface_get_font_options,

    i965_surface_flush,
    NULL, /* mark_dirty */
    intel_scaled_font_fini,
    intel_scaled_glyph_fini,

    i965_surface_paint,
    i965_surface_mask,
    i965_surface_stroke,
    i965_surface_fill,
    i965_surface_glyphs,
};

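/* Common initialisation shared by the constructors below. */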
static void
i965_surface_init (i965_surface_t *surface,
                   cairo_drm_device_t *device,
                   cairo_format_t format,
                   int width, int height)
{
    intel_surface_init (&surface->intel, &i965_surface_backend, device,
                        format, width, height);
    surface->stream = 0;
}

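/* Pad a stride out to the tiled alignment.  (stride + 127) & -128
 * rounds up to the next multiple of 128 bytes, e.g. a 300 byte stride
 * becomes 384; untiled surfaces keep whatever stride they were given. */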
static inline int cairo_const
i965_tiling_stride (uint32_t tiling, int stride)
{
    if (tiling == I915_TILING_NONE)
        return stride;

    return (stride + 127) & -128;
}

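/* Pad a height to a whole number of tile rows: X tiles are 8 rows tall
 * and Y tiles 32, so e.g. 100 rows of a Y-tiled buffer are padded to
 * 128.  Untiled buffers are merely rounded up to an even height. */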
static inline int cairo_const
i965_tiling_height (uint32_t tiling, int height)
{
    switch (tiling) {
    default:
    case I915_TILING_NONE: return (height + 1) & -2;
    case I915_TILING_X: return (height + 7) & -8;
    case I915_TILING_Y: return (height + 31) & -32;
    }
}

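/* Allocate a surface backed by a fresh bo.  The width is first rounded
 * up to a multiple of 4 pixels and the stride to 64 bytes before the
 * tiling constraints above are applied, so the bo always holds at
 * least stride * padded-height bytes. */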
cairo_surface_t *
i965_surface_create_internal (cairo_drm_device_t *base_dev,
                              cairo_format_t format,
                              int width, int height,
                              uint32_t tiling,
                              cairo_bool_t gpu_target)
{
    i965_surface_t *surface;
    cairo_status_t status_ignored;

    surface = malloc (sizeof (i965_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i965_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
        uint32_t size, stride;
        intel_bo_t *bo;

        width = (width + 3) & -4;
        stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
        stride = (stride + 63) & ~63;
        stride = i965_tiling_stride (tiling, stride);
        surface->intel.drm.stride = stride;

        height = i965_tiling_height (tiling, height);
        assert (height <= I965_MAX_SIZE);

        size = stride * height;
        bo = intel_bo_create (to_intel_device (&base_dev->base),
                              size, size,
                              gpu_target, tiling, stride);
        if (bo == NULL) {
            status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
            free (surface);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
        }

        bo->tiling = tiling;
        bo->stride = stride;
        surface->intel.drm.bo = &bo->base;

        assert (bo->base.size >= (size_t) stride*height);
    }

    return &surface->intel.drm.base;
}

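/* Only formats the render pipeline can target are accepted;
 * in particular there is no A1 support. */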
static cairo_surface_t *
i965_surface_create (cairo_drm_device_t *device,
                     cairo_format_t format, int width, int height)
{
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    return i965_surface_create_internal (device, format, width, height,
                                         I965_TILING_DEFAULT, TRUE);
}

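/* Wrap a buffer shared via its global (flinked) GEM name.  The caller
 * supplies the stride, which must satisfy the same 64-byte alignment
 * that i965_surface_create_internal() would have chosen itself. */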
static cairo_surface_t *
i965_surface_create_for_name (cairo_drm_device_t *base_dev,
                              unsigned int name,
                              cairo_format_t format,
                              int width, int height, int stride)
{
    i965_device_t *device;
    i965_surface_t *surface;
    cairo_status_t status_ignored;
    int min_stride;

    min_stride = cairo_format_stride_for_width (format, (width + 3) & -4);
    if (stride < min_stride || stride & 63)
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));

    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    surface = malloc (sizeof (i965_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i965_surface_init (surface, base_dev, format, width, height);

    device = (i965_device_t *) base_dev;
    surface->intel.drm.bo = &intel_bo_create_for_name (&device->intel, name)->base;
    if (unlikely (surface->intel.drm.bo == NULL)) {
        status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
        free (surface);
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
    }

    surface->intel.drm.stride = stride;

    return &surface->intel.drm.base;
}

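/* The display engine cannot scan out a Y-tiled buffer, so if necessary
 * the contents are copied into an X-tiled clone and the two buffer
 * objects are swapped beneath the surfaces. */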
static cairo_status_t
i965_surface_enable_scan_out (void *abstract_surface)
{
    i965_surface_t *surface = abstract_surface;
    intel_bo_t *bo;

    if (unlikely (surface->intel.drm.bo == NULL))
        return _cairo_error (CAIRO_STATUS_INVALID_SIZE);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->tiling != I915_TILING_X) {
        i965_device_t *device = i965_device (surface);
        cairo_surface_pattern_t pattern;
        cairo_surface_t *clone;
        cairo_status_t status;

        clone = i965_surface_create_internal (&device->intel.base,
                                              surface->intel.drm.format,
                                              surface->intel.drm.width,
                                              surface->intel.drm.height,
                                              I915_TILING_X,
                                              TRUE);
        if (unlikely (clone->status))
            return clone->status;

        /* 2D blit? */
        _cairo_pattern_init_for_surface (&pattern, &surface->intel.drm.base);
        pattern.base.filter = CAIRO_FILTER_NEAREST;

        status = _cairo_surface_paint (clone,
                                       CAIRO_OPERATOR_SOURCE,
                                       &pattern.base,
                                       NULL);

        _cairo_pattern_fini (&pattern.base);

        if (unlikely (status)) {
            cairo_surface_destroy (clone);
            return status;
        }

        /* swap buffer objects */
        surface->intel.drm.bo = ((cairo_drm_surface_t *) clone)->bo;
        ((cairo_drm_surface_t *) clone)->bo = &bo->base;
        bo = to_intel_bo (surface->intel.drm.bo);

        cairo_surface_destroy (clone);
    }

    if (unlikely (bo->tiling == I915_TILING_Y))
        return _cairo_error (CAIRO_STATUS_INVALID_FORMAT); /* XXX */

    return CAIRO_STATUS_SUCCESS;
}

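/* Device-level flush: submit any batched commands to the kernel. */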
static cairo_int_status_t
_i965_device_flush (cairo_drm_device_t *device)
{
    cairo_status_t status;

    if (unlikely (device->base.finished))
        return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (&device->base);
    if (likely (status == CAIRO_STATUS_SUCCESS)) {
        status = i965_device_flush ((i965_device_t *) device);
        cairo_device_release (&device->base);
    }

    return status;
}

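/* Like flush, but also wait for the GPU to catch up before returning. */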
static cairo_int_status_t
_i965_device_throttle (cairo_drm_device_t *device)
{
    cairo_status_t status;

    status = cairo_device_acquire (&device->base);
    if (unlikely (status))
        return status;

    status = i965_device_flush ((i965_device_t *) device);
    intel_throttle ((intel_device_t *) device);

    cairo_device_release (&device->base);

    return status;
}

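/* Tear everything down in the reverse order of construction: cached
 * state tables first, then their freelists, then the underlying intel
 * device. */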
static void
_i965_device_destroy (void *base)
{
    i965_device_t *device = base;

    i965_device_reset (device);
    i965_general_state_reset (device);

    _cairo_hash_table_destroy (device->sf_states);
    _cairo_hash_table_destroy (device->samplers);
    _cairo_hash_table_destroy (device->cc_states);
    _cairo_hash_table_destroy (device->wm_kernels);
    _cairo_hash_table_destroy (device->wm_states);
    _cairo_hash_table_destroy (device->wm_bindings);

    _cairo_freelist_fini (&device->sf_freelist);
    _cairo_freelist_fini (&device->cc_freelist);
    _cairo_freelist_fini (&device->wm_kernel_freelist);
    _cairo_freelist_fini (&device->wm_state_freelist);
    _cairo_freelist_fini (&device->wm_binding_freelist);
    _cairo_freelist_fini (&device->sampler_freelist);

    intel_device_fini (&device->intel);
    free (device);
}

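/* Comparator for tables keyed purely by the precomputed hash value. */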
static cairo_bool_t
hash_equal (const void *A, const void *B)
{
    const cairo_hash_entry_t *a = A, *b = B;
    return a->hash == b->hash;
}

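/* Device constructor.  The cached-state tables are created in the same
 * order that the CLEANUP_* labels at the bottom unwind them, so a
 * failure at any step releases exactly what has been set up so far. */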
cairo_drm_device_t *
_cairo_drm_i965_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
{
    i965_device_t *device;
    uint64_t gtt_size;
    cairo_status_t status;

    if (! intel_info (fd, &gtt_size))
        return NULL;

    device = malloc (sizeof (i965_device_t));
    if (unlikely (device == NULL))
        return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);

    status = intel_device_init (&device->intel, fd);
    if (unlikely (status))
        goto CLEANUP;

    device->is_g4x = IS_G4X (chip_id);
    //device->is_g5x = IS_G5X (chip_id);

    device->intel.base.surface.create = i965_surface_create;
    device->intel.base.surface.create_for_name = i965_surface_create_for_name;
    device->intel.base.surface.create_from_cacheable_image = NULL;
    device->intel.base.surface.enable_scan_out = i965_surface_enable_scan_out;

    device->intel.base.device.flush = _i965_device_flush;
    device->intel.base.device.throttle = _i965_device_throttle;
    device->intel.base.device.destroy = _i965_device_destroy;

    /* Any failure below is an allocation failure. */
    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);

    device->sf_states = _cairo_hash_table_create (i965_sf_state_equal);
    if (unlikely (device->sf_states == NULL))
        goto CLEANUP_INTEL;

    _cairo_freelist_init (&device->sf_freelist,
                          sizeof (struct i965_sf_state));

    device->cc_states = _cairo_hash_table_create (i965_cc_state_equal);
    if (unlikely (device->cc_states == NULL))
        goto CLEANUP_SF;

    _cairo_freelist_init (&device->cc_freelist,
                          sizeof (struct i965_cc_state));

    device->wm_kernels = _cairo_hash_table_create (hash_equal);
    if (unlikely (device->wm_kernels == NULL))
        goto CLEANUP_CC;

    _cairo_freelist_init (&device->wm_kernel_freelist,
                          sizeof (struct i965_wm_kernel));

    device->wm_states = _cairo_hash_table_create (i965_wm_state_equal);
    if (unlikely (device->wm_states == NULL))
        goto CLEANUP_WM_KERNEL;

    _cairo_freelist_init (&device->wm_state_freelist,
                          sizeof (struct i965_wm_state));

    device->wm_bindings = _cairo_hash_table_create (i965_wm_binding_equal);
    if (unlikely (device->wm_bindings == NULL))
        goto CLEANUP_WM_STATE;

    _cairo_freelist_init (&device->wm_binding_freelist,
                          sizeof (struct i965_wm_binding));

    device->samplers = _cairo_hash_table_create (hash_equal);
    if (unlikely (device->samplers == NULL))
        goto CLEANUP_WM_BINDING;

    _cairo_freelist_init (&device->sampler_freelist,
                          sizeof (struct i965_sampler));

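/* Four command streams feed one submission: the batch buffer itself
 * plus surface-state, general-state and vertex data, each with its own
 * (possibly empty) relocation bookkeeping. */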
    i965_stream_init (&device->batch,
                      device->batch_base, sizeof (device->batch_base),
                      NULL, 0,
                      device->batch_relocations,
                      ARRAY_LENGTH (device->batch_relocations));

    i965_stream_init (&device->surface,
                      device->surface_base, sizeof (device->surface_base),
                      device->surface_pending_relocations,
                      ARRAY_LENGTH (device->surface_pending_relocations),
                      device->surface_relocations,
                      ARRAY_LENGTH (device->surface_relocations));

    i965_stream_init (&device->general,
                      device->general_base, sizeof (device->general_base),
                      device->general_pending_relocations,
                      ARRAY_LENGTH (device->general_pending_relocations),
                      NULL, 0);

    i965_stream_init (&device->vertex,
                      device->vertex_base, sizeof (device->vertex_base),
                      device->vertex_pending_relocations,
                      ARRAY_LENGTH (device->vertex_pending_relocations),
                      NULL, 0);

    cairo_list_init (&device->flush);
    i965_device_reset (device);
    device->vs_offset = (uint32_t) -1;
    device->border_color_offset = (uint32_t) -1;
    device->general_state = NULL;

    return _cairo_drm_device_init (&device->intel.base,
                                   fd, dev, vendor_id, chip_id,
                                   I965_MAX_SIZE);

  CLEANUP_WM_BINDING:
    _cairo_hash_table_destroy (device->wm_bindings);
  CLEANUP_WM_STATE:
    _cairo_hash_table_destroy (device->wm_states);
  CLEANUP_WM_KERNEL:
    _cairo_hash_table_destroy (device->wm_kernels);
  CLEANUP_CC:
    _cairo_hash_table_destroy (device->cc_states);
  CLEANUP_SF:
    _cairo_hash_table_destroy (device->sf_states);
  CLEANUP_INTEL:
    intel_device_fini (&device->intel);
  CLEANUP:
    free (device);
    return (cairo_drm_device_t *) _cairo_device_create_in_error (status);
}