1 /* Cairo - a vector graphics library with display and print output
3 * Copyright © 2009 Kristian Høgsberg
4 * Copyright © 2009 Chris Wilson
5 * Copyright © 2009 Intel Corporation
7 * This library is free software; you can redistribute it and/or
8 * modify it either under the terms of the GNU Lesser General Public
9 * License version 2.1 as published by the Free Software Foundation
10 * (the "LGPL") or, at your option, under the terms of the Mozilla
11 * Public License Version 1.1 (the "MPL"). If you do not alter this
12 * notice, a recipient may use your version of this file under either
13 * the MPL or the LGPL.
15 * You should have received a copy of the LGPL along with this library
16 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
18 * You should have received a copy of the MPL along with this library
19 * in the file COPYING-MPL-1.1
21 * The contents of this file are subject to the Mozilla Public License
22 * Version 1.1 (the "License"); you may not use this file except in
23 * compliance with the License. You may obtain a copy of the License at
24 * http://www.mozilla.org/MPL/
26 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
27 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
28 * the specific language governing rights and limitations.
30 * The Original Code is the cairo graphics library.
32 * The Initial Developer of the Original Code is Kristian Høgsberg.
34 * Based on the xf86-intel-driver i965 render acceleration code,
36 * Wang Zhenyu <zhenyu.z.wang@intel.com>
37 * Eric Anholt <eric@anholt.net>
38 * Carl Worth <cworth@redhat.com>
39 * Keith Packard <keithp@keithp.com>
44 * FIXME: Use brw_PLN for [DevCTG-B+]
50 #include "cairo-drm-private.h"
51 #include "cairo-drm-ioctl-private.h"
52 #include "cairo-drm-intel-private.h"
53 #include "cairo-drm-intel-command-private.h"
54 #include "cairo-drm-intel-ioctl-private.h"
55 #include "cairo-drm-i965-private.h"
57 #include "cairo-boxes-private.h"
58 #include "cairo-composite-rectangles-private.h"
59 #include "cairo-default-context-private.h"
60 #include "cairo-error-private.h"
61 #include "cairo-region-private.h"
62 #include "cairo-surface-offset-private.h"
64 #include <sys/ioctl.h>
67 #define I965_MAX_SIZE 8192
69 static const cairo_surface_backend_t i965_surface_backend;
/* Initialise an i965_stream_t with caller-provided backing storage.
 * Usage counters are zeroed and the capacities of the pending-relocation
 * and relocation arrays are recorded.
 * NOTE(review): this listing has gaps (original lines 76-77, 79-82, 86
 * are absent), so the assignments of the data/size arguments are not
 * visible here — presumably stream->data/stream->size are set in the
 * missing lines; confirm against the full source. */
72 i965_stream_init (i965_stream_t *stream,
73 uint8_t *data, uint32_t size,
74 struct i965_pending_relocation *pending, int max_pending,
75 struct drm_i915_gem_relocation_entry *relocations, int max_relocations)
78 stream->used = stream->committed = 0;
/* Pending relocations: patches to apply once the stream's bo exists. */
83 stream->num_pending_relocations = 0;
84 stream->max_pending_relocations = max_pending;
85 stream->pending_relocations = pending;
/* Kernel-visible relocation entries attached to the exec object. */
87 stream->num_relocations = 0;
88 stream->max_relocations = max_relocations;
89 stream->relocations = relocations;
/* Register @bo with the device's execbuffer, the first time it is seen
 * in this batch, and accumulate the read/write domain usage.
 * NOTE(review): the listing omits original lines 94 (presumably the
 * intel_bo_t *bo parameter), 99-100, 102 and 105 (presumably the
 * declaration of 'i') — confirm against the full source. */
93 i965_add_relocation (i965_device_t *device,
95 uint32_t read_domains,
96 uint32_t write_domain)
/* bo->exec == NULL means this bo has not yet been added to the batch. */
98 if (bo->exec == NULL) {
/* Track aggregate GTT demand so submission failures can be diagnosed. */
101 device->exec.gtt_size += bo->base.size;
103 i = device->exec.count++;
104 assert (i < ARRAY_LENGTH (device->exec.exec));
/* Fresh exec object: relocations are attached later, at flush time. */
106 device->exec.exec[i].handle = bo->base.handle;
107 device->exec.exec[i].relocation_count = 0;
108 device->exec.exec[i].relocs_ptr = 0;
109 device->exec.exec[i].alignment = 0;
110 device->exec.exec[i].offset = 0;
111 device->exec.exec[i].flags = 0;
112 device->exec.exec[i].rsvd1 = 0;
113 device->exec.exec[i].rsvd2 = 0;
/* The exec array holds its own reference; remember the back-pointer. */
115 device->exec.bo[i] = intel_bo_reference (bo);
116 bo->exec = &device->exec.exec[i];
/* Queue the bo for post-submission cleanup if not already queued. */
119 if (cairo_list_is_empty (&bo->link))
120 cairo_list_add_tail (&device->flush, &bo->link);
/* A bo may have only one write domain per batch. */
122 assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
123 bo->batch_read_domains |= read_domains;
124 bo->batch_write_domain |= write_domain;
/* Append a kernel relocation entry to @stream pointing at @target_offset
 * within @target, after ensuring @target is part of this batch's exec
 * list.  The presumed_offset lets the kernel skip patching when the bo
 * has not moved.
 * NOTE(review): the listing omits original lines 130 (presumably the
 * intel_bo_t *target parameter), 134 (presumably the 'offset'
 * parameter) and 136-137 (presumably the declaration of 'n') —
 * confirm against the full source. */
128 i965_emit_relocation (i965_device_t *device,
129 i965_stream_t *stream,
131 uint32_t target_offset,
132 uint32_t read_domains,
133 uint32_t write_domain,
138 assert (target_offset < target->base.size);
140 i965_add_relocation (device, target, read_domains, write_domain);
142 n = stream->num_relocations++;
143 assert (n < stream->max_relocations);
145 stream->relocations[n].offset = offset;
146 stream->relocations[n].delta = target_offset;
147 stream->relocations[n].target_handle = target->base.handle;
148 stream->relocations[n].read_domains = read_domains;
149 stream->relocations[n].write_domain = write_domain;
/* Hint for the kernel: where we last saw this bo mapped. */
150 stream->relocations[n].presumed_offset = target->offset;
/* Discard all data and relocations in @stream and bump its serial.
 * NOTE(review): the body of the serial-wrap branch (original lines
 * after 159) is absent from this listing — presumably it skips the
 * reserved serial value 0; confirm against the full source. */
154 i965_stream_reset (i965_stream_t *stream)
156 stream->used = stream->committed = 0;
157 stream->num_relocations = 0;
158 stream->num_pending_relocations = 0;
/* Serial 0 appears to be reserved; handle wrap-around. */
159 if (++stream->serial == 0)
/* Upload @stream's contents into a freshly created bo, patch the batch
 * with the bo's (presumed) address for every pending relocation, and
 * hand the stream's relocation array to the bo's exec entry.  The local
 * bo reference is dropped afterwards (the exec list keeps its own) and
 * the stream is reset for reuse.
 * NOTE(review): the listing omits several lines (e.g. original 166-169,
 * 175, 179, 181-185) — the declarations of 'bo' and 'n', the NULL check
 * after intel_bo_create, and part of the pending-relocation patching
 * are not visible; confirm against the full source. */
164 i965_stream_commit (i965_device_t *device,
165 i965_stream_t *stream)
170 assert (stream->used);
172 bo = intel_bo_create (&device->intel,
173 stream->used, stream->used,
174 FALSE, I915_TILING_NONE, 0);
176 /* apply pending relocations */
177 for (n = 0; n < stream->num_pending_relocations; n++) {
178 struct i965_pending_relocation *p = &stream->pending_relocations[n];
180 i965_emit_relocation (device, &device->batch, bo,
/* Write the presumed address directly into the batch buffer. */
186 *(uint32_t *) (device->batch.data + p->offset) = bo->offset + p->delta;
189 intel_bo_write (&device->intel, bo, 0, stream->used, stream->data);
191 if (stream->num_relocations) {
192 assert (bo->exec != NULL);
193 bo->exec->relocs_ptr = (uintptr_t) stream->relocations;
194 bo->exec->relocation_count = stream->num_relocations;
/* Drop our reference; device->exec.bo[] still holds one. */
197 intel_bo_destroy (&device->intel, bo);
199 i965_stream_reset (stream);
/* Hash-table foreach callback: remove a cached SF state entry and
 * return it to the device's freelist. */
203 sf_states_pluck (void *entry, void *closure)
205 i965_device_t *device = closure;
207 _cairo_hash_table_remove (device->sf_states, entry);
208 _cairo_freelist_free (&device->sf_freelist, entry);
/* Hash-table foreach callback: remove a cached CC state entry and
 * return it to the device's freelist. */
212 cc_offsets_pluck (void *entry, void *closure)
214 i965_device_t *device = closure;
216 _cairo_hash_table_remove (device->cc_states, entry);
217 _cairo_freelist_free (&device->cc_freelist, entry);
/* Hash-table foreach callback: remove a cached WM kernel entry and
 * return it to the device's freelist. */
221 wm_kernels_pluck (void *entry, void *closure)
223 i965_device_t *device = closure;
225 _cairo_hash_table_remove (device->wm_kernels, entry);
226 _cairo_freelist_free (&device->wm_kernel_freelist, entry);
/* Hash-table foreach callback: remove a cached WM state entry and
 * return it to the device's freelist. */
230 wm_states_pluck (void *entry, void *closure)
232 i965_device_t *device = closure;
234 _cairo_hash_table_remove (device->wm_states, entry);
235 _cairo_freelist_free (&device->wm_state_freelist, entry);
/* Hash-table foreach callback: remove a cached WM binding-table entry
 * and return it to the device's freelist. */
239 wm_bindings_pluck (void *entry, void *closure)
241 i965_device_t *device = closure;
243 _cairo_hash_table_remove (device->wm_bindings, entry);
244 _cairo_freelist_free (&device->wm_binding_freelist, entry);
/* Hash-table foreach callback: remove a cached sampler entry and
 * return it to the device's freelist. */
248 samplers_pluck (void *entry, void *closure)
250 i965_device_t *device = closure;
252 _cairo_hash_table_remove (device->samplers, entry);
253 _cairo_freelist_free (&device->sampler_freelist, entry);
/* Empty all cached hardware-state hash tables (via the *_pluck
 * callbacks), invalidate the cached VS and border-color offsets, and
 * drop the bo holding the general state.
 * NOTE(review): the foreach callback/closure argument lines (original
 * 260-261, 264-265, etc.) are absent from this listing — presumably
 * each foreach is passed the matching *_pluck callback and 'device';
 * confirm against the full source. */
257 i965_general_state_reset (i965_device_t *device)
259 _cairo_hash_table_foreach (device->sf_states,
263 _cairo_hash_table_foreach (device->cc_states,
267 _cairo_hash_table_foreach (device->wm_kernels,
271 _cairo_hash_table_foreach (device->wm_states,
275 _cairo_hash_table_foreach (device->wm_bindings,
279 _cairo_hash_table_foreach (device->samplers,
/* (uint32_t) -1 marks the cached offsets as invalid. */
283 device->vs_offset = (uint32_t) -1;
284 device->border_color_offset = (uint32_t) -1;
286 if (device->general_state != NULL) {
287 intel_bo_destroy (&device->intel, device->general_state);
288 device->general_state = NULL;
/* Reset per-batch device state after a submission (or on error):
 * clear the exec list, re-seed the GTT-size estimate, invalidate all
 * cached state hashes and vertex bookkeeping.
 * NOTE(review): original lines 297-300 (rest of the gtt_size
 * expression), 305 and 308-310 are absent from this listing. */
293 i965_device_reset (i965_device_t *device)
295 device->exec.count = 0;
/* Start the GTT estimate with the fixed per-batch streams. */
296 device->exec.gtt_size = I965_VERTEX_SIZE +
/* (uint32_t) -1 marks each cached state hash as invalid. */
301 device->sf_state.entry.hash = (uint32_t) -1;
302 device->wm_state.entry.hash = (uint32_t) -1;
303 device->wm_binding.entry.hash = (uint32_t) -1;
304 device->cc_state.entry.hash = (uint32_t) -1;
306 device->target = NULL;
307 device->source = NULL;
311 device->draw_rectangle = (uint32_t) -1;
313 device->vertex_type = (uint32_t) -1;
314 device->vertex_size = 0;
315 device->rectangle_size = 0;
316 device->last_vertex_size = 0;
318 device->constants = NULL;
319 device->constants_size = 0;
321 device->have_urb_fences = FALSE;
/* Submit the accumulated batch to the kernel via
 * DRM_IOCTL_I915_GEM_EXECBUFFER2 (retrying on EINTR), then perform
 * post-submission bookkeeping for every bo in the exec list: record
 * the kernel-assigned offset, clear batch domains, mark the backing
 * pages purgeable (I915_MADV_DONTNEED), and drop the exec reference.
 * Returns CAIRO_STATUS_NO_MEMORY / CAIRO_STATUS_DEVICE_ERROR on
 * ioctl failure.
 * NOTE(review): several lines are absent from this listing (e.g.
 * original 329-330, 335-336, 340-343, 349-352, 356, 358, 364, 366,
 * 370-371, 375-376, 380-381, 383, 386-391, 394, 398, 400, 402-404):
 * the declarations of 'i'/'ret', the ENOSPC vs other-errno
 * discrimination, the debug-dump guards, and parts of the cleanup
 * loop are not visible; confirm against the full source. */
324 static cairo_status_t
325 i965_exec (i965_device_t *device, uint32_t offset)
327 struct drm_i915_gem_execbuffer2 execbuf;
328 cairo_status_t status = CAIRO_STATUS_SUCCESS;
331 execbuf.buffers_ptr = (uintptr_t) device->exec.exec;
332 execbuf.buffer_count = device->exec.count;
333 execbuf.batch_start_offset = offset;
334 execbuf.batch_len = device->batch.used;
337 execbuf.num_cliprects = 0;
338 execbuf.cliprects_ptr = 0;
339 execbuf.flags = I915_GEM_3D_PIPELINE;
/* Debug dump of the submission (guard lines not visible here). */
344 printf ("exec: offset=%d, length=%d, buffers=%d\n",
345 offset, device->batch.used, device->exec.count);
346 intel_dump_batchbuffer ((uint32_t *) device->batch.data,
348 device->intel.base.chip_id);
/* Retry the ioctl if interrupted by a signal. */
353 ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
354 } while (ret != 0 && errno == EINTR);
355 if (unlikely (ret)) {
357 status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
359 status = _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
361 fprintf (stderr, "Batch submission failed: %d\n", errno);
362 fprintf (stderr, " gtt size: %zd/%zd\n",
363 device->exec.gtt_size, device->intel.gtt_avail_size);
365 fprintf (stderr, " %d buffers:\n",
367 for (i = 0; i < device->exec.count; i++) {
368 fprintf (stderr, " exec[%d] = %d\n",
369 i, device->exec.bo[i]->base.size);
372 intel_dump_batchbuffer ((uint32_t *) device->batch.data,
374 device->intel.base.chip_id);
377 /* XXX any write target within the batch should now be in error */
378 for (i = 0; i < device->exec.count; i++) {
379 intel_bo_t *bo = device->exec.bo[i];
/* Remember where the kernel placed this bo for future presumed offsets. */
382 bo->offset = device->exec.exec[i].offset;
384 bo->batch_read_domains = 0;
385 bo->batch_write_domain = 0;
/* Let the kernel reclaim the pages under memory pressure. */
392 ret = intel_bo_madvise (&device->intel, bo, I915_MADV_DONTNEED);
393 /* ignore immediate notification of purging */
395 cairo_list_del (&bo->cache_list);
396 cairo_list_init (&bo->link);
397 intel_bo_destroy (&device->intel, bo);
399 cairo_list_init (&device->flush);
401 device->exec.count = 0;
/* Round a byte count up in units of 8 KiB pages.
 * NOTE(review): the remainder of the body (original lines 410-421) is
 * absent from this listing — presumably it converts the page count
 * back to a bo size, possibly with further rounding; confirm against
 * the full source. */
406 static inline uint32_t
407 next_bo_size (uint32_t v)
409 v = (v + 8191) / 8192;
/* Copy @stream's data into @bo at @offset, then for each of the
 * stream's pending relocations emit a batch relocation against @bo and
 * patch the batch with the bo's presumed address (shifted by @offset,
 * since the stream no longer starts at the beginning of the bo).
 * NOTE(review): the listing omits original lines 424 ('bo' parameter),
 * 426 ('offset' parameter), 428-429 ('n' declaration), 432-433 and
 * 438-443 (relocation arguments / patch guard) — confirm against the
 * full source. */
423 _copy_to_bo_and_apply_relocations (i965_device_t *device,
425 i965_stream_t *stream,
430 intel_bo_write (&device->intel, bo,
431 offset, stream->used,
434 for (n = 0; n < stream->num_pending_relocations; n++) {
435 struct i965_pending_relocation *p = &stream->pending_relocations[n];
437 i965_emit_relocation (device, &device->batch, bo,
/* Patch the batch with the presumed address of the relocated data. */
444 *(uint32_t *) (device->batch.data + p->offset) =
445 bo->offset + p->delta + offset;
/* Flush all accumulated GPU state for @device:
 *  1. finish the batch (MI_BATCH_BUFFER_END + qword-alignment pad);
 *  2. commit the general state, packing the surface state into the
 *     tail of the general-state bo when there is room (the general
 *     state is kept across flushes — only newly-used bytes trigger a
 *     new bo);
 *  3. where the combined size allows, pack vertex + surface + batch
 *     streams into a single bo, merging their relocation arrays and
 *     rebasing relocation offsets; otherwise commit each stream into
 *     its own bo;
 *  4. submit via i965_exec() and reset all streams and device state.
 * Returns CAIRO_STATUS_SUCCESS, or NO_MEMORY if a bo allocation or
 * the submission fails.
 * NOTE(review): this listing omits many lines throughout (e.g. the
 * declarations of 'bo'/'n', the OUT_BATCH padding call after the
 * alignment test, several closing braces, the size arguments to the
 * combined intel_bo_create, and the ordering of the final exec/reset
 * sequence) — the commentary below describes only what is visible;
 * confirm details against the full source before relying on them. */
451 i965_device_flush (i965_device_t *device)
453 cairo_status_t status;
454 uint32_t aligned, max;
458 if (device->batch.used == 0)
459 return CAIRO_STATUS_SUCCESS;
461 i965_flush_vertices (device);
463 OUT_BATCH (MI_BATCH_BUFFER_END);
464 /* Emit a padding dword if we aren't going to be quad-word aligned. */
465 if (device->batch.used & 4)
/* Debug dump of per-stream usage. */
469 printf ("device flush: vertex=%d, constant=%d, surface=%d, general=%d, batch=%d\n",
471 device->constant.used,
472 device->surface.used,
473 device->general.used,
477 /* can we pack the surface state into the tail of the general state? */
478 if (device->general.used == device->general.committed) {
/* No new general state this flush: re-reference the existing bo. */
479 if (device->general.used) {
480 assert (device->general.num_pending_relocations == 1);
481 assert (device->general_state != NULL);
482 i965_emit_relocation (device, &device->batch,
483 device->general_state,
484 device->general.pending_relocations[0].delta,
485 device->general.pending_relocations[0].read_domains,
486 device->general.pending_relocations[0].write_domain,
487 device->general.pending_relocations[0].offset);
/* Patch the batch only if the bo already has a known offset. */
489 if (device->general_state->offset) {
490 *(uint32_t *) (device->batch.data +
491 device->general.pending_relocations[0].offset) =
492 device->general_state->offset +
493 device->general.pending_relocations[0].delta;
/* New general state: replace the old bo with a fresh one. */
497 assert (device->general.num_pending_relocations == 1);
498 if (device->general_state != NULL) {
499 intel_bo_destroy (&device->intel, device->general_state);
500 device->general_state = NULL;
503 bo = intel_bo_create (&device->intel,
504 device->general.used,
505 device->general.used,
506 FALSE, I915_TILING_NONE, 0);
507 if (unlikely (bo == NULL))
508 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
/* 32-byte-align the surface state behind the general state. */
510 aligned = (device->general.used + 31) & -32;
511 if (device->surface.used &&
512 aligned + device->surface.used <= bo->base.size)
/* Both states fit in one bo: rebase surface relocations by 'aligned'. */
514 _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
515 _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);
517 if (device->surface.num_relocations) {
518 for (n = 0; n < device->surface.num_relocations; n++)
519 device->surface.relocations[n].offset += aligned;
521 assert (bo->exec != NULL);
522 bo->exec->relocs_ptr = (uintptr_t) device->surface.relocations;
523 bo->exec->relocation_count = device->surface.num_relocations;
526 i965_stream_reset (&device->surface);
/* Surface state did not fit: commit general state alone. */
530 _copy_to_bo_and_apply_relocations (device, bo, &device->general, 0);
533 /* Note we don't reset the general state, just mark what data we've committed. */
534 device->general.committed = device->general.used;
535 device->general_state = bo;
537 device->general.num_pending_relocations = 0;
539 /* Combine vertex+constant+surface+batch streams? */
/* Estimate the combined size ('aligned') and the largest single
 * stream ('max'); combining pays off only if everything fits in the
 * bo size class the largest stream needs anyway. */
540 max = aligned = device->vertex.used;
541 if (device->surface.used) {
542 aligned = (aligned + 63) & -64;
543 aligned += device->surface.used;
544 if (device->surface.used > max)
545 max = device->surface.used;
547 aligned = (aligned + 63) & -64;
548 aligned += device->batch.used;
549 if (device->batch.used > max)
550 max = device->batch.used;
551 if (aligned <= next_bo_size (max)) {
552 int batch_num_relocations;
557 bo = intel_bo_create (&device->intel,
559 FALSE, I915_TILING_NONE, 0);
560 if (unlikely (bo == NULL))
561 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
563 assert (aligned <= bo->base.size);
565 if (device->vertex.used)
566 _copy_to_bo_and_apply_relocations (device, bo, &device->vertex, 0);
568 aligned = device->vertex.used;
570 batch_num_relocations = device->batch.num_relocations;
571 if (device->surface.used) {
572 aligned = (aligned + 63) & -64;
573 _copy_to_bo_and_apply_relocations (device, bo, &device->surface, aligned);
/* Fold the surface relocations into the batch's array, rebased. */
575 batch_num_relocations = device->batch.num_relocations;
576 if (device->surface.num_relocations) {
577 assert (device->batch.num_relocations + device->surface.num_relocations < device->batch.max_relocations);
579 memcpy (device->batch.relocations + device->batch.num_relocations,
580 device->surface.relocations,
581 sizeof (device->surface.relocations[0]) * device->surface.num_relocations);
583 for (n = 0; n < device->surface.num_relocations; n++)
584 device->batch.relocations[device->batch.num_relocations + n].offset += aligned;
586 device->batch.num_relocations += device->surface.num_relocations;
589 aligned += device->surface.used;
/* Finally append the batch itself, 64-byte aligned, and rebase its
 * own relocations to the new start offset. */
592 aligned = (aligned + 63) & -64;
593 intel_bo_write (&device->intel, bo,
594 aligned, device->batch.used,
597 for (n = 0; n < batch_num_relocations; n++)
598 device->batch.relocations[n].offset += aligned;
/* If the combined bo is already last in the exec list, reuse that
 * entry; otherwise append a new one and transfer ownership. */
600 if (device->exec.bo[device->exec.count-1] == bo) {
601 assert (bo->exec == &device->exec.exec[device->exec.count-1]);
603 bo->exec->relocation_count = device->batch.num_relocations;
604 bo->exec->relocs_ptr = (uintptr_t) device->batch.relocations;
605 intel_bo_destroy (&device->intel, bo);
607 assert (bo->exec == NULL);
609 n = device->exec.count++;
610 device->exec.exec[n].handle = bo->base.handle;
611 device->exec.exec[n].relocation_count = device->batch.num_relocations;
612 device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
613 device->exec.exec[n].alignment = 0;
614 device->exec.exec[n].offset = 0;
615 device->exec.exec[n].flags = 0;
616 device->exec.exec[n].rsvd1 = 0;
617 device->exec.exec[n].rsvd2 = 0;
619 /* transfer ownership to the exec */
620 device->exec.bo[n] = bo;
/* Streams too large to combine: commit each one separately. */
623 i965_stream_commit (device, &device->vertex);
624 if (device->surface.used)
625 i965_stream_commit (device, &device->surface);
627 bo = intel_bo_create (&device->intel,
628 device->batch.used, device->batch.used,
629 FALSE, I915_TILING_NONE, 0);
630 if (unlikely (bo == NULL))
631 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
633 intel_bo_write (&device->intel, bo,
634 0, device->batch.used,
637 n = device->exec.count++;
638 device->exec.exec[n].handle = bo->base.handle;
639 device->exec.exec[n].relocation_count = device->batch.num_relocations;
640 device->exec.exec[n].relocs_ptr = (uintptr_t) device->batch.relocations;
641 device->exec.exec[n].alignment = 0;
642 device->exec.exec[n].offset = 0;
643 device->exec.exec[n].flags = 0;
644 device->exec.exec[n].rsvd1 = 0;
645 device->exec.exec[n].rsvd2 = 0;
647 /* transfer ownership to the exec */
648 device->exec.bo[n] = bo;
652 status = i965_exec (device, aligned);
654 i965_stream_reset (&device->vertex);
655 i965_stream_reset (&device->surface);
656 i965_stream_reset (&device->batch);
658 intel_glyph_cache_unpin (&device->intel);
659 intel_snapshot_cache_thaw (&device->intel);
661 i965_device_reset (device);
/* Create a surface similar to @abstract_other, capped at the hardware
 * maximum of 8192x8192.  If the requested content matches the other
 * surface's content, reuse its format; otherwise derive a format from
 * the content.
 * NOTE(review): original lines 675 (the over-size return value,
 * presumably NULL or an error surface), 680, 682 and 684-685 (the
 * remaining arguments to i965_surface_create_internal) are absent
 * from this listing. */
666 static cairo_surface_t *
667 i965_surface_create_similar (void *abstract_other,
668 cairo_content_t content,
669 int width, int height)
671 i965_surface_t *other;
672 cairo_format_t format;
674 if (width > 8192 || height > 8192)
677 other = abstract_other;
678 if (content == other->intel.drm.base.content)
679 format = other->intel.drm.format;
681 format = _cairo_format_from_content (content);
683 return i965_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
686 I965_TILING_DEFAULT, TRUE);
/* Surface-backend finish hook: delegate to the shared intel surface
 * finalizer. */
689 static cairo_status_t
690 i965_surface_finish (void *abstract_surface)
692 i965_surface_t *surface = abstract_surface;
694 return intel_surface_finish (&surface->intel);
/* Surface-backend flush hook.  If the surface has an active fallback
 * image, flush that instead.  Otherwise, if the surface's bo is part
 * of the current batch (bo->exec != NULL) and the surface is not
 * finished, submit the device's pending work so the rendering becomes
 * visible.
 * NOTE(review): original lines 699, 702-703 (presumably the check of
 * 'flags' guarding the early CAIRO_STATUS_SUCCESS return) and 720-726
 * are absent from this listing. */
697 static cairo_status_t
698 i965_surface_flush (void *abstract_surface, unsigned flags)
700 i965_surface_t *surface = abstract_surface;
701 cairo_status_t status = CAIRO_STATUS_SUCCESS;
704 return CAIRO_STATUS_SUCCESS;
706 if (surface->intel.drm.fallback != NULL)
707 return intel_surface_flush (abstract_surface);
709 /* Forgo flushing on finish as the user cannot access the surface directly. */
710 if (! surface->intel.drm.base.finished &&
711 to_intel_bo (surface->intel.drm.bo)->exec != NULL)
713 status = cairo_device_acquire (surface->intel.drm.base.device);
714 if (likely (status == CAIRO_STATUS_SUCCESS)) {
715 i965_device_t *device;
717 device = i965_device (surface);
718 status = i965_device_flush (device);
719 cairo_device_release (&device->intel.base.base);
/* Span-renderer callback: feed every box from the closure's
 * cairo_boxes_t into a rectangular scan converter and generate spans
 * for @renderer.  Used as the fallback when boxes are not
 * pixel-aligned.
 * NOTE(review): original lines 741-742 ('i' declaration), 746-749
 * (error-path cleanup after a failed add_box) and 751-755 are absent
 * from this listing. */
728 static cairo_status_t
729 _composite_boxes_spans (void *closure,
730 cairo_span_renderer_t *renderer,
731 const cairo_rectangle_int_t *extents)
733 cairo_boxes_t *boxes = closure;
734 cairo_rectangular_scan_converter_t converter;
735 struct _cairo_boxes_chunk *chunk;
736 cairo_status_t status;
738 _cairo_rectangular_scan_converter_init (&converter, extents);
739 for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
740 cairo_box_t *box = chunk->base;
743 for (i = 0; i < chunk->count; i++) {
744 status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
745 if (unlikely (status))
750 status = converter.base.generate (&converter.base, renderer);
753 converter.base.destroy (&converter.base);
/* For an unbounded operator, CLEAR the area of the unbounded extents
 * not covered by the bounded extents.  When bounded is empty the whole
 * unbounded rectangle is cleared; otherwise up to four edge rectangles
 * (top, left, right, bottom) are emitted around the bounded region.
 * A non-region clip is applied through the shader.
 * NOTE(review): original lines omitted from this listing include 760
 * (presumably the clip parameter), 765, 782-784 (cleanup before the
 * early success return), 791, 797-798 and 801-803 (error-path
 * cleanup), and the x/y arguments on several add_rectangle calls
 * (820, 834, 848, 862) — confirm against the full source. */
758 i965_fixup_unbounded (i965_surface_t *dst,
759 const cairo_composite_rectangles_t *extents,
762 i965_shader_t shader;
763 i965_device_t *device;
764 cairo_status_t status;
766 i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);
769 cairo_region_t *clip_region = NULL;
771 status = _cairo_clip_get_region (clip, &clip_region);
/* NOTE(review): the second operand of this || is a bare constant as
 * written, so the assertion is vacuously true — presumably it was
 * meant to be 'status == CAIRO_INT_STATUS_UNSUPPORTED'. */
772 assert (status == CAIRO_STATUS_SUCCESS || CAIRO_INT_STATUS_UNSUPPORTED);
773 assert (clip_region == NULL);
775 if (status == CAIRO_INT_STATUS_UNSUPPORTED)
776 i965_shader_set_clip (&shader, clip);
/* Bounded == unbounded: nothing outside the drawn area to clear. */
778 if (extents->bounded.width == extents->unbounded.width &&
779 extents->bounded.height == extents->unbounded.height)
781 return CAIRO_STATUS_SUCCESS;
785 status = i965_shader_acquire_pattern (&shader,
787 &_cairo_pattern_clear.base,
788 &extents->unbounded);
789 if (unlikely (status)) {
790 i965_shader_fini (&shader);
794 device = i965_device (dst);
795 status = cairo_device_acquire (&device->intel.base.base);
796 if (unlikely (status))
799 status = i965_shader_commit (&shader, device);
800 if (unlikely (status)) {
/* Degenerate bounded region: clear the whole unbounded rectangle. */
804 if (extents->bounded.width == 0 || extents->bounded.height == 0) {
805 i965_shader_add_rectangle (&shader,
806 extents->unbounded.x,
807 extents->unbounded.y,
808 extents->unbounded.width,
809 extents->unbounded.height);
/* Top strip above the bounded region. */
811 if (extents->bounded.y != extents->unbounded.y) {
812 cairo_rectangle_int_t rect;
814 rect.x = extents->unbounded.x;
815 rect.y = extents->unbounded.y;
816 rect.width = extents->unbounded.width;
817 rect.height = extents->bounded.y - rect.y;
819 i965_shader_add_rectangle (&shader,
821 rect.width, rect.height);
/* Left strip beside the bounded region. */
825 if (extents->bounded.x != extents->unbounded.x) {
826 cairo_rectangle_int_t rect;
828 rect.x = extents->unbounded.x;
829 rect.y = extents->bounded.y;
830 rect.width = extents->bounded.x - extents->unbounded.x;
831 rect.height = extents->bounded.height;
833 i965_shader_add_rectangle (&shader,
835 rect.width, rect.height);
/* Right strip beside the bounded region. */
839 if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
840 cairo_rectangle_int_t rect;
842 rect.x = extents->bounded.x + extents->bounded.width;
843 rect.y = extents->bounded.y;
844 rect.width = extents->unbounded.x + extents->unbounded.width - rect.x;
845 rect.height = extents->bounded.height;
847 i965_shader_add_rectangle (&shader,
849 rect.width, rect.height);
/* Bottom strip below the bounded region. */
853 if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
854 cairo_rectangle_int_t rect;
856 rect.x = extents->unbounded.x;
857 rect.y = extents->bounded.y + extents->bounded.height;
858 rect.width = extents->unbounded.width;
859 rect.height = extents->unbounded.y + extents->unbounded.height - rect.y;
861 i965_shader_add_rectangle (&shader,
863 rect.width, rect.height);
867 i965_shader_fini (&shader);
869 cairo_device_release (&device->intel.base.base);
/* For an unbounded operator over multiple boxes: compute the region
 * inside the unbounded extents but outside the drawn boxes, by
 * tessellating an "inverted" rectangle (p1/p2 swapped in x) together
 * with the drawn boxes under the winding rule, then emit CLEAR
 * rectangles for the result.  Falls back to i965_fixup_unbounded()
 * for the single-box case.  With a region clip, the clear boxes are
 * limited to the clip's rectangles instead.
 * NOTE(review): omitted lines include 876 (clip parameter), 885-886
 * ('clear'/'box'/'i' declarations), 896-897, 904-905, 908, 913,
 * 915-916, 918, 921, 924, 927-928 (tessellate arguments), 930, 932,
 * 935, 938, 944-947, 951-953 (tessellate arguments), 960-961,
 * 964-965 and 974-977 — confirm against the full source. */
873 static cairo_status_t
874 i965_fixup_unbounded_boxes (i965_surface_t *dst,
875 const cairo_composite_rectangles_t *extents,
877 cairo_boxes_t *boxes)
881 cairo_region_t *clip_region = NULL;
882 cairo_status_t status;
883 struct _cairo_boxes_chunk *chunk;
884 i965_shader_t shader;
887 if (boxes->num_boxes <= 1)
888 return i965_fixup_unbounded (dst, extents, clip);
890 i965_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR);
892 status = _cairo_clip_get_region (clip, &clip_region);
/* NOTE(review): second || operand is a bare constant — assertion is
 * vacuously true as written; presumably meant to compare status. */
893 assert (status == CAIRO_STATUS_SUCCESS || CAIRO_INT_STATUS_UNSUPPORTED);
894 if (status == CAIRO_INT_STATUS_UNSUPPORTED)
895 i965_shader_set_clip (&shader, clip);
898 status = i965_shader_acquire_pattern (&shader,
900 &_cairo_pattern_clear.base,
901 &extents->unbounded);
902 if (unlikely (status)) {
903 i965_shader_fini (&shader);
907 _cairo_boxes_init (&clear);
/* Inverted box (p1.x > p2.x) over the unbounded extents: combined
 * with the drawn boxes it tessellates to the uncovered area. */
909 box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
910 box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
911 box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
912 box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);
914 if (clip_region == NULL) {
917 _cairo_boxes_init (&tmp);
919 status = _cairo_boxes_add (&tmp, &box);
920 assert (status == CAIRO_STATUS_SUCCESS);
/* Temporarily chain the caller's chunks onto tmp to tessellate
 * both sets together without copying. */
922 tmp.chunks.next = &boxes->chunks;
923 tmp.num_boxes += boxes->num_boxes;
925 status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
926 CAIRO_FILL_RULE_WINDING,
/* Unchain before the caller's boxes can be freed through tmp. */
929 tmp.chunks.next = NULL;
/* Region clip: limit the clear boxes to the clip rectangles. */
931 pixman_box32_t *pbox;
933 pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
934 _cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);
936 status = _cairo_boxes_add (&clear, &box);
937 assert (status == CAIRO_STATUS_SUCCESS);
939 for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
940 for (i = 0; i < chunk->count; i++) {
941 status = _cairo_boxes_add (&clear, &chunk->base[i]);
942 if (unlikely (status)) {
943 _cairo_boxes_fini (&clear);
949 status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
950 CAIRO_FILL_RULE_WINDING,
954 if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
955 i965_device_t *device;
957 device = i965_device (dst);
958 status = cairo_device_acquire (&device->intel.base.base);
959 if (unlikely (status))
962 status = i965_shader_commit (&shader, device);
963 if (unlikely (status))
/* Emit each tessellated box as a CLEAR rectangle. */
966 for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
967 for (i = 0; i < chunk->count; i++) {
968 int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
969 int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
970 int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
971 int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);
973 i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
978 cairo_device_release (&device->intel.base.base);
980 i965_shader_fini (&shader);
983 _cairo_boxes_fini (&clear);
/* Fast path: composite pixel-aligned boxes directly as rectangles via
 * the shader pipeline.  Returns CAIRO_INT_STATUS_UNSUPPORTED when the
 * boxes are not pixel-aligned and antialiasing is requested, so the
 * caller can fall back to the span renderer.  Handles unbounded
 * operators via i965_fixup_unbounded_boxes().
 * NOTE(review): omitted lines include 990 ('op' parameter), 994 (clip
 * parameter), 1003-1004 ('i' declaration), 1009-1010, 1014-1016
 * (acquire_pattern arguments), 1018-1020, 1026-1027, 1031-1032,
 * 1035-1036 (error-path labels), 1047-1049, 1052-1053 and 1055-1059 —
 * confirm against the full source. */
988 static cairo_status_t
989 _composite_boxes (i965_surface_t *dst,
991 const cairo_pattern_t *pattern,
992 cairo_boxes_t *boxes,
993 cairo_antialias_t antialias,
995 const cairo_composite_rectangles_t *extents)
997 cairo_bool_t need_clip_surface = FALSE;
998 cairo_region_t *clip_region = NULL;
999 const struct _cairo_boxes_chunk *chunk;
1000 cairo_status_t status;
1001 i965_shader_t shader;
1002 i965_device_t *device;
1005 /* If the boxes are not pixel-aligned, we will need to compute a real mask */
1006 if (antialias != CAIRO_ANTIALIAS_NONE) {
1007 if (! boxes->is_pixel_aligned)
1008 return CAIRO_INT_STATUS_UNSUPPORTED;
1011 i965_shader_init (&shader, dst, op);
1013 status = i965_shader_acquire_pattern (&shader,
1017 if (unlikely (status))
1021 status = _cairo_clip_get_region (clip, &clip_region);
/* NOTE(review): second || operand is a bare constant — assertion is
 * vacuously true as written; presumably meant to compare status. */
1022 assert (status == CAIRO_STATUS_SUCCESS || CAIRO_INT_STATUS_UNSUPPORTED);
1023 need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
1024 if (need_clip_surface)
1025 i965_shader_set_clip (&shader, clip);
1028 device = i965_device (dst);
1029 status = cairo_device_acquire (&device->intel.base.base);
1030 if (unlikely (status))
1033 status = i965_shader_commit (&shader, i965_device (dst));
1034 if (unlikely (status))
1037 for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
1038 cairo_box_t *box = chunk->base;
1039 for (i = 0; i < chunk->count; i++) {
1040 int x1 = _cairo_fixed_integer_round (box[i].p1.x);
1041 int y1 = _cairo_fixed_integer_round (box[i].p1.y);
1042 int x2 = _cairo_fixed_integer_round (box[i].p2.x);
1043 int y2 = _cairo_fixed_integer_round (box[i].p2.y);
/* Skip empty/degenerate boxes after rounding. */
1045 if (x2 > x1 && y2 > y1)
1046 i965_shader_add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
1050 if (! extents->is_bounded)
1051 status = i965_fixup_unbounded_boxes (dst, extents, clip, boxes);
1054 cairo_device_release (&device->intel.base.base);
1056 i965_shader_fini (&shader);
/* Dispatch boxes compositing: empty set → success (bounded) or
 * unbounded fixup; pixel-aligned boxes → _composite_boxes fast path;
 * otherwise render through the span machinery with an implicit mask.
 * NOTE(review): omitted lines include 1068-1069 (clip parameter),
 * 1075, 1077-1078, 1082-1083 (the return after the fast path),
 * 1085-1086 (comment continuation) and 1089-1090 (the remaining
 * arguments to i965_clip_and_composite_spans) — confirm against the
 * full source. */
1061 static cairo_status_t
1062 _clip_and_composite_boxes (i965_surface_t *dst,
1063 cairo_operator_t op,
1064 const cairo_pattern_t *src,
1065 cairo_boxes_t *boxes,
1066 cairo_antialias_t antialias,
1067 const cairo_composite_rectangles_t *extents,
1070 cairo_status_t status;
1072 if (boxes->num_boxes == 0) {
1073 if (extents->is_bounded)
1074 return CAIRO_STATUS_SUCCESS;
1076 return i965_fixup_unbounded (dst, extents, clip);
1079 /* Use a fast path if the boxes are pixel aligned */
1080 status = _composite_boxes (dst, op, src, boxes, antialias, clip, extents);
1081 if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1084 /* Otherwise render the boxes via an implicit mask and composite in the usual
1087 return i965_clip_and_composite_spans (dst, op, src, antialias,
1088 _composite_boxes_spans, boxes,
/* Backend paint entry point: compute composite extents, reduce the
 * clip to boxes, and composite via _clip_and_composite_boxes().
 * A clip that fully contains the extents is dropped; otherwise a
 * local copy is taken so _cairo_clip_to_boxes may modify it.
 * NOTE(review): omitted lines include 1096-1097 (clip parameter),
 * 1108, 1112-1113 (init_for_paint arguments), 1115-1116, 1118-1120
 * (clip = NULL on containment), 1122-1124, 1127, 1129-1132, 1136
 * (extents/clip arguments), 1138-1140 (free of heap clip_boxes) and
 * 1142-1144 — confirm against the full source. */
1092 static cairo_int_status_t
1093 i965_surface_paint (void *abstract_dst,
1094 cairo_operator_t op,
1095 const cairo_pattern_t *source,
1098 i965_surface_t *dst = abstract_dst;
1099 cairo_composite_rectangles_t extents;
1100 cairo_boxes_t boxes;
/* Start with the embedded box array; _cairo_clip_to_boxes may
 * replace it with a heap allocation. */
1101 cairo_box_t *clip_boxes = boxes.boxes_embedded;
1102 cairo_clip_t local_clip;
1103 cairo_bool_t have_clip = FALSE;
1104 int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
1105 cairo_status_t status;
1107 /* XXX unsupported operators? use pixel shader blending, eventually */
1109 status = _cairo_composite_rectangles_init_for_paint (&extents,
1110 dst->intel.drm.width,
1111 dst->intel.drm.height,
1114 if (unlikely (status))
1117 if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
1121 clip = _cairo_clip_init_copy (&local_clip, clip);
1125 status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
1126 if (unlikely (status)) {
1128 _cairo_clip_fini (&local_clip);
1133 _cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
1134 status = _clip_and_composite_boxes (dst, op, source,
1135 &boxes, CAIRO_ANTIALIAS_DEFAULT,
/* clip_boxes was heap-allocated by _cairo_clip_to_boxes; free path
 * is on the missing lines. */
1137 if (clip_boxes != boxes.boxes_embedded)
1141 _cairo_clip_fini (&local_clip);
/* Backend mask entry point: acquire source and mask patterns into one
 * shader, apply the clip (as rectangles when it reduces to a region,
 * through the shader otherwise), and emit either one rectangle per
 * clip-region rectangle or a single bounded rectangle.  Unbounded
 * operators are fixed up afterwards.
 * NOTE(review): omitted lines include 1151-1152 (clip parameter),
 * 1162, 1168-1169, 1171-1172 (clip = NULL on containment), 1178-1182,
 * 1185, 1187-1189 and 1194-1196 (acquire_pattern arguments),
 * 1191-1192, 1198-1200, 1206-1207, 1211-1212, 1215-1216 (error
 * labels), 1223, 1227 (rect.x/rect.y arguments), 1229-1230,
 * 1232-1233 (bounded x/y arguments), 1236-1237, 1240-1241,
 * 1243, 1245 (have_clip guard) and 1247-1250 — confirm against the
 * full source. */
1146 static cairo_int_status_t
1147 i965_surface_mask (void *abstract_dst,
1148 cairo_operator_t op,
1149 const cairo_pattern_t *source,
1150 const cairo_pattern_t *mask,
1153 i965_surface_t *dst = abstract_dst;
1154 cairo_composite_rectangles_t extents;
1155 i965_shader_t shader;
1156 i965_device_t *device;
1157 cairo_clip_t local_clip;
1158 cairo_region_t *clip_region = NULL;
1159 cairo_bool_t need_clip_surface = FALSE;
1160 cairo_bool_t have_clip = FALSE;
1161 cairo_status_t status;
1163 status = _cairo_composite_rectangles_init_for_mask (&extents,
1164 dst->intel.drm.width,
1165 dst->intel.drm.height,
1166 op, source, mask, clip);
1167 if (unlikely (status))
1170 if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
/* Intersect the copied clip with the bounded extents. */
1173 if (clip != NULL && extents.is_bounded) {
1174 clip = _cairo_clip_init_copy (&local_clip, clip);
1175 status = _cairo_clip_rectangle (clip, &extents.bounded);
1176 if (unlikely (status)) {
1177 _cairo_clip_fini (&local_clip);
1184 i965_shader_init (&shader, dst, op);
1186 status = i965_shader_acquire_pattern (&shader,
1190 if (unlikely (status))
1193 status = i965_shader_acquire_pattern (&shader,
1197 if (unlikely (status))
1201 status = _cairo_clip_get_region (clip, &clip_region);
/* NOTE(review): second || operand is a bare constant — assertion is
 * vacuously true as written; presumably meant to compare status. */
1202 assert (status == CAIRO_STATUS_SUCCESS || CAIRO_INT_STATUS_UNSUPPORTED);
1203 need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
1204 if (need_clip_surface)
1205 i965_shader_set_clip (&shader, clip);
1208 device = i965_device (dst);
1209 status = cairo_device_acquire (&device->intel.base.base);
1210 if (unlikely (status))
1213 status = i965_shader_commit (&shader, device);
1214 if (unlikely (status))
/* Region clip: one rectangle per clip rectangle. */
1217 if (clip_region != NULL) {
1218 unsigned int n, num_rectangles;
1220 num_rectangles = cairo_region_num_rectangles (clip_region);
1221 for (n = 0; n < num_rectangles; n++) {
1222 cairo_rectangle_int_t rect;
1224 cairo_region_get_rectangle (clip_region, n, &rect);
1226 i965_shader_add_rectangle (&shader,
1228 rect.width, rect.height);
/* No region clip: a single rectangle covering the bounded extents. */
1231 i965_shader_add_rectangle (&shader,
1234 extents.bounded.width,
1235 extents.bounded.height);
1238 if (! extents.is_bounded)
1239 status = i965_fixup_unbounded (dst, &extents, clip);
1242 cairo_device_release (&device->intel.base.base);
1244 i965_shader_fini (&shader);
1246 _cairo_clip_fini (&local_clip);
/* Closure passed to _composite_polygon_spans: the polygon to scan plus
 * the fill rule and antialias mode to apply.
 * NOTE(review): the 'typedef struct {' opener (original line ~1251)
 * is absent from this listing. */
1252 cairo_polygon_t polygon;
1253 cairo_fill_rule_t fill_rule;
1254 cairo_antialias_t antialias;
1255 } composite_polygon_info_t;
/* Span-renderer callback: scan-convert the closure's polygon within
 * @extents using a botor scan converter and the closure's fill rule,
 * generating spans for @renderer.
 * NOTE(review): original lines 1261, 1265-1266 ('box' declaration),
 * 1271, 1273, 1277 and 1279-1281 are absent from this listing. */
1257 static cairo_status_t
1258 _composite_polygon_spans (void *closure,
1259 cairo_span_renderer_t *renderer,
1260 const cairo_rectangle_int_t *extents)
1262 composite_polygon_info_t *info = closure;
1263 cairo_botor_scan_converter_t converter;
1264 cairo_status_t status;
/* Convert the integer extents into a fixed-point clip box. */
1267 box.p1.x = _cairo_fixed_from_int (extents->x);
1268 box.p1.y = _cairo_fixed_from_int (extents->y);
1269 box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
1270 box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);
1272 _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);
1274 status = converter.base.add_polygon (&converter.base, &info->polygon);
1275 if (likely (status == CAIRO_STATUS_SUCCESS))
1276 status = converter.base.generate (&converter.base, renderer);
1278 converter.base.destroy (&converter.base);
/* Stroke @path onto the i965 surface.
 *
 * Flow: compute the composite extents for the stroke; reduce the clip to a
 * set of boxes; if the stroke is rectilinear, convert it directly to boxes
 * and composite those (fast path); otherwise tessellate the stroke into a
 * polygon and composite it through the span rasterizer
 * (_composite_polygon_spans).  Returns a cairo_int_status_t; the surface
 * backend falls back on CAIRO_INT_STATUS_UNSUPPORTED. */
1283 static cairo_int_status_t
1284 i965_surface_stroke (void *abstract_dst,
1285 cairo_operator_t op,
1286 const cairo_pattern_t *source,
1287 cairo_path_fixed_t *path,
1288 const cairo_stroke_style_t *stroke_style,
1289 const cairo_matrix_t *ctm,
1290 const cairo_matrix_t *ctm_inverse,
1292 cairo_antialias_t antialias,
1295 i965_surface_t *dst = abstract_dst;
1296 cairo_composite_rectangles_t extents;
1297 composite_polygon_info_t info;
/* Small on-stack box cache; _cairo_clip_to_boxes may replace it with a
 * heap allocation, freed at CLEANUP_BOXES below. */
1298 cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
1299 int num_boxes = ARRAY_LENGTH (boxes_stack);
1300 cairo_clip_t local_clip;
1301 cairo_bool_t have_clip = FALSE;
1302 cairo_status_t status;
1304 status = _cairo_composite_rectangles_init_for_stroke (&extents,
1305 dst->intel.drm.width,
1306 dst->intel.drm.height,
1308 path, stroke_style, ctm,
1310 if (unlikely (status))
/* A clip that fully contains the operation extents imposes no
 * restriction and can be ignored. */
1313 if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
/* Work on a local copy so intersection/reduction below does not
 * mutate the caller's clip. */
1317 clip = _cairo_clip_init_copy (&local_clip, clip);
1321 status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
1322 if (unlikely (status)) {
1324 _cairo_clip_fini (&local_clip);
/* Fast path: an axis-aligned stroke decomposes exactly into boxes. */
1329 if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
1330 cairo_boxes_t boxes;
1332 _cairo_boxes_init (&boxes);
1333 _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
1334 status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
1338 if (likely (status == CAIRO_STATUS_SUCCESS)) {
1339 status = _clip_and_composite_boxes (dst, op, source,
1344 _cairo_boxes_fini (&boxes);
/* Anything but UNSUPPORTED (success or hard error) is final. */
1346 if (status != CAIRO_INT_STATUS_UNSUPPORTED)
/* General path: tessellate the stroke outline into a polygon. */
1350 _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);
1352 status = _cairo_path_fixed_stroke_to_polygon (path,
1357 if (unlikely (status))
1358 goto CLEANUP_POLYGON;
1360 if (extents.is_bounded) {
1361 cairo_rectangle_int_t rect;
/* Shrink the composite area to the polygon's actual bounds; an
 * empty intersection means there is nothing to draw. */
1363 _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
1364 if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
1365 goto CLEANUP_POLYGON;
1368 if (info.polygon.num_edges == 0) {
/* Degenerate stroke: only unbounded operators still need the
 * area outside the (empty) shape fixed up. */
1369 if (! extents.is_bounded)
1370 status = i965_fixup_unbounded (dst, &extents, clip);
/* Strokes always rasterize with the winding rule. */
1372 info.fill_rule = CAIRO_FILL_RULE_WINDING;
1373 info.antialias = antialias;
1374 status = i965_clip_and_composite_spans (dst, op, source, antialias,
1375 _composite_polygon_spans, &info,
1380 _cairo_polygon_fini (&info.polygon);
1383 if (clip_boxes != boxes_stack)
1387 _cairo_clip_fini (&local_clip);
/* Fill @path onto the i965 surface.
 *
 * Mirrors i965_surface_stroke(): compute composite extents, reduce the clip
 * to boxes, take the box fast path for rectilinear fills, otherwise
 * tessellate the fill to a polygon and composite via the span rasterizer.
 * Unlike stroking, the caller-supplied fill_rule is honoured. */
1392 static cairo_int_status_t
1393 i965_surface_fill (void *abstract_dst,
1394 cairo_operator_t op,
1395 const cairo_pattern_t*source,
1396 cairo_path_fixed_t *path,
1397 cairo_fill_rule_t fill_rule,
1399 cairo_antialias_t antialias,
1402 i965_surface_t *dst = abstract_dst;
1403 cairo_composite_rectangles_t extents;
1404 composite_polygon_info_t info;
/* On-stack box cache; may be swapped for a heap array by
 * _cairo_clip_to_boxes and freed at CLEANUP_BOXES. */
1405 cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
1406 cairo_clip_t local_clip;
1407 cairo_bool_t have_clip = FALSE;
1408 int num_boxes = ARRAY_LENGTH (boxes_stack);
1409 cairo_status_t status;
1411 status = _cairo_composite_rectangles_init_for_fill (&extents,
1412 dst->intel.drm.width,
1413 dst->intel.drm.height,
1416 if (unlikely (status))
/* Ignore a clip that does not restrict the operation. */
1419 if (clip != NULL && _cairo_clip_contains_extents (clip, &extents))
/* Copy the clip so reduction below leaves the caller's intact. */
1423 clip = _cairo_clip_init_copy (&local_clip, clip);
1427 status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
1428 if (unlikely (status)) {
1430 _cairo_clip_fini (&local_clip);
/* Empty paths are expected to be culled by the caller. */
1435 assert (! _cairo_path_fixed_fill_is_empty (path));
/* Fast path: axis-aligned fills decompose exactly into boxes. */
1437 if (_cairo_path_fixed_fill_is_rectilinear (path)) {
1438 cairo_boxes_t boxes;
1440 _cairo_boxes_init (&boxes);
1441 _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
1442 status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
1445 if (likely (status == CAIRO_STATUS_SUCCESS)) {
1446 status = _clip_and_composite_boxes (dst, op, source,
1451 _cairo_boxes_fini (&boxes);
/* Only UNSUPPORTED falls through to the polygon path. */
1453 if (status != CAIRO_INT_STATUS_UNSUPPORTED)
1457 _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);
1459 status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
1460 if (unlikely (status))
1461 goto CLEANUP_POLYGON;
1463 if (extents.is_bounded) {
1464 cairo_rectangle_int_t rect;
/* Tighten the composite area to the polygon bounds; empty
 * intersection means nothing to draw. */
1466 _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
1467 if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
1468 goto CLEANUP_POLYGON;
1471 if (info.polygon.num_edges == 0) {
/* Degenerate fill: unbounded operators still clear outside. */
1472 if (! extents.is_bounded)
1473 status = i965_fixup_unbounded (dst, &extents, clip);
1475 info.fill_rule = fill_rule;
1476 info.antialias = antialias;
1477 status = i965_clip_and_composite_spans (dst, op, source, antialias,
1478 _composite_polygon_spans, &info,
1483 _cairo_polygon_fini (&info.polygon);
1486 if (clip_boxes != boxes_stack)
1490 _cairo_clip_fini (&local_clip);
/* Surface backend vtable for i965 DRM surfaces.  Unimplemented slots are
 * NULL and fall back to cairo's generic paths; glyph rendering, stroke and
 * the intel source-image hooks are provided above/elsewhere in this file. */
1495 static const cairo_surface_backend_t i965_surface_backend = {
1496 CAIRO_SURFACE_TYPE_DRM,
1497 _cairo_default_context_create,
1499 i965_surface_create_similar,
1500 i965_surface_finish,
1503 intel_surface_acquire_source_image,
1504 intel_surface_release_source_image,
1507 NULL, /* composite */
1509 NULL, /* trapezoids */
1511 NULL, /* check-span */
1513 NULL, /* copy_page */
1514 NULL, /* show_page */
1515 _cairo_drm_surface_get_extents,
1516 NULL, /* old-glyphs */
1517 _cairo_drm_surface_get_font_options,
1520 NULL, /* mark_dirty */
1521 intel_scaled_font_fini,
1522 intel_scaled_glyph_fini,
1526 i965_surface_stroke,
1528 i965_surface_glyphs,
/* Initialize the common intel/drm portion of @surface with the i965 vtable
 * and reset its vertex-stream id.  No buffer object is allocated here. */
1532 i965_surface_init (i965_surface_t *surface,
1533 cairo_drm_device_t *device,
1534 cairo_format_t format,
1535 int width, int height)
1537 intel_surface_init (&surface->intel, &i965_surface_backend, device,
1538 format, width, height);
1539 surface->stream = 0;
/* Return the stride adjusted for the tiling mode: untiled buffers keep the
 * caller's stride, tiled buffers are rounded up to a multiple of 128 bytes
 * (presumably the hardware tile-row width — confirm against the PRM). */
1542 static inline int cairo_const
1543 i965_tiling_stride (uint32_t tiling, int stride)
1545 if (tiling == I915_TILING_NONE)
1548 return (stride + 127) & -128;
/* Round the buffer height up to the granularity required by the tiling
 * mode: 2 rows untiled, 8 rows for X-tiling, 32 rows for Y-tiling. */
1551 static inline int cairo_const
1552 i965_tiling_height (uint32_t tiling, int height)
1556 case I915_TILING_NONE: return (height + 1) & -2;
1557 case I915_TILING_X: return (height + 7) & -8;
1558 case I915_TILING_Y: return (height + 31) & -32;
/* Allocate an i965 surface and, for non-empty dimensions, back it with a
 * freshly created (optionally tiled) GEM buffer object.
 *
 * The stride is derived from a width rounded up to a multiple of 4 pixels,
 * then aligned to 64 bytes and to the tiling requirement; the height is
 * padded per tiling mode.  On bo-allocation failure the half-constructed
 * surface is torn down and a NO_MEMORY error surface is returned. */
1563 i965_surface_create_internal (cairo_drm_device_t *base_dev,
1564 cairo_format_t format,
1565 int width, int height,
1567 cairo_bool_t gpu_target)
1569 i965_surface_t *surface;
1570 cairo_status_t status_ignored;
1572 surface = malloc (sizeof (i965_surface_t));
1573 if (unlikely (surface == NULL))
1574 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
1576 i965_surface_init (surface, base_dev, format, width, height);
1578 if (width && height) {
1579 uint32_t size, stride;
/* Pad width to a multiple of 4 pixels before computing stride. */
1582 width = (width + 3) & -4;
1583 stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
1584 stride = (stride + 63) & ~63; /* 64-byte base alignment */
1585 stride = i965_tiling_stride (tiling, stride);
1586 surface->intel.drm.stride = stride;
1588 height = i965_tiling_height (tiling, height);
1589 assert (height <= I965_MAX_SIZE);
1591 size = stride * height;
1592 bo = intel_bo_create (to_intel_device (&base_dev->base),
1594 gpu_target, tiling, stride);
/* bo allocation failed: unwind the surface before erroring out. */
1596 status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
1598 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
1601 bo->tiling = tiling;
1602 bo->stride = stride;
1603 surface->intel.drm.bo = &bo->base;
1605 assert (bo->base.size >= (size_t) stride*height);
1608 return &surface->intel.drm.base;
/* Public surface-create hook: validate the pixel format (A1 and INVALID are
 * rejected) and create a GPU-targeted surface with the default tiling. */
1611 static cairo_surface_t *
1612 i965_surface_create (cairo_drm_device_t *device,
1613 cairo_format_t format, int width, int height)
1616 case CAIRO_FORMAT_ARGB32:
1617 case CAIRO_FORMAT_RGB16_565:
1618 case CAIRO_FORMAT_RGB24:
1619 case CAIRO_FORMAT_A8:
1621 case CAIRO_FORMAT_INVALID:
1623 case CAIRO_FORMAT_A1:
1624 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
1627 return i965_surface_create_internal (device, format, width, height,
1628 I965_TILING_DEFAULT, TRUE);
/* Wrap an existing GEM buffer object (identified by its flink name) in an
 * i965 surface.  The caller-supplied stride must be at least the minimum
 * for the format at the 4-pixel-padded width and 64-byte aligned; A1 and
 * INVALID formats are rejected. */
1631 static cairo_surface_t *
1632 i965_surface_create_for_name (cairo_drm_device_t *base_dev,
1634 cairo_format_t format,
1635 int width, int height, int stride)
1637 i965_device_t *device;
1638 i965_surface_t *surface;
1639 cairo_status_t status_ignored;
1642 min_stride = cairo_format_stride_for_width (format, (width + 3) & -4);
1643 if (stride < min_stride || stride & 63)
1644 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
/* NOTE(review): A1 is rejected twice — here and in the switch below;
 * one of the two checks is redundant. */
1646 if (format == CAIRO_FORMAT_A1)
1647 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
1650 case CAIRO_FORMAT_ARGB32:
1651 case CAIRO_FORMAT_RGB16_565:
1652 case CAIRO_FORMAT_RGB24:
1653 case CAIRO_FORMAT_A8:
1655 case CAIRO_FORMAT_INVALID:
1657 case CAIRO_FORMAT_A1:
1658 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
1661 surface = malloc (sizeof (i965_surface_t));
1662 if (unlikely (surface == NULL))
1663 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
1665 i965_surface_init (surface, base_dev, format, width, height);
1667 device = (i965_device_t *) base_dev;
/* NOTE(review): if intel_bo_create_for_name returns NULL this takes
 * &NULL->base before the NULL check below — verify offsetof(base)==0
 * or hoist the check. */
1668 surface->intel.drm.bo = &intel_bo_create_for_name (&device->intel, name)->base;
1669 if (unlikely (surface->intel.drm.bo == NULL)) {
1670 status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
1672 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
1675 surface->intel.drm.stride = stride;
1677 return &surface->intel.drm.base;
/* Prepare the surface for display scan-out.  Scan-out requires X-tiling:
 * if the backing bo is not X-tiled, paint the contents into a new clone
 * surface and swap the two buffer objects, leaving the original surface
 * backed by the (new) X-tiled bo.  Y-tiled buffers are rejected. */
1680 static cairo_status_t
1681 i965_surface_enable_scan_out (void *abstract_surface)
1683 i965_surface_t *surface = abstract_surface;
1686 if (unlikely (surface->intel.drm.bo == NULL))
1687 return _cairo_error (CAIRO_STATUS_INVALID_SIZE);
1689 bo = to_intel_bo (surface->intel.drm.bo);
1690 if (bo->tiling != I915_TILING_X) {
1691 i965_device_t *device = i965_device (surface);
1692 cairo_surface_pattern_t pattern;
1693 cairo_surface_t *clone;
1694 cairo_status_t status;
/* NOTE(review): i965_surface_create_internal declares its second
 * parameter as cairo_format_t, but base.content is passed here —
 * looks like it should be surface->intel.drm.format; confirm. */
1696 clone = i965_surface_create_internal (&device->intel.base,
1697 surface->intel.drm.base.content,
1698 surface->intel.drm.width,
1699 surface->intel.drm.height,
1702 if (unlikely (clone->status))
1703 return clone->status;
/* Copy pixels 1:1 into the clone; NEAREST avoids any filtering. */
1706 _cairo_pattern_init_for_surface (&pattern, &surface->intel.drm.base);
1707 pattern.base.filter = CAIRO_FILTER_NEAREST;
1709 status = _cairo_surface_paint (clone,
1710 CAIRO_OPERATOR_SOURCE,
1714 _cairo_pattern_fini (&pattern.base);
1716 if (unlikely (status)) {
1717 cairo_surface_destroy (clone);
1721 /* swap buffer objects */
1722 surface->intel.drm.bo = ((cairo_drm_surface_t *) clone)->bo;
1723 ((cairo_drm_surface_t *) clone)->bo = &bo->base;
1724 bo = to_intel_bo (surface->intel.drm.bo);
/* The clone now owns the old bo and releases it on destroy. */
1726 cairo_surface_destroy (clone);
1729 if (unlikely (bo->tiling == I915_TILING_Y))
1730 return _cairo_error (CAIRO_STATUS_INVALID_FORMAT); /* XXX */
1732 return CAIRO_STATUS_SUCCESS;
/* Device vtable flush hook: acquire the device and flush any pending
 * batches.  A finished device is a successful no-op. */
1735 static cairo_int_status_t
1736 _i965_device_flush (cairo_drm_device_t *device)
1738 cairo_status_t status;
1740 if (unlikely (device->base.finished))
1741 return CAIRO_STATUS_SUCCESS;
1743 status = cairo_device_acquire (&device->base);
1744 if (likely (status == CAIRO_STATUS_SUCCESS))
1745 status = i965_device_flush ((i965_device_t *) device);
1747 cairo_device_release (&device->base);
/* Device vtable throttle hook: flush pending work, then block in the
 * kernel throttle ioctl so the CPU does not run too far ahead of the GPU. */
1752 static cairo_int_status_t
1753 _i965_device_throttle (cairo_drm_device_t *device)
1755 cairo_status_t status;
1757 status = cairo_device_acquire (&device->base);
1758 if (unlikely (status))
1761 status = i965_device_flush ((i965_device_t *) device);
1762 intel_throttle ((intel_device_t *) device);
1764 cairo_device_release (&device->base);
/* Device vtable destroy hook: reset cached GPU state, then tear down every
 * state hash table and its matching freelist (created in
 * _cairo_drm_i965_device_create) before finalizing the intel base. */
1770 _i965_device_destroy (void *base)
1772 i965_device_t *device = base;
1774 i965_device_reset (device);
1775 i965_general_state_reset (device);
1777 _cairo_hash_table_destroy (device->sf_states);
1778 _cairo_hash_table_destroy (device->samplers);
1779 _cairo_hash_table_destroy (device->cc_states);
1780 _cairo_hash_table_destroy (device->wm_kernels);
1781 _cairo_hash_table_destroy (device->wm_states);
1782 _cairo_hash_table_destroy (device->wm_bindings);
1784 _cairo_freelist_fini (&device->sf_freelist);
1785 _cairo_freelist_fini (&device->cc_freelist);
1786 _cairo_freelist_fini (&device->wm_kernel_freelist);
1787 _cairo_freelist_fini (&device->wm_state_freelist);
1788 _cairo_freelist_fini (&device->wm_binding_freelist);
1789 _cairo_freelist_fini (&device->sampler_freelist);
1791 intel_device_fini (&device->intel);
/* Keys-equal callback for hash tables whose entries are fully identified
 * by their precomputed hash value (wm_kernels, samplers). */
1796 hash_equal (const void *A, const void *B)
1798 const cairo_hash_entry_t *a = A, *b = B;
1799 return a->hash == b->hash;
/* Create and initialize an i965 DRM device for the given file descriptor.
 *
 * Allocates the device, initializes the intel base, installs the surface-
 * and device-level vtable hooks, creates the six GPU-state hash tables with
 * their paired freelists (unwound in reverse order via the CLEANUP_* goto
 * chain on failure), initializes the four command streams (batch, surface,
 * general, vertex) and registers the device with the DRM core.  Errors
 * return an error-device wrapper. */
1802 cairo_drm_device_t *
1803 _cairo_drm_i965_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
1805 i965_device_t *device;
1807 cairo_status_t status;
/* NOTE(review): ">t_size" looks like a mangled "&gt_size" — the
 * address of the aperture-size out-parameter; restore before building. */
1809 if (! intel_info (fd, >t_size))
1812 device = malloc (sizeof (i965_device_t));
1813 if (unlikely (device == NULL))
1814 return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);
1816 status = intel_device_init (&device->intel, fd);
1817 if (unlikely (status))
1820 device->is_g4x = IS_G4X (chip_id);
1821 //device->is_g5x = IS_G5X (chip_id);
/* Surface-level entry points. */
1823 device->intel.base.surface.create = i965_surface_create;
1824 device->intel.base.surface.create_for_name = i965_surface_create_for_name;
1825 device->intel.base.surface.create_from_cacheable_image = NULL;
1826 device->intel.base.surface.enable_scan_out = i965_surface_enable_scan_out;
/* Device-level entry points. */
1828 device->intel.base.device.flush = _i965_device_flush;
1829 device->intel.base.device.throttle = _i965_device_throttle;
1830 device->intel.base.device.destroy = _i965_device_destroy;
/* GPU-state caches: each hash table is paired with a freelist sized
 * for its entry struct. */
1832 device->sf_states = _cairo_hash_table_create (i965_sf_state_equal);
1833 if (unlikely (device->sf_states == NULL))
1836 _cairo_freelist_init (&device->sf_freelist,
1837 sizeof (struct i965_sf_state));
1840 device->cc_states = _cairo_hash_table_create (i965_cc_state_equal);
1841 if (unlikely (device->cc_states == NULL))
1844 _cairo_freelist_init (&device->cc_freelist,
1845 sizeof (struct i965_cc_state));
1848 device->wm_kernels = _cairo_hash_table_create (hash_equal);
1849 if (unlikely (device->wm_kernels == NULL))
1852 _cairo_freelist_init (&device->wm_kernel_freelist,
1853 sizeof (struct i965_wm_kernel));
1855 device->wm_states = _cairo_hash_table_create (i965_wm_state_equal);
1856 if (unlikely (device->wm_states == NULL))
1857 goto CLEANUP_WM_KERNEL;
1859 _cairo_freelist_init (&device->wm_state_freelist,
1860 sizeof (struct i965_wm_state));
1863 device->wm_bindings = _cairo_hash_table_create (i965_wm_binding_equal);
1864 if (unlikely (device->wm_bindings == NULL))
1865 goto CLEANUP_WM_STATE;
1867 _cairo_freelist_init (&device->wm_binding_freelist,
1868 sizeof (struct i965_wm_binding));
1870 device->samplers = _cairo_hash_table_create (hash_equal);
1871 if (unlikely (device->samplers == NULL))
1872 goto CLEANUP_WM_BINDING;
1874 _cairo_freelist_init (&device->sampler_freelist,
1875 sizeof (struct i965_sampler));
/* Command streams backed by fixed in-struct buffers. */
1877 i965_stream_init (&device->batch,
1878 device->batch_base, sizeof (device->batch_base),
1880 device->batch_relocations,
1881 ARRAY_LENGTH (device->batch_relocations));
1883 i965_stream_init (&device->surface,
1884 device->surface_base, sizeof (device->surface_base),
1885 device->surface_pending_relocations,
1886 ARRAY_LENGTH (device->surface_pending_relocations),
1887 device->surface_relocations,
1888 ARRAY_LENGTH (device->surface_relocations));
1890 i965_stream_init (&device->general,
1891 device->general_base, sizeof (device->general_base),
1892 device->general_pending_relocations,
1893 ARRAY_LENGTH (device->general_pending_relocations),
1896 i965_stream_init (&device->vertex,
1897 device->vertex_base, sizeof (device->vertex_base),
1898 device->vertex_pending_relocations,
1899 ARRAY_LENGTH (device->vertex_pending_relocations),
1902 cairo_list_init (&device->flush);
1903 i965_device_reset (device);
/* Sentinel offsets: force state re-emission on first use. */
1904 device->vs_offset = (uint32_t) -1;
1905 device->border_color_offset = (uint32_t) -1;
1906 device->general_state = NULL;
1908 return _cairo_drm_device_init (&device->intel.base,
1909 fd, dev, vendor_id, chip_id,
/* Error unwinding: destroy the tables in reverse creation order. */
1913 _cairo_hash_table_destroy (device->wm_bindings);
1915 _cairo_hash_table_destroy (device->wm_states);
1917 _cairo_hash_table_destroy (device->wm_kernels);
1919 _cairo_hash_table_destroy (device->cc_states);
1921 _cairo_hash_table_destroy (device->sf_states);
1923 intel_device_fini (&device->intel);
1926 return (cairo_drm_device_t *) _cairo_device_create_in_error (status);