/* Cairo - a vector graphics library with display and print output
 *
 * Copyright © 2009 Chris Wilson
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 * **************************************************************************
 * This work was initially based upon xf86-video-intel/src/i915_render.c:
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Wang Zhenyu <zhenyu.z.wang@intel.com>
 *    Eric Anholt <eric@anholt.net>
 *
 * **************************************************************************
 * and also upon libdrm/intel/intel_bufmgr_gem.c:
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 *
 * - Per thread context? Would it actually avoid many locks?
 */
#include "cairo-drm-private.h"
#include "cairo-drm-ioctl-private.h"
#include "cairo-drm-intel-private.h"
#include "cairo-drm-intel-command-private.h"
#include "cairo-drm-intel-ioctl-private.h"
#include "cairo-drm-i915-private.h"

#include "cairo-boxes-private.h"
#include "cairo-cache-private.h"
#include "cairo-composite-rectangles-private.h"
#include "cairo-default-context-private.h"
#include "cairo-error-private.h"
#include "cairo-freelist-private.h"
#include "cairo-list-private.h"
#include "cairo-path-fixed-private.h"
#include "cairo-region-private.h"
#include "cairo-surface-offset-private.h"

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <errno.h>
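/* A fixed block of state emitted at the start of every batch: it turns
 * off features these render paths never use (line anti-aliasing,
 * independent alpha blending, the texture crossbar, logic ops other
 * than copy, scissoring and indirect state), leaving a known-clean
 * baseline for the state tracking below.
 */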
static const uint32_t i915_batch_setup[] = {
    /* Disable line anti-aliasing */

    /* Disable independent alpha blend */
    _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
	IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
	IAB_MODIFY_SRC_FACTOR | (BLENDFACT_ONE << IAB_SRC_FACTOR_SHIFT) |
	IAB_MODIFY_DST_FACTOR | (BLENDFACT_ZERO << IAB_DST_FACTOR_SHIFT),

    /* Disable texture crossbar */
    _3DSTATE_COORD_SET_BINDINGS |

    _3DSTATE_MODES_4_CMD | ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC (LOGICOP_COPY),

    _3DSTATE_LOAD_STATE_IMMEDIATE_1 |

    0, /* Disable texture coordinate wrap-shortest */
    (1 << S4_POINT_WIDTH_SHIFT) |

    S4_FLATSHADE_SPECULAR |

    0, /* Disable stencil buffer */
    S6_COLOR_WRITE_ENABLE,

    _3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT,

    /* disable indirect state */
    _3DSTATE_LOAD_INDIRECT,
};
static const cairo_surface_backend_t i915_surface_backend;

static cairo_surface_t *
i915_surface_create_from_cacheable_image (cairo_drm_device_t *base_dev,
					  cairo_surface_t *source);
static cairo_status_t
i915_bo_exec (i915_device_t *device, intel_bo_t *bo, uint32_t offset)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    int ret, cnt, i;

    /* Add the batch buffer to the validation list. */
    cnt = device->batch.exec_count;
    if (cnt > 0 && bo->base.handle == device->batch.exec[cnt-1].handle)
	i = cnt - 1;
    else
	i = device->batch.exec_count++;
    device->batch.exec[i].handle = bo->base.handle;
    device->batch.exec[i].relocation_count = device->batch.reloc_count;
    device->batch.exec[i].relocs_ptr = (uintptr_t) device->batch.reloc;
    device->batch.exec[i].alignment = 0;
    device->batch.exec[i].offset = 0;
    device->batch.exec[i].flags = 0;
    device->batch.exec[i].rsvd1 = 0;
    device->batch.exec[i].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t) device->batch.exec;
    execbuf.buffer_count = device->batch.exec_count;
    execbuf.batch_start_offset = offset;
    execbuf.batch_len = (device->batch.used << 2) + sizeof (device->batch_header);
    execbuf.num_cliprects = 0;
    execbuf.cliprects_ptr = 0;

    do {
	ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    } while (ret != 0 && errno == EINTR);

    if (device->debug & I915_DEBUG_SYNC && ret == 0)
	ret = ! intel_bo_wait (&device->intel, bo);

    if (ret) {
	int n, m;

	fprintf (stderr, "Batch submission failed: %d\n", errno);
	fprintf (stderr, "  relocation entries: %d/%d\n",
		 device->batch.reloc_count, I915_MAX_RELOCS);
	fprintf (stderr, "  gtt size: (%zd/%zd), (%zd/%zd)\n",
		 device->batch.est_gtt_size, device->batch.gtt_avail_size,
		 device->batch.total_gtt_size, device->intel.gtt_avail_size);

	fprintf (stderr, "  buffers:\n");
	for (n = 0; n < device->batch.exec_count; n++) {
	    fprintf (stderr, "    exec[%d] = %d, %d/%d bytes, gtt = %qx\n",
		     n,
		     device->batch.exec[n].handle,
		     n == device->batch.exec_count - 1 ? bo->base.size : device->batch.target_bo[n]->base.size,
		     n == device->batch.exec_count - 1 ? bo->full_size : device->batch.target_bo[n]->full_size,
		     device->batch.exec[n].offset);
	}
	for (n = 0; n < device->batch.reloc_count; n++) {
	    for (m = 0; m < device->batch.exec_count; m++)
		if (device->batch.exec[m].handle == device->batch.reloc[n].target_handle)
		    break;

	    fprintf (stderr, "    reloc[%d] = %d @ %qx -> %qx + %qx\n", n,
		     device->batch.reloc[n].target_handle,
		     device->batch.reloc[n].offset,
		     (unsigned long long) device->batch.exec[m].offset,
		     (unsigned long long) device->batch.reloc[n].delta);

	    device->batch_base[(device->batch.reloc[n].offset - sizeof (device->batch_header)) / 4] =
		device->batch.exec[m].offset + device->batch.reloc[n].delta;
	}

	intel_dump_batchbuffer (device->batch_header,
				execbuf.batch_len,
				device->intel.base.chip_id);
    }

    VG (VALGRIND_MAKE_MEM_DEFINED (device->batch.exec, sizeof (device->batch.exec[0]) * i));

    bo->offset = device->batch.exec[i].offset;

    for (cnt = 0; cnt < device->batch.exec_count; cnt++) {
	intel_bo_t *bo = device->batch.target_bo[cnt];

	bo->offset = device->batch.exec[cnt].offset;
	bo->exec = NULL;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;
	cairo_list_del (&bo->cache_list);

	intel_bo_destroy (&device->intel, bo);
    }

    assert (cairo_list_is_empty (&device->intel.bo_in_flight));

    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
    device->batch.fences = 0;

    device->batch.est_gtt_size = I915_BATCH_SIZE;
    device->batch.total_gtt_size = I915_BATCH_SIZE;

    return ret == 0 ? CAIRO_STATUS_SUCCESS : _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
static void
i915_batch_add_reloc (i915_device_t *device,
		      uint32_t pos,
		      intel_bo_t *bo,
		      uint32_t offset,
		      uint32_t read_domains,
		      uint32_t write_domain,
		      cairo_bool_t needs_fence)
{
    int index;

    assert (offset < bo->base.size);

    if (bo->exec == NULL) {
	device->batch.total_gtt_size += bo->base.size;

	device->batch.est_gtt_size += bo->base.size;

	assert (device->batch.exec_count < ARRAY_LENGTH (device->batch.exec));

	index = device->batch.exec_count++;
	device->batch.exec[index].handle = bo->base.handle;
	device->batch.exec[index].relocation_count = 0;
	device->batch.exec[index].relocs_ptr = 0;
	device->batch.exec[index].alignment = 0;
	device->batch.exec[index].offset = 0;
	device->batch.exec[index].flags = 0;
	device->batch.exec[index].rsvd1 = 0;
	device->batch.exec[index].rsvd2 = 0;

	device->batch.target_bo[index] = intel_bo_reference (bo);

	bo->exec = &device->batch.exec[index];
    }

    if (bo->tiling != I915_TILING_NONE) {
	uint32_t alignment;

	/* We presume that we will want to use a fence with X tiled objects... */
	if (needs_fence || bo->tiling == I915_TILING_X)
	    alignment = bo->full_size;

	alignment = 2*((bo->stride + 4095) & -4096);

	alignment = bo->full_size;

	if (bo->exec->alignment < alignment)
	    bo->exec->alignment = alignment;

	if (needs_fence && (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
	    bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
	    device->batch.fences++;
	}

	intel_bo_set_tiling (&device->intel, bo);
    }

    assert (device->batch.reloc_count < ARRAY_LENGTH (device->batch.reloc));

    index = device->batch.reloc_count++;
    device->batch.reloc[index].offset = (pos << 2) + sizeof (device->batch_header);
    device->batch.reloc[index].delta = offset;
    device->batch.reloc[index].target_handle = bo->base.handle;
    device->batch.reloc[index].read_domains = read_domains;
    device->batch.reloc[index].write_domain = write_domain;
    device->batch.reloc[index].presumed_offset = bo->offset;

    assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
    bo->batch_read_domains |= read_domains;
    bo->batch_write_domain |= write_domain;
}
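/* A note on the relocation bookkeeping above: reloc[].offset is the
 * byte location of the pointer within the batch (hence pos << 2,
 * biased by the header), delta is the offset within the target bo,
 * and presumed_offset records where we last saw the bo bound into the
 * GTT. If the kernel's placement matches that guess at execbuffer
 * time, it can skip rewriting the batch contents.
 */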
static void
i915_vbo_finish (i915_device_t *device)
{
    intel_bo_t *vbo;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));
    assert (device->vbo_used);

    if (device->vertex_count) {
	if (device->vbo == 0) {
	    OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |

	    device->vbo = device->batch.used++;
	    device->vbo_max_index = device->batch.used;
	    OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
		       (device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	}

	OUT_DWORD (PRIM3D_RECTLIST |
		   PRIM3D_INDIRECT_SEQUENTIAL |
		   device->vertex_count);
	OUT_DWORD (device->vertex_index);
    }

    if (device->last_vbo != NULL) {
	intel_bo_in_flight_add (&device->intel, device->last_vbo);
	intel_bo_destroy (&device->intel, device->last_vbo);
    }

    device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

    /* will include a few bytes of inter-array padding */
    vbo = intel_bo_create (&device->intel,
			   device->vbo_used, device->vbo_used,
			   FALSE, I915_TILING_NONE, 0);
    i915_batch_fill_reloc (device, device->vbo, vbo, 0,
			   I915_GEM_DOMAIN_VERTEX, 0);
    intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
    device->last_vbo = vbo;
    device->last_vbo_offset = (device->vbo_used + 7) & -8;
    device->last_vbo_space = vbo->base.size - device->last_vbo_offset;

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;

    if (! i915_check_aperture_size (device, 1, I915_VBO_SIZE, I915_VBO_SIZE)) {
	cairo_status_t status;

	status = i915_batch_flush (device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);

	status = i915_shader_commit (device->shader, device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);
    }
}
/* XXX improve state tracker/difference and flush state on vertex emission */
static void
i915_device_reset (i915_device_t *device)
{
    if (device->current_source != NULL)
	*device->current_source = 0;
    if (device->current_mask != NULL)
	*device->current_mask = 0;
    if (device->current_clip != NULL)
	*device->current_clip = 0;

    device->current_target = NULL;
    device->current_size = 0;
    device->current_source = NULL;
    device->current_mask = NULL;
    device->current_clip = NULL;
    device->current_texcoords = ~0;
    device->current_blend = 0;
    device->current_n_constants = 0;
    device->current_n_samplers = 0;
    device->current_n_maps = 0;
    device->current_colorbuf = 0;
    device->current_diffuse = 0;
    device->current_program = ~0;
    device->clear_alpha = ~0;

    device->last_source_fragment = ~0;
}
static void
i915_batch_cleanup (i915_device_t *device)
{
    int i;

    for (i = 0; i < device->batch.exec_count; i++) {
	intel_bo_t *bo = device->batch.target_bo[i];

	bo->exec = NULL;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;
	cairo_list_del (&bo->cache_list);

	intel_bo_destroy (&device->intel, bo);
    }

    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
}
static void
i915_batch_vbo_finish (i915_device_t *device)
{
    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo || i915_batch_space (device) < (int32_t) device->vbo_used) {
	intel_bo_t *vbo;

	if (device->vertex_count) {
	    if (device->vbo == 0) {
		OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |

		device->vbo = device->batch.used++;
		device->vbo_max_index = device->batch.used;
		OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
			   (device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	    }

	    OUT_DWORD (PRIM3D_RECTLIST |
		       PRIM3D_INDIRECT_SEQUENTIAL |
		       device->vertex_count);
	    OUT_DWORD (device->vertex_index);
	}

	if (device->last_vbo != NULL)
	    intel_bo_destroy (&device->intel, device->last_vbo);

	device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

	/* will include a few bytes of inter-array padding */
	vbo = intel_bo_create (&device->intel,
			       device->vbo_used, device->vbo_used,
			       FALSE, I915_TILING_NONE, 0);
	i915_batch_fill_reloc (device, device->vbo,
			       vbo, 0,
			       I915_GEM_DOMAIN_VERTEX, 0);
	intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
	device->last_vbo = vbo;
	device->last_vbo_offset = (device->vbo_used + 7) & -8;
	device->last_vbo_space = vbo->base.size - device->last_vbo_offset;
    } else {
	/* Only a single rectlist in this batch, and no active vertex buffer. */
	OUT_DWORD (PRIM3D_RECTLIST | (device->vbo_used / 4 - 1));

	memcpy (BATCH_PTR (device), device->vbo_base, device->vbo_used);
	device->batch.used += device->vbo_used >> 2;
    }

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;
}
static cairo_status_t
i915_batch_flush (i915_device_t *device)
{
    intel_bo_t *batch = NULL;
    cairo_status_t status;
    uint32_t length, offset = 0;
    int n;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo_used)
	i915_batch_vbo_finish (device);

    if (device->batch.used == 0)
	return CAIRO_STATUS_SUCCESS;

    i915_batch_emit_dword (device, MI_BATCH_BUFFER_END);
    if ((device->batch.used & 1) != ((sizeof (device->batch_header)>>2) & 1))
	i915_batch_emit_dword (device, MI_NOOP);

    length = (device->batch.used << 2) + sizeof (device->batch_header);

    /* NB: it is faster to copy the data than to map/unmap the batch,
     * presumably because we frequently only use a small part of the buffer.
     */
    if (device->last_vbo) {
	if (length <= device->last_vbo_space) {
	    batch = device->last_vbo;
	    offset = device->last_vbo_offset;

	    /* fixup the relocations */
	    for (n = 0; n < device->batch.reloc_count; n++)
		device->batch.reloc[n].offset += offset;
	} else
	    intel_bo_destroy (&device->intel, device->last_vbo);
	device->last_vbo = NULL;
    }

    if (batch == NULL) {
	batch = intel_bo_create (&device->intel,
				 length, length,
				 FALSE, I915_TILING_NONE, 0);
	if (unlikely (batch == NULL)) {
	    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	    i915_batch_cleanup (device);
	    goto CLEANUP;
	}
    }

    intel_bo_write (&device->intel, batch, offset, length, device->batch_header);
    status = i915_bo_exec (device, batch, offset);
    intel_bo_destroy (&device->intel, batch);

CLEANUP:
    device->batch.used = 0;

    intel_glyph_cache_unpin (&device->intel);
    intel_snapshot_cache_thaw (&device->intel);

    i915_device_reset (device);

    return status;
}
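/* Illustrative summary of the flush path above: where possible the
 * batch commands are appended into the unused tail of the last vertex
 * buffer, so a single bo carries both vertices and commands; only if
 * there is no such space is a fresh bo allocated. Since the commands
 * then start at last_vbo_offset rather than 0, every relocation offset
 * is rebased by that amount before submission.
 */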
static float *
i915_add_rectangles (i915_device_t *device, int num_rects, int *count)
{
    float *vertices;
    uint32_t size;
    int cnt;

    assert (device->floats_per_vertex);

    size = device->rectangle_size;
    if (unlikely (device->vbo_offset + size > I915_VBO_SIZE))
	i915_vbo_finish (device);

    vertices = (float *) (device->vbo_base + device->vbo_offset);
    cnt = (I915_VBO_SIZE - device->vbo_offset) / size;
    if (cnt > num_rects)
	cnt = num_rects;
    device->vbo_used = device->vbo_offset += size * cnt;
    device->vertex_count += 3 * cnt;
    *count = cnt;
    return vertices;
}
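/* Each quad emitted via PRIM3D_RECTLIST consumes three vertices (the
 * hardware infers the fourth corner), which is why vertex_count above
 * advances by 3 per rectangle while rectangle_size bytes of vertex
 * data are reserved for each.
 */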
static cairo_surface_t *
i915_surface_create_similar (void *abstract_other,
			     cairo_content_t content,
			     int width, int height)
{
    i915_surface_t *other;
    cairo_format_t format;
    uint32_t tiling = I915_TILING_DEFAULT;

    other = abstract_other;
    if (content == other->intel.drm.base.content)
	format = other->intel.drm.format;
    else
	format = _cairo_format_from_content (content);

    if (width * _cairo_format_bits_per_pixel (format) > 8 * 32*1024 || height > 64*1024)
	return NULL;

    /* we presume that a similar surface will be used for blitting */
    if (i915_surface_needs_tiling (other))
	tiling = I915_TILING_X;

    return i915_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
					 format,
					 width, height,
					 tiling, TRUE);
}
static cairo_status_t
i915_surface_finish (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    i915_device_t *device = i915_device (surface);

    if (surface->stencil != NULL) {
	intel_bo_in_flight_add (&device->intel, surface->stencil);
	intel_bo_destroy (&device->intel, surface->stencil);
    }

    if (surface->is_current_texture) {
	if (surface->is_current_texture & CURRENT_SOURCE)
	    device->current_source = NULL;
	if (surface->is_current_texture & CURRENT_MASK)
	    device->current_mask = NULL;
	if (surface->is_current_texture & CURRENT_CLIP)
	    device->current_clip = NULL;
	device->current_n_samplers = 0;
    }

    if (surface == device->current_target)
	device->current_target = NULL;

    if (surface->cache != NULL) {
	i915_image_private_t *node = surface->cache;
	intel_buffer_cache_t *cache = node->container;

	if (--cache->ref_count == 0) {
	    intel_bo_in_flight_add (&device->intel, cache->buffer.bo);
	    intel_bo_destroy (&device->intel, cache->buffer.bo);
	    _cairo_rtree_fini (&cache->rtree);
	    cairo_list_del (&cache->link);
	    free (cache);
	} else {
	    node->node.state = CAIRO_RTREE_NODE_AVAILABLE;
	    cairo_list_move (&node->node.link, &cache->rtree.available);
	    _cairo_rtree_node_collapse (&cache->rtree, node->node.parent);
	}
    }

    return intel_surface_finish (&surface->intel);
}
static cairo_status_t
i915_surface_batch_flush (i915_surface_t *surface)
{
    cairo_status_t status;
    intel_bo_t *bo;

    assert (surface->intel.drm.fallback == NULL);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo == NULL || bo->batch_write_domain == 0)
	return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (surface->intel.drm.base.device);
    if (unlikely (status))
	return status;

    status = i915_batch_flush (i915_device (surface));
    cairo_device_release (surface->intel.drm.base.device);

    return status;
}
static cairo_status_t
i915_surface_flush (void *abstract_surface,
		    unsigned flags)
{
    i915_surface_t *surface = abstract_surface;
    cairo_status_t status;

    if (flags)
	return CAIRO_STATUS_SUCCESS;

    if (surface->intel.drm.fallback == NULL) {
	if (surface->intel.drm.base.finished) {
	    /* Forgo flushing on finish as the user cannot access the surface directly. */
	    return CAIRO_STATUS_SUCCESS;
	}

	if (surface->deferred_clear) {
	    status = i915_surface_clear (surface);
	    if (unlikely (status))
		return status;
	}

	return i915_surface_batch_flush (surface);
    }

    return intel_surface_flush (abstract_surface, flags);
}
static cairo_status_t
_composite_boxes_spans (void *closure,
			cairo_span_renderer_t *renderer,
			const cairo_rectangle_int_t *extents)
{
    cairo_boxes_t *boxes = closure;
    cairo_rectangular_scan_converter_t converter;
    struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    int i;

    _cairo_rectangular_scan_converter_init (&converter, extents);
    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
	    if (unlikely (status))
		goto CLEANUP;
	}
    }

    status = converter.base.generate (&converter.base, renderer);

CLEANUP:
    converter.base.destroy (&converter.base);

    return status;
}
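/* This is the span-renderer callback handed to
 * i915_clip_and_composite_spans() when a box array cannot be
 * composited directly: each box is replayed through a rectangular
 * scan converter so the renderer receives ordinary spans.
 */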
static cairo_status_t
i915_fixup_unbounded (i915_surface_t *dst,
		      const cairo_composite_rectangles_t *extents,
		      cairo_clip_t *clip)
{
    i915_shader_t shader;
    i915_device_t *device;
    cairo_status_t status;

    if (clip != NULL) {
	cairo_region_t *clip_region = NULL;

	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	assert (clip_region == NULL);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    } else {
	if (extents->bounded.width == extents->unbounded.width &&
	    extents->bounded.height == extents->unbounded.height)
	{
	    return CAIRO_STATUS_SUCCESS;
	}
    }

    if (clip != NULL) {
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	i915_shader_set_clip (&shader, clip);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_white.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    } else {
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_clear.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status)) {
	i915_shader_fini (&shader);
	return status;
    }

    status = i915_shader_commit (&shader, device);
    if (unlikely (status)) {
	i915_shader_fini (&shader);
	cairo_device_release (&device->intel.base.base);
	return status;
    }

    if (extents->bounded.width == 0 || extents->bounded.height == 0) {
	shader.add_rectangle (&shader,
			      extents->unbounded.x,
			      extents->unbounded.y,
			      extents->unbounded.width,
			      extents->unbounded.height);
    } else {
	/* top */
	if (extents->bounded.y != extents->unbounded.y) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->unbounded.y,
				  extents->unbounded.width,
				  extents->bounded.y - extents->unbounded.y);
	}

	/* left */
	if (extents->bounded.x != extents->unbounded.x) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y,
				  extents->bounded.x - extents->unbounded.x,
				  extents->bounded.height);
	}

	/* right */
	if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
	    shader.add_rectangle (&shader,
				  extents->bounded.x + extents->bounded.width,
				  extents->bounded.y,
				  extents->unbounded.x + extents->unbounded.width - (extents->bounded.x + extents->bounded.width),
				  extents->bounded.height);
	}

	/* bottom */
	if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y + extents->bounded.height,
				  extents->unbounded.width,
				  extents->unbounded.y + extents->unbounded.height - (extents->bounded.y + extents->bounded.height));
	}
    }

    i915_shader_fini (&shader);

    cairo_device_release (&device->intel.base.base);

    return status;
}
static cairo_status_t
i915_fixup_unbounded_boxes (i915_surface_t *dst,
			    const cairo_composite_rectangles_t *extents,
			    cairo_clip_t *clip,
			    cairo_boxes_t *boxes)
{
    cairo_boxes_t clear;
    cairo_box_t box;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;
    struct _cairo_boxes_chunk *chunk;
    int i;

    if (boxes->num_boxes <= 1)
	return i915_fixup_unbounded (dst, extents, clip);

    _cairo_boxes_init (&clear);

    box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
    box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
    box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
    box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    }

    if (clip_region == NULL) {
	cairo_boxes_t tmp;

	_cairo_boxes_init (&tmp);

	status = _cairo_boxes_add (&tmp, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	tmp.chunks.next = &boxes->chunks;
	tmp.num_boxes += boxes->num_boxes;

	status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
	tmp.chunks.next = NULL;
    } else {
	pixman_box32_t *pbox;

	pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
	_cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);

	status = _cairo_boxes_add (&clear, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		status = _cairo_boxes_add (&clear, &chunk->base[i]);
		if (unlikely (status)) {
		    _cairo_boxes_fini (&clear);
		    return status;
		}
	    }
	}

	status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
    }

    if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
	i915_shader_t shader;
	i915_device_t *device;

	if (clip != NULL) {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	    i915_shader_set_clip (&shader, clip);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_white.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	} else {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_clear.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	}

	device = i915_device (dst);
	status = cairo_device_acquire (&device->intel.base.base);
	if (unlikely (status))
	    goto CLEANUP_SHADER;

	status = i915_shader_commit (&shader, device);
	if (unlikely (status))
	    goto CLEANUP_DEVICE;

	for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
		int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
		int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
		int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);

		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	    }
	}

CLEANUP_DEVICE:
	cairo_device_release (&device->intel.base.base);
CLEANUP_SHADER:
	i915_shader_fini (&shader);
    }

    _cairo_boxes_fini (&clear);

    return status;
}
static cairo_bool_t
i915_can_blt (i915_surface_t *dst,
	      const cairo_pattern_t *pattern)
{
    const cairo_surface_pattern_t *spattern;
    i915_surface_t *src;

    spattern = (const cairo_surface_pattern_t *) pattern;
    src = (i915_surface_t *) spattern->surface;

    if (src->intel.drm.base.device != dst->intel.drm.base.device)
	return FALSE;

    if (! i915_surface_needs_tiling (dst))
	return FALSE;

    if (! _cairo_matrix_is_translation (&pattern->matrix))
	return FALSE;

    if (! (pattern->filter == CAIRO_FILTER_NEAREST ||
	   pattern->filter == CAIRO_FILTER_FAST))
    {
	if (! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.x0)) ||
	    ! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.y0)))
	{
	    return FALSE;
	}
    }

    return _cairo_format_bits_per_pixel (src->intel.drm.format) ==
	   _cairo_format_bits_per_pixel (dst->intel.drm.format);
}
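/* In short, the blitter fast path is only taken for same-device,
 * translation-only, unfiltered (or integer-aligned) copies between
 * surfaces of matching bits-per-pixel; anything else falls back to
 * the 3D pipeline.
 */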
static cairo_status_t
i915_blt (i915_surface_t *src,
	  i915_surface_t *dst,
	  int src_x, int src_y,
	  int width, int height,
	  int dst_x, int dst_y,
	  cairo_bool_t flush)
{
    i915_device_t *device;
    intel_bo_t *bo_array[2];
    cairo_status_t status;
    int br13, cmd;

    bo_array[0] = to_intel_bo (dst->intel.drm.bo);
    bo_array[1] = to_intel_bo (src->intel.drm.bo);

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
	i915_batch_space (device) < 9)
    {
	status = i915_batch_flush (device);
	if (unlikely (status)) {
	    cairo_device_release (&device->intel.base.base);
	    return status;
	}
    }

    cmd = XY_SRC_COPY_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    OUT_DWORD (cmd);
    OUT_DWORD (br13);
    OUT_DWORD ((dst_y << 16) | dst_x);
    OUT_DWORD (((dst_y + height - 1) << 16) | (dst_x + width - 1));
    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    OUT_DWORD ((src_y << 16) | src_x);
    OUT_DWORD (src->intel.drm.stride);
    OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
    /* require explicit RenderCache flush for 2D -> 3D sampler? */
    if (flush)
	OUT_DWORD (MI_FLUSH);

    cairo_device_release (&device->intel.base.base);
    return CAIRO_STATUS_SUCCESS;
}
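/* The br13 word above packs the destination pitch in its low 16 bits
 * together with raster-op 0xCC (straight source copy) at bits 16-23;
 * the colour-depth bits are then or'ed in per destination format by
 * the switch.
 */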
static cairo_status_t
i915_surface_copy_subimage (i915_device_t *device,
			    i915_surface_t *src,
			    const cairo_rectangle_int_t *extents,
			    cairo_bool_t flush,
			    i915_surface_t **clone_out)
{
    i915_surface_t *clone;
    cairo_status_t status;

    clone = (i915_surface_t *)
	i915_surface_create_internal (&device->intel.base,
				      src->intel.drm.format,
				      extents->width,
				      extents->height,
				      I915_TILING_X, TRUE);
    if (unlikely (clone->intel.drm.base.status))
	return clone->intel.drm.base.status;

    status = i915_blt (src, clone,
		       extents->x, extents->y,
		       extents->width, extents->height,
		       0, 0,
		       flush);
    if (unlikely (status)) {
	cairo_surface_destroy (&clone->intel.drm.base);
	return status;
    }

    *clone_out = clone;
    return CAIRO_STATUS_SUCCESS;
}
static cairo_status_t
i915_clear_boxes (i915_surface_t *dst,
		  const cairo_boxes_t *boxes)
{
    i915_device_t *device = i915_device (dst);
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };
    int cmd, br13, clear = 0, i;

    cmd = XY_COLOR_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
	clear |= 0xff000000;
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	i915_batch_space (device) < 6 * boxes->num_boxes)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto CLEANUP;
    }

    if (device->vertex_count)
	i915_vbo_flush (device);

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	const cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x2 <= x1 || y2 <= y1)
		continue;

	    OUT_DWORD (cmd);
	    OUT_DWORD (br13);
	    OUT_DWORD ((y1 << 16) | x1);
	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));
	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	    OUT_DWORD (clear);
	}
    }

CLEANUP:
    cairo_device_release (&device->intel.base.base);

    return status;
}
static cairo_status_t
i915_surface_extract_X_from_Y (i915_device_t *device,
			       i915_surface_t *src,
			       const cairo_rectangle_int_t *extents,
			       i915_surface_t **clone_out)
{
    i915_surface_t *clone;
    i915_shader_t shader;
    cairo_surface_pattern_t pattern;
    cairo_rectangle_int_t rect;
    cairo_status_t status;

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    clone = (i915_surface_t *)
	i915_surface_create_internal (&device->intel.base,
				      src->intel.drm.format,
				      extents->width,
				      extents->height,
				      I915_TILING_X, TRUE);
    if (unlikely (clone->intel.drm.base.status))
	return clone->intel.drm.base.status;

    i915_shader_init (&shader, clone, CAIRO_OPERATOR_SOURCE, 1.);

    _cairo_pattern_init_for_surface (&pattern, &src->intel.drm.base);
    pattern.base.filter = CAIRO_FILTER_NEAREST;
    cairo_matrix_init_translate (&pattern.base.matrix, extents->x, extents->y);

    rect.x = rect.y = 0;
    rect.width = extents->width;
    rect.height = extents->height;
    status = i915_shader_acquire_pattern (&shader, &shader.source, &pattern.base, &rect);
    _cairo_pattern_fini (&pattern.base);

    if (unlikely (status))
	goto CLEANUP_SHADER;

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto CLEANUP_SHADER;

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
	goto CLEANUP_DEVICE;

    shader.add_rectangle (&shader, 0, 0, extents->width, extents->height);

    cairo_device_release (&device->intel.base.base);
    i915_shader_fini (&shader);

    *clone_out = clone;
    return CAIRO_STATUS_SUCCESS;

CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
CLEANUP_SHADER:
    i915_shader_fini (&shader);
    cairo_surface_destroy (&clone->intel.drm.base);
    return status;
}
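/* Presumably (the original comments do not say) this helper exists
 * because the blitter paths want a fenceable X-tiled layout:
 * i915_blt_boxes() below re-routes Y-tiled sources through it, paying
 * one extra copy so the subsequent XY_SRC_COPY_BLT packets can be
 * used.
 */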
static cairo_status_t
i915_blt_boxes (i915_surface_t *dst,
		const cairo_pattern_t *pattern,
		const cairo_rectangle_int_t *extents,
		const cairo_boxes_t *boxes)
{
    const cairo_surface_pattern_t *spattern;
    i915_device_t *device;
    i915_surface_t *src;
    cairo_surface_t *free_me = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    int br13, cmd, tx, ty;
    intel_bo_t *bo_array[2];
    int i;

    if (! i915_can_blt (dst, pattern))
	return CAIRO_INT_STATUS_UNSUPPORTED;

    spattern = (const cairo_surface_pattern_t *) pattern;
    src = (i915_surface_t *) spattern->surface;

    if (src->intel.drm.base.is_clear)
	return i915_clear_boxes (dst, boxes);

    tx = _cairo_lround (pattern->matrix.x0);
    ty = _cairo_lround (pattern->matrix.y0);

    if (pattern->extend != CAIRO_EXTEND_NONE &&
	(extents->x + tx < 0 ||
	 extents->y + ty < 0 ||
	 extents->x + tx + extents->width > src->intel.drm.width ||
	 extents->y + ty + extents->height > src->intel.drm.height))
    {
	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    device = i915_device (dst);
    if (to_intel_bo (src->intel.drm.bo)->tiling == I915_TILING_Y) {
	cairo_rectangle_int_t extents;

	_cairo_boxes_extents (boxes, &extents);

	status = i915_surface_extract_X_from_Y (device, src, &extents, &src);
	if (unlikely (status))
	    return status;

	free_me = &src->intel.drm.base;
    }

    bo_array[0] = to_intel_bo (dst->intel.drm.bo);
    bo_array[1] = to_intel_bo (src->intel.drm.bo);

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto CLEANUP_SURFACE;

    if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
	i915_batch_space (device) < 8 * boxes->num_boxes)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto CLEANUP_DEVICE;
    }

    cmd = XY_SRC_COPY_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	const cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x1 + tx < 0)
		x1 = -tx;
	    if (x2 + tx > src->intel.drm.width)
		x2 = src->intel.drm.width - tx;

	    if (y1 + ty < 0)
		y1 = -ty;
	    if (y2 + ty > src->intel.drm.height)
		y2 = src->intel.drm.height - ty;

	    if (x2 <= x1 || y2 <= y1)
		continue;
	    if (x2 < 0 || y2 < 0)
		continue;
	    if (x1 >= dst->intel.drm.width || y2 >= dst->intel.drm.height)
		continue;

	    OUT_DWORD (cmd);
	    OUT_DWORD (br13);
	    OUT_DWORD ((y1 << 16) | x1);
	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));
	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	    OUT_DWORD (((y1 + ty) << 16) | (x1 + tx));
	    OUT_DWORD (src->intel.drm.stride);
	    OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
	}
    }

    /* XXX fixup blank portions */

CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
CLEANUP_SURFACE:
    cairo_surface_destroy (free_me);

    return status;
}
static cairo_status_t
_upload_image_inplace (i915_surface_t *surface,
		       const cairo_pattern_t *source,
		       const cairo_rectangle_int_t *extents,
		       const cairo_boxes_t *boxes)
{
    i915_device_t *device;
    const cairo_surface_pattern_t *pattern;
    cairo_image_surface_t *image;
    const struct _cairo_boxes_chunk *chunk;
    intel_bo_t *bo;
    int tx, ty, i;

    if (source->type != CAIRO_PATTERN_TYPE_SURFACE)
	return CAIRO_INT_STATUS_UNSUPPORTED;

    pattern = (const cairo_surface_pattern_t *) source;
    if (pattern->surface->type != CAIRO_SURFACE_TYPE_IMAGE)
	return CAIRO_INT_STATUS_UNSUPPORTED;

    if (! _cairo_matrix_is_integer_translation (&source->matrix, &tx, &ty))
	return CAIRO_INT_STATUS_UNSUPPORTED;

    image = (cairo_image_surface_t *) pattern->surface;
    if (source->extend != CAIRO_EXTEND_NONE &&
	(extents->x + tx < 0 ||
	 extents->y + ty < 0 ||
	 extents->x + tx + extents->width > image->width ||
	 extents->y + ty + extents->height > image->height))
    {
	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    device = i915_device (surface);
    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->exec != NULL || ! intel_bo_is_inactive (&device->intel, bo)) {
	intel_bo_t *new_bo;
	cairo_bool_t need_clear = FALSE;

	if (boxes->num_boxes != 1 ||
	    extents->width < surface->intel.drm.width ||
	    extents->height < surface->intel.drm.height)
	{
	    if (! surface->intel.drm.base.is_clear)
		return CAIRO_INT_STATUS_UNSUPPORTED;

	    need_clear = TRUE;
	}

	new_bo = intel_bo_create (&device->intel,
				  bo->full_size, bo->base.size,
				  FALSE, bo->tiling, bo->stride);
	if (unlikely (new_bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	intel_bo_in_flight_add (&device->intel, bo);
	intel_bo_destroy (&device->intel, bo);

	bo = new_bo;
	surface->intel.drm.bo = &bo->base;

	if (need_clear) {
	    memset (intel_bo_map (&device->intel, bo), 0,
		    bo->stride * surface->intel.drm.height);
	}
    }

    if (image->format == surface->intel.drm.format) {
	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    cairo_box_t *box = chunk->base;
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
		int y2 = _cairo_fixed_integer_round (box[i].p2.y);
		cairo_status_t status;

		if (x1 + tx < 0)
		    x1 = -tx;
		if (x2 + tx > image->width)
		    x2 = image->width - tx;

		if (y1 + ty < 0)
		    y1 = -ty;
		if (y2 + ty > image->height)
		    y2 = image->height - ty;

		if (x2 <= x1 || y2 <= y1)
		    continue;
		if (x2 < 0 || y2 < 0)
		    continue;
		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
		    continue;

		status = intel_bo_put_image (&device->intel,
					     bo, image,
					     x1 + tx, y1 + ty,
					     x2 - x1, y2 - y1,
					     x1, y1);
		if (unlikely (status))
		    return status;
	    }
	}
    } else {
	pixman_image_t *dst;
	void *ptr;

	ptr = intel_bo_map (&device->intel, bo);
	if (unlikely (ptr == NULL))
	    return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

	dst = pixman_image_create_bits (_cairo_format_to_pixman_format_code (surface->intel.drm.format),
					surface->intel.drm.width,
					surface->intel.drm.height,
					ptr,
					surface->intel.drm.stride);
	if (unlikely (dst == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    cairo_box_t *box = chunk->base;
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
		int y2 = _cairo_fixed_integer_round (box[i].p2.y);

		if (x1 + tx < 0)
		    x1 = -tx;
		if (x2 + tx > image->width)
		    x2 = image->width - tx;

		if (y1 + ty < 0)
		    y1 = -ty;
		if (y2 + ty > image->height)
		    y2 = image->height - ty;

		if (x2 <= x1 || y2 <= y1)
		    continue;
		if (x2 < 0 || y2 < 0)
		    continue;
		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
		    continue;

		pixman_image_composite32 (PIXMAN_OP_SRC,
					  image->pixman_image, NULL, dst,
					  x1 + tx, y1 + ty,
					  0, 0,
					  x1, y1,
					  x2 - x1, y2 - y1);
	    }
	}

	pixman_image_unref (dst);
    }

    return CAIRO_STATUS_SUCCESS;
}
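/* A note on the bo swap above: if the existing bo is busy (still
 * referenced by an unflushed batch or in use by the GPU), writing into
 * it would stall, so the surface is repointed at a freshly allocated
 * bo of identical geometry and the old one is retired via the
 * in-flight list; the CPU can then write into the new bo immediately.
 */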
static cairo_status_t
_composite_boxes (i915_surface_t *dst,
		  cairo_operator_t op,
		  const cairo_pattern_t *pattern,
		  cairo_boxes_t *boxes,
		  cairo_antialias_t antialias,
		  cairo_clip_t *clip,
		  double opacity,
		  const cairo_composite_rectangles_t *extents)
{
    cairo_bool_t need_clip_surface = FALSE;
    cairo_region_t *clip_region = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    i915_shader_t shader;
    i915_device_t *device;
    int i;

    /* If the boxes are not pixel-aligned, we will need to compute a real mask */
    if (antialias != CAIRO_ANTIALIAS_NONE) {
	if (! boxes->is_pixel_aligned)
	    return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    if (clip == NULL && op == CAIRO_OPERATOR_SOURCE && opacity == 1.) {
	if (pattern->type == CAIRO_PATTERN_TYPE_SURFACE) {
	    status = i915_blt_boxes (dst, pattern, &extents->bounded, boxes);
	    if (status == CAIRO_INT_STATUS_UNSUPPORTED) {
		status = _upload_image_inplace (dst, pattern,
						&extents->bounded, boxes);
	    }
	    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
		return status;
	}
    }

    if (i915_surface_needs_tiling (dst)) {

	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    i915_shader_init (&shader, dst, op, opacity);

    status = i915_shader_acquire_pattern (&shader,
					  &shader.source,
					  pattern,
					  &extents->bounded);
    if (unlikely (status))
	return status;

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
	if (need_clip_surface)
	    i915_shader_set_clip (&shader, clip);
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto CLEANUP_SHADER;

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
	goto CLEANUP_DEVICE;

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x2 > x1 && y2 > y1)
		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	}
    }

    if (! extents->is_bounded)
	status = i915_fixup_unbounded_boxes (dst, extents, clip, boxes);

CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
CLEANUP_SHADER:
    i915_shader_fini (&shader);

    return status;
}
static cairo_status_t
i915_surface_clear (i915_surface_t *dst)
{
    i915_device_t *device;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (i915_surface_needs_tiling (dst)) {
	int cmd, br13, clear = 0;

	if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	    i915_batch_space (device) < 6)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	if (device->vertex_count)
	    i915_vbo_flush (device);

	cmd = XY_COLOR_BLT_CMD;
	br13 = (0xCC << 16) | dst->intel.drm.stride;
	switch (dst->intel.drm.format) {
	default:
	case CAIRO_FORMAT_INVALID:
	case CAIRO_FORMAT_A1:
	    ASSERT_NOT_REACHED;
	case CAIRO_FORMAT_A8:
	    break;
	case CAIRO_FORMAT_RGB16_565:
	    br13 |= BR13_565;
	    break;
	case CAIRO_FORMAT_RGB24:
	    clear |= 0xff000000;
	case CAIRO_FORMAT_ARGB32:
	    br13 |= BR13_8888;
	    cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	    break;
	}

	OUT_DWORD (cmd);
	OUT_DWORD (br13);
	OUT_DWORD (0);
	OUT_DWORD (((dst->intel.drm.height - 1) << 16) |
		   (dst->intel.drm.width - 1));
	OUT_RELOC_FENCED (dst,
			  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	OUT_DWORD (clear);
    } else {
	if (! i915_check_aperture (device, bo_array, 1) ||
	    i915_batch_space (device) < 24)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	if (device->vertex_count)
	    i915_vbo_flush (device);

	i915_set_dst (device, dst);

	/* set clear parameters */
	if (device->clear_alpha != (dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA)) {
	    device->clear_alpha = dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA;
	    OUT_DWORD (_3DSTATE_CLEAR_PARAMETERS);
	    OUT_DWORD (CLEARPARAM_CLEAR_RECT | CLEARPARAM_WRITE_COLOR);
	    /* ZONE_INIT color */
	    if (device->clear_alpha) /* XXX depends on pixel format, 16bit needs replication, 8bit? */
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* ZONE_INIT depth */
	    /* CLEAR_RECT color */
	    if (device->clear_alpha)
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* CLEAR_RECT depth */
	    OUT_DWORD (0); /* CLEAR_RECT stencil */
	}

	OUT_DWORD (PRIM3D_CLEAR_RECT | 5);
	OUT_DWORD (pack_float (dst->intel.drm.width));
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (0);
    }

    cairo_device_release (&device->intel.base.base);

    dst->deferred_clear = FALSE;
    return status;
}
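/* Clears are deferred where possible: i915_surface_paint() merely sets
 * dst->deferred_clear for a whole-surface CLEAR, and this function is
 * only invoked once somebody actually needs the cleared contents (see
 * i915_surface_flush() and _clip_and_composite_boxes()).
 */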
static cairo_status_t
_clip_and_composite_boxes (i915_surface_t *dst,
			   cairo_operator_t op,
			   const cairo_pattern_t *src,
			   cairo_boxes_t *boxes,
			   cairo_antialias_t antialias,
			   const cairo_composite_rectangles_t *extents,
			   cairo_clip_t *clip,
			   double opacity)
{
    cairo_status_t status;

    if (boxes->num_boxes == 0) {
	if (extents->is_bounded)
	    return CAIRO_STATUS_SUCCESS;

	return i915_fixup_unbounded (dst, extents, clip);
    }

    if (clip == NULL &&
	(op == CAIRO_OPERATOR_SOURCE || (op == CAIRO_OPERATOR_OVER && dst->intel.drm.base.is_clear)) &&
	boxes->num_boxes == 1 &&
	extents->bounded.width == dst->intel.drm.width &&
	extents->bounded.height == dst->intel.drm.height)
    {
	op = CAIRO_OPERATOR_SOURCE;
	dst->deferred_clear = FALSE;

	status = _upload_image_inplace (dst, src,
					&extents->bounded, boxes);
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    return status;
    }

    if (dst->deferred_clear) {
	status = i915_surface_clear (dst);
	if (unlikely (status))
	    return status;
    }

    /* Use a fast path if the boxes are pixel aligned */
    status = _composite_boxes (dst, op, src, boxes, antialias, clip, opacity, extents);
    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	return status;

    /* Otherwise render the boxes via an implicit mask and composite in the usual
     * fashion.
     */
    return i915_clip_and_composite_spans (dst, op, src, antialias,
					  _composite_boxes_spans, boxes,
					  extents, clip, opacity);
}
static cairo_clip_path_t *
_clip_get_solitary_path (cairo_clip_t *clip)
{
    cairo_clip_path_t *iter = clip->path;
    cairo_clip_path_t *path = NULL;

    do {
	if ((iter->flags & CAIRO_CLIP_PATH_IS_BOX) == 0) {
	    if (path != NULL)
		return NULL;

	    path = iter;
	}
	iter = iter->prev;
    } while (iter != NULL);

    return path;
}
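/* Returns the single non-box clip path, if exactly one exists; the
 * callers below use it to fold such a clip directly into the
 * fill/paint operation instead of building a clip mask.
 */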
typedef struct {
    cairo_polygon_t polygon;
    cairo_fill_rule_t fill_rule;
    cairo_antialias_t antialias;
} composite_polygon_info_t;

static cairo_status_t
_composite_polygon_spans (void *closure,
			  cairo_span_renderer_t *renderer,
			  const cairo_rectangle_int_t *extents)
{
    composite_polygon_info_t *info = closure;
    cairo_botor_scan_converter_t converter;
    cairo_status_t status;
    cairo_box_t box;

    box.p1.x = _cairo_fixed_from_int (extents->x);
    box.p1.y = _cairo_fixed_from_int (extents->y);
    box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
    box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);

    _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);

    status = converter.base.add_polygon (&converter.base, &info->polygon);
    if (likely (status == CAIRO_STATUS_SUCCESS))
	status = converter.base.generate (&converter.base, renderer);

    converter.base.destroy (&converter.base);

    return status;
}
static cairo_int_status_t
i915_surface_fill_with_alpha (void *abstract_dst,
			      cairo_operator_t op,
			      const cairo_pattern_t *source,
			      cairo_path_fixed_t *path,
			      cairo_fill_rule_t fill_rule,
			      double tolerance,
			      cairo_antialias_t antialias,
			      cairo_clip_t *clip,
			      double opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_fill (&extents,
							dst->intel.drm.width,
							dst->intel.drm.height,
							op, source, path,
							clip);
    if (unlikely (status))
	return status;

    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (extents.is_bounded && clip != NULL) {
	cairo_clip_path_t *clip_path;

	if (((clip_path = _clip_get_solitary_path (clip)) != NULL) &&
	    _cairo_path_fixed_equal (&clip_path->path, path))
	{
	    clip = NULL;
	}
    }

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    assert (! _cairo_path_fixed_fill_is_empty (path));

    if (_cairo_path_fixed_fill_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
							      fill_rule,
							      &boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip, opacity);
	}

	_cairo_boxes_fini (&boxes);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	if (! extents.is_bounded)
	    status = i915_fixup_unbounded (dst, &extents, clip);

	goto CLEANUP_POLYGON;
    }

    info.fill_rule = fill_rule;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
					    _composite_polygon_spans, &info,
					    &extents, clip, opacity);

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
static cairo_int_status_t
i915_surface_paint_with_alpha (void *abstract_dst,
			       cairo_operator_t op,
			       const cairo_pattern_t *source,
			       cairo_clip_t *clip,
			       double opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_clip_path_t *clip_path;
    cairo_boxes_t boxes;
    int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
    cairo_box_t *clip_boxes = boxes.boxes_embedded;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_paint (&extents,
							 dst->intel.drm.width,
							 dst->intel.drm.height,
							 op, source,
							 clip);
    if (unlikely (status))
	return status;

    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    /* If the clip cannot be reduced to a set of boxes, we will need to
     * use a clipmask. Paint is special as it is the only operation that
     * does not implicitly use a mask, so we may be able to reduce this
     * operation to a fill...
     */
    if (clip != NULL &&
	extents.is_bounded &&
	(clip_path = _clip_get_solitary_path (clip)) != NULL)
    {
	status = i915_surface_fill_with_alpha (dst, op, source,
					       &clip_path->path,
					       clip_path->fill_rule,
					       clip_path->tolerance,
					       clip_path->antialias,
					       NULL, opacity);
    } else {
	_cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
	status = _clip_and_composite_boxes (dst, op, source,
					    &boxes, CAIRO_ANTIALIAS_DEFAULT,
					    &extents, clip, opacity);
    }

    if (clip_boxes != boxes.boxes_embedded)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
static cairo_int_status_t
i915_surface_paint (void *abstract_dst,
		    cairo_operator_t op,
		    const cairo_pattern_t *source,
		    cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;

    /* XXX unsupported operators? use pixel shader blending, eventually */

    if (op == CAIRO_OPERATOR_CLEAR && clip == NULL) {
	dst->deferred_clear = TRUE;
	return CAIRO_STATUS_SUCCESS;
    }

    return i915_surface_paint_with_alpha (dst, op, source, clip, 1.);
}
static cairo_int_status_t
i915_surface_mask (void *abstract_dst,
                   cairo_operator_t op,
                   const cairo_pattern_t *source,
                   const cairo_pattern_t *mask,
                   cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;
    i915_device_t *device;
    cairo_composite_rectangles_t extents;
    i915_shader_t shader;
    cairo_clip_t local_clip;
    cairo_region_t *clip_region = NULL;
    cairo_bool_t need_clip_surface = FALSE;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    if (mask->type == CAIRO_PATTERN_TYPE_SOLID) {
        const cairo_solid_pattern_t *solid = (cairo_solid_pattern_t *) mask;
        return i915_surface_paint_with_alpha (dst, op, source, clip, solid->color.alpha);
    }

    status = _cairo_composite_rectangles_init_for_mask (&extents,
                                                        dst->intel.drm.width,
                                                        dst->intel.drm.height,
                                                        op, source, mask, clip);
    if (unlikely (status))
        return status;

    if (_cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL && extents.is_bounded) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        status = _cairo_clip_rectangle (clip, &extents.bounded);
        if (unlikely (status)) {
            _cairo_clip_fini (&local_clip);
            return status;
        }

        have_clip = TRUE;
    }

    i915_shader_init (&shader, dst, op, 1.);

    status = i915_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          source,
                                          &extents.bounded);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    status = i915_shader_acquire_pattern (&shader,
                                          &shader.mask,
                                          mask,
                                          &extents.bounded);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    if (clip != NULL) {
        status = _cairo_clip_get_region (clip, &clip_region);
        if (unlikely (_cairo_status_is_error (status) ||
                      status == CAIRO_INT_STATUS_NOTHING_TO_DO))
        {
            goto CLEANUP_SHADER;
        }

        need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
        if (need_clip_surface)
            i915_shader_set_clip (&shader, clip);

        if (clip_region != NULL) {
            cairo_rectangle_int_t rect;
            cairo_bool_t is_empty;

            status = CAIRO_STATUS_SUCCESS;
            cairo_region_get_extents (clip_region, &rect);
            is_empty = ! _cairo_rectangle_intersect (&extents.unbounded, &rect);
            if (unlikely (is_empty))
                goto CLEANUP_SHADER;

            is_empty = ! _cairo_rectangle_intersect (&extents.bounded, &rect);
            if (unlikely (is_empty && extents.is_bounded))
                goto CLEANUP_SHADER;

            if (cairo_region_num_rectangles (clip_region) == 1)
                clip_region = NULL;
        }
    }

    if (i915_surface_needs_tiling (dst)) {
        ASSERT_NOT_REACHED;
        return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    if (dst->deferred_clear) {
        status = i915_surface_clear (dst);
        if (unlikely (status))
            goto CLEANUP_DEVICE;
    }

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
        goto CLEANUP_DEVICE;

    if (clip_region != NULL) {
        unsigned int n, num_rectangles;

        num_rectangles = cairo_region_num_rectangles (clip_region);
        for (n = 0; n < num_rectangles; n++) {
            cairo_rectangle_int_t rect;

            cairo_region_get_rectangle (clip_region, n, &rect);

            shader.add_rectangle (&shader,
                                  rect.x, rect.y,
                                  rect.x + rect.width, rect.y + rect.height);
        }
    } else {
        shader.add_rectangle (&shader,
                              extents.bounded.x, extents.bounded.y,
                              extents.bounded.x + extents.bounded.width,
                              extents.bounded.y + extents.bounded.height);
    }

    if (! extents.is_bounded)
        status = i915_fixup_unbounded (dst, &extents, clip);

CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
CLEANUP_SHADER:
    i915_shader_fini (&shader);

    if (have_clip)
        _cairo_clip_fini (&local_clip);

    return status;
}
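/* Strokes try the cheap path first: a rectilinear stroke decomposes
 * exactly into boxes. Only when that reports UNSUPPORTED is the stroke
 * tessellated into a polygon and rendered through the span machinery.
 */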
static cairo_int_status_t
i915_surface_stroke (void *abstract_dst,
                     cairo_operator_t op,
                     const cairo_pattern_t *source,
                     cairo_path_fixed_t *path,
                     const cairo_stroke_style_t *stroke_style,
                     const cairo_matrix_t *ctm,
                     const cairo_matrix_t *ctm_inverse,
                     double tolerance,
                     cairo_antialias_t antialias,
                     cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_stroke (&extents,
                                                          dst->intel.drm.width,
                                                          dst->intel.drm.height,
                                                          op, source,
                                                          path, stroke_style, ctm,
                                                          clip);
    if (unlikely (status))
        return status;

    if (_cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
        if (have_clip)
            _cairo_clip_fini (&local_clip);

        return status;
    }

    if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
        cairo_boxes_t boxes;

        _cairo_boxes_init (&boxes);
        _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
        status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
                                                                stroke_style,
                                                                ctm,
                                                                &boxes);
        if (likely (status == CAIRO_STATUS_SUCCESS)) {
            status = _clip_and_composite_boxes (dst, op, source,
                                                &boxes, antialias,
                                                &extents, clip, 1.);
        }

        _cairo_boxes_fini (&boxes);

        if (status != CAIRO_INT_STATUS_UNSUPPORTED)
            goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_stroke_to_polygon (path,
                                                  stroke_style,
                                                  ctm, ctm_inverse,
                                                  tolerance,
                                                  &info.polygon);
    if (unlikely (status))
        goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
        cairo_rectangle_int_t rect;

        _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
        if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
            goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
        if (! extents.is_bounded)
            status = i915_fixup_unbounded (dst, &extents, clip);

        goto CLEANUP_POLYGON;
    }

    info.fill_rule = CAIRO_FILL_RULE_WINDING;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
                                            _composite_polygon_spans, &info,
                                            &extents, clip, 1.);

CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
        free (clip_boxes);

    if (have_clip)
        _cairo_clip_fini (&local_clip);

    return status;
}
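/* A plain fill is just the fill-with-alpha path at full opacity. */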
static cairo_int_status_t
i915_surface_fill (void *abstract_dst,
                   cairo_operator_t op,
                   const cairo_pattern_t *source,
                   cairo_path_fixed_t *path,
                   cairo_fill_rule_t fill_rule,
                   double tolerance,
                   cairo_antialias_t antialias,
                   cairo_clip_t *clip)
{
    return i915_surface_fill_with_alpha (abstract_dst, op, source, path,
                                         fill_rule, tolerance, antialias,
                                         clip, 1.);
}
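/* The entry points hooked into the generic surface layer; the NULL
 * slots are operations this backend leaves to cairo's fallbacks.
 */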
static const cairo_surface_backend_t i915_surface_backend = {
    CAIRO_SURFACE_TYPE_DRM,
    _cairo_default_context_create,

    i915_surface_create_similar,
    i915_surface_finish,

    NULL,
    intel_surface_acquire_source_image,
    intel_surface_release_source_image,

    NULL, NULL, NULL,
    NULL, /* composite */
    NULL, /* fill */
    NULL, /* trapezoids */
    NULL, /* span */
    NULL, /* check-span */

    NULL, /* copy_page */
    NULL, /* show_page */
    _cairo_drm_surface_get_extents,
    NULL, /* old-glyphs */
    _cairo_drm_surface_get_font_options,

    i915_surface_flush,
    NULL, /* mark_dirty */
    intel_scaled_font_fini,
    intel_scaled_glyph_fini,

    i915_surface_paint,
    i915_surface_mask,
    i915_surface_stroke,
    i915_surface_fill,
    i915_surface_glyphs,
};
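/* map0/map1 cache the sampler state (the MS3/MS4 dwords) describing the
 * surface as a texture: format, width-1 and height-1 packed into MS3,
 * the pitch into MS4; colorbuf caches the render-target format. For
 * example, a 256x64 surface packs 63 << MS3_HEIGHT_SHIFT together with
 * 255 << MS3_WIDTH_SHIFT.
 */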
static void
i915_surface_init (i915_surface_t *surface,
                   cairo_drm_device_t *device,
                   cairo_format_t format,
                   int width, int height)
{
    intel_surface_init (&surface->intel, &i915_surface_backend, device,
                        format, width, height);

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
        ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
        surface->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
        surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    case CAIRO_FORMAT_RGB24:
        surface->map0 = MAPSURF_32BIT | MT_32BIT_XRGB8888;
        surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    case CAIRO_FORMAT_RGB16_565:
        surface->map0 = MAPSURF_16BIT | MT_16BIT_RGB565;
        surface->colorbuf = COLR_BUF_RGB565;
        break;
    case CAIRO_FORMAT_A8:
        surface->map0 = MAPSURF_8BIT | MT_8BIT_A8;
        surface->colorbuf = COLR_BUF_8BIT | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    }
    surface->colorbuf |= DSTORG_HORT_BIAS (0x8) | DSTORG_VERT_BIAS (0x8);
    surface->map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
                     ((width - 1) << MS3_WIDTH_SHIFT);
    surface->map1 = 0;

    surface->is_current_texture = 0;
    surface->deferred_clear = FALSE;

    surface->offset = 0;

    surface->stencil = NULL;
    surface->cache = NULL;
}
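/* Tiling is a trade-off: tiled buffers render faster, but consume fence
 * registers and round stride and height up to tile multiples, which is
 * pure waste for small pixmaps. The heuristics below fall back to linear
 * for tiny surfaces and respect the hardware's maximum tiled pitch.
 */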
cairo_surface_t *
i915_surface_create_internal (cairo_drm_device_t *base_dev,
                              cairo_format_t format,
                              int width, int height,
                              uint32_t tiling,
                              cairo_bool_t gpu_target)
{
    i915_surface_t *surface;
    cairo_status_t status_ignored;

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
        uint32_t size, stride;
        intel_bo_t *bo;

        width = (width + 3) & -4;
        stride = cairo_format_stride_for_width (surface->intel.drm.format, width);
        /* check for tiny surfaces for which tiling is irrelevant */
        if (height * stride <= 4096)
            tiling = I915_TILING_NONE;
        if (tiling != I915_TILING_NONE && stride <= 512)
            tiling = I915_TILING_NONE;
        if (tiling != I915_TILING_NONE) {
            if (height <= 8)
                tiling = I915_TILING_NONE;
            else if (height <= 16)
                tiling = I915_TILING_X;
        }
        /* large surfaces we need to blt, so force TILING_X */
        if (height > 2048)
            tiling = I915_TILING_X;
        /* but there is a maximum limit to the tiling pitch */
        if (tiling != I915_TILING_NONE && stride > 8192)
            tiling = I915_TILING_NONE;

        stride = i915_tiling_stride (tiling, stride);
        assert (stride >= (uint32_t) cairo_format_stride_for_width (surface->intel.drm.format, width));
        assert (tiling == I915_TILING_NONE || stride <= 8192);

        height = i915_tiling_height (tiling, height);
        if (height > 64*1024) {
            free (surface);
            cairo_device_destroy (&base_dev->base);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
        }

        size = stride * height;
        bo = intel_bo_create (to_intel_device (&base_dev->base),
                              i915_tiling_size (tiling, size), size,
                              gpu_target, tiling, stride);
        if (bo == NULL) {
            status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
            free (surface);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
        }
        assert (bo->base.size >= size);

        surface->intel.drm.bo = &bo->base;
        surface->intel.drm.stride = stride;

        surface->map0 |= MS3_tiling (tiling);
        surface->map1 = (stride/4 - 1) << MS4_PITCH_SHIFT;
    }

    return &surface->intel.drm.base;
}
static cairo_surface_t *
i915_surface_create (cairo_drm_device_t *base_dev,
                     cairo_format_t format,
                     int width, int height)
{
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    return i915_surface_create_internal (base_dev, format, width, height,
                                         I915_TILING_DEFAULT, TRUE);
}
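/* Wrap a buffer shared from another process by its global (flinked)
 * name. The stride checks below follow the hardware documentation cited
 * in the comments: the pitch must cover the padded width and be a
 * multiple of 32 bytes.
 */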
static cairo_surface_t *
i915_surface_create_for_name (cairo_drm_device_t *base_dev,
                              unsigned int name,
                              cairo_format_t format,
                              int width, int height, int stride)
{
    i915_surface_t *surface;

    /* Vol I, p134: size restrictions for textures */
    /* Vol I, p129: destination surface stride must be a multiple of 32 bytes */
    if (stride < cairo_format_stride_for_width (format, (width + 3) & -4) ||
        stride & 31)
    {
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
    }

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    }

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
        surface->intel.drm.stride = stride;
        surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

        surface->intel.drm.bo =
            &intel_bo_create_for_name (to_intel_device (&base_dev->base),
                                       name)->base;
        if (unlikely (surface->intel.drm.bo == NULL)) {
            free (surface);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
        }
        to_intel_bo (surface->intel.drm.bo)->stride = stride;

        surface->map0 |= MS3_tiling (to_intel_bo (surface->intel.drm.bo)->tiling);
    }

    return &surface->intel.drm.base;
}
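/* The image caches are fixed-size tiled buffers subdivided by an rtree.
 * The asserts spell out the contract: the caller pre-aligns the
 * dimensions so the chosen tiling needs no further rounding.
 */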
static cairo_status_t
i915_buffer_cache_init (intel_buffer_cache_t *cache,
                        i915_device_t *device,
                        cairo_format_t format,
                        int width, int height)
{
    const uint32_t tiling = I915_TILING_DEFAULT;
    uint32_t stride, size;

    assert ((width & 3) == 0);
    assert ((height & 1) == 0);
    cache->buffer.width = width;
    cache->buffer.height = height;

    switch (format) {
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
        ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
        cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
        stride = width * 4;
        break;
    case CAIRO_FORMAT_A8:
        cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
        stride = width;
        break;
    }

    assert ((stride & 7) == 0);
    assert (i915_tiling_stride (tiling, stride) == stride);
    assert (i915_tiling_height (tiling, height) == height);

    size = height * stride;
    assert (i915_tiling_size (tiling, size) == size);
    cache->buffer.bo = intel_bo_create (&device->intel, size, size, FALSE, tiling, stride);
    if (unlikely (cache->buffer.bo == NULL))
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    cache->buffer.stride = cache->buffer.bo->stride;

    cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
                          ((width - 1) << MS3_WIDTH_SHIFT);
    cache->buffer.map0 |= MS3_tiling (tiling);
    cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;

    cache->ref_count = 0;
    cairo_list_init (&cache->link);

    return CAIRO_STATUS_SUCCESS;
}
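/* Upload strategy: an image wider or taller than half the atlas gets a
 * private linear bo; smaller images are packed into a shared, reusable
 * atlas so many glyph-sized sources ride in a single bo. On failure the
 * partially initialised cache is unwound via CLEANUP_CACHE below.
 */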
static i915_surface_t *
i915_surface_create_from_cacheable_image_internal (i915_device_t *device,
                                                   cairo_image_surface_t *image)
{
    i915_surface_t *surface;
    cairo_status_t status;
    cairo_list_t *caches;
    intel_buffer_cache_t *cache;
    cairo_rtree_node_t *node;
    cairo_format_t format;
    int width, height, bpp;

    format = image->format;
    if (format == CAIRO_FORMAT_A1)
        format = CAIRO_FORMAT_A8;

    width = image->width;
    height = image->height;
    if (width > IMAGE_CACHE_WIDTH/2 || height > IMAGE_CACHE_HEIGHT/2) {
        surface = (i915_surface_t *)
            i915_surface_create_internal (&device->intel.base,
                                          format,
                                          width, height,
                                          I915_TILING_NONE, FALSE);
        if (unlikely (surface->intel.drm.base.status))
            return surface;

        status = intel_bo_put_image (&device->intel,
                                     to_intel_bo (surface->intel.drm.bo),
                                     image,
                                     0, 0,
                                     width, height,
                                     0, 0);
        if (unlikely (status)) {
            cairo_surface_destroy (&surface->intel.drm.base);
            return (i915_surface_t *) _cairo_surface_create_in_error (status);
        }

        return surface;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        return (i915_surface_t *) _cairo_surface_create_in_error (status);

    switch (image->format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
        caches = &device->image_caches[0];
        format = CAIRO_FORMAT_ARGB32;
        bpp = 4;
        break;
    case CAIRO_FORMAT_A8:
    case CAIRO_FORMAT_A1:
        caches = &device->image_caches[1];
        format = CAIRO_FORMAT_A8;
        bpp = 1;
        break;
    case CAIRO_FORMAT_INVALID:
    default:
        ASSERT_NOT_REACHED;
        status = _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
        goto CLEANUP_DEVICE;
    }

    node = NULL;
    cairo_list_foreach_entry (cache, intel_buffer_cache_t, caches, link) {
        if (! intel_bo_is_inactive (&device->intel, cache->buffer.bo))
            continue;

        status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
        if (unlikely (_cairo_status_is_error (status)))
            goto CLEANUP_DEVICE;
        if (status == CAIRO_STATUS_SUCCESS)
            break;
    }
    if (node == NULL) {
        cache = malloc (sizeof (intel_buffer_cache_t));
        if (unlikely (cache == NULL)) {
            status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
            goto CLEANUP_DEVICE;
        }

        status = i915_buffer_cache_init (cache, device, format,
                                         IMAGE_CACHE_WIDTH,
                                         IMAGE_CACHE_HEIGHT);
        if (unlikely (status)) {
            free (cache);
            goto CLEANUP_DEVICE;
        }

        _cairo_rtree_init (&cache->rtree,
                           IMAGE_CACHE_WIDTH,
                           IMAGE_CACHE_HEIGHT,
                           4,
                           sizeof (i915_image_private_t));

        status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
        assert (status == CAIRO_STATUS_SUCCESS);

        cairo_list_init (&cache->link);
    }
    cairo_list_move (&cache->link, caches);
    ((i915_image_private_t *) node)->container = cache;

    status = intel_bo_put_image (&device->intel,
                                 cache->buffer.bo,
                                 image,
                                 0, 0,
                                 width, height,
                                 node->x, node->y);
    if (unlikely (status))
        goto CLEANUP_CACHE;

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL)) {
        status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
        goto CLEANUP_CACHE;
    }

    i915_surface_init (surface, &device->intel.base,
                       format, width, height);

    surface->intel.drm.stride = cache->buffer.stride;

    surface->map0 |= MS3_tiling (cache->buffer.bo->tiling);
    surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

    surface->intel.drm.bo = &intel_bo_reference (cache->buffer.bo)->base;
    surface->offset = node->y * cache->buffer.stride + bpp * node->x;

    surface->cache = (i915_image_private_t *) node;
    cache->ref_count++;

    cairo_device_release (&device->intel.base.base);

    return surface;

CLEANUP_CACHE:
    _cairo_rtree_node_destroy (&cache->rtree, node);
    if (cache->ref_count == 0) {
        intel_bo_destroy (&device->intel, cache->buffer.bo);
        _cairo_rtree_fini (&cache->rtree);
        cairo_list_del (&cache->link);
        free (cache);
    }
CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
    return (i915_surface_t *) _cairo_surface_create_in_error (status);
}
static cairo_surface_t *
i915_surface_create_from_cacheable_image (cairo_drm_device_t *device,
                                          cairo_surface_t *source)
{
    i915_surface_t *surface;
    cairo_image_surface_t *image;
    void *image_extra;
    cairo_status_t status;

    status = _cairo_surface_acquire_source_image (source, &image, &image_extra);
    if (unlikely (status))
        return _cairo_surface_create_in_error (status);

    surface = i915_surface_create_from_cacheable_image_internal ((i915_device_t *) device, image);

    _cairo_surface_release_source_image (source, image, image_extra);

    return &surface->intel.drm.base;
}
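/* Before handing the surface to the display engine, convert away from
 * Y tiling (which scan-out presumably cannot handle on this hardware):
 * flush outstanding rendering, retag the bo as X-tiled and fix up the
 * cached sampler state to match.
 */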
static cairo_status_t
i915_surface_enable_scan_out (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    intel_bo_t *bo;
    cairo_status_t status;

    if (unlikely (surface->intel.drm.bo == NULL))
        return _cairo_error (CAIRO_STATUS_INVALID_SIZE);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->tiling == I915_TILING_Y) {
        status = i915_surface_batch_flush (surface);
        if (unlikely (status))
            return status;

        bo->tiling = I915_TILING_X;
        surface->map0 &= ~MS3_tiling (I915_TILING_Y);
        surface->map0 |= MS3_tiling (I915_TILING_X);
    }

    return CAIRO_STATUS_SUCCESS;
}
static cairo_int_status_t
i915_device_flush (cairo_drm_device_t *device)
{
    cairo_status_t status;

    if (unlikely (device->base.finished))
        return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (&device->base);
    if (likely (status == CAIRO_STATUS_SUCCESS)) {
        status = i915_batch_flush ((i915_device_t *) device);
        cairo_device_release (&device->base);
    }

    return status;
}
static cairo_int_status_t
i915_device_throttle (cairo_drm_device_t *device)
{
    cairo_status_t status;

    status = cairo_device_acquire (&device->base);
    if (unlikely (status))
        return status;

    status = i915_batch_flush ((i915_device_t *) device);
    intel_throttle ((intel_device_t *) device);

    cairo_device_release (&device->base);

    return status;
}
static void
i915_device_destroy (void *data)
{
    i915_device_t *device = data;

    if (device->last_vbo)
        intel_bo_destroy (&device->intel, device->last_vbo);

    i915_batch_cleanup (device);

    intel_device_fini (&device->intel);
    free (device);
}
COMPILE_TIME_ASSERT (sizeof (i915_batch_setup) == sizeof (((i915_device_t *)0)->batch_header));
COMPILE_TIME_ASSERT (offsetof (i915_device_t, batch_base) == offsetof (i915_device_t, batch_header) + sizeof (i915_batch_setup));
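/* Device constructor: probes the GTT size, wires up the surface and
 * device vtables, and seeds the batch accounting from the kernel's
 * reported fence count (keeping two fences in reserve). Setting
 * CAIRO_DEBUG_DRM in the environment forces synchronous batches.
 */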
cairo_drm_device_t *
_cairo_drm_i915_device_create (int fd, dev_t dev_id, int vendor_id, int chip_id)
{
    i915_device_t *device;
    cairo_status_t status;
    uint64_t gtt_size;
    int n;

    if (! intel_info (fd, &gtt_size))
        return NULL;

    device = malloc (sizeof (i915_device_t));
    if (device == NULL)
        return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);

    status = intel_device_init (&device->intel, fd);
    if (unlikely (status)) {
        free (device);
        return (cairo_drm_device_t *) _cairo_device_create_in_error (status);
    }

    device->debug = 0;
    if (getenv ("CAIRO_DEBUG_DRM") != NULL)
        device->debug = I915_DEBUG_SYNC;

    n = intel_get (fd, I915_PARAM_NUM_FENCES_AVAIL);
    if (n == 0)
        n = 8;
    device->batch.fences_avail = n - 2; /* conservative */

    device->batch.gtt_avail_size = device->intel.gtt_avail_size / 4;
    device->batch.est_gtt_size = I915_BATCH_SIZE;
    device->batch.total_gtt_size = I915_BATCH_SIZE;
    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
    device->batch.used = 0;
    device->batch.fences = 0;

    memcpy (device->batch_header, i915_batch_setup, sizeof (i915_batch_setup));
    device->vbo = 0;
    device->vbo_offset = 0;
    device->vbo_used = 0;
    device->vertex_index = 0;
    device->vertex_count = 0;
    device->last_vbo = NULL;

    for (n = 0; n < ARRAY_LENGTH (device->image_caches); n++)
        cairo_list_init (&device->image_caches[n]);

    device->intel.base.surface.create = i915_surface_create;
    device->intel.base.surface.create_for_name = i915_surface_create_for_name;
    device->intel.base.surface.create_from_cacheable_image = i915_surface_create_from_cacheable_image;

    device->intel.base.surface.flink = _cairo_drm_surface_flink;
    device->intel.base.surface.enable_scan_out = i915_surface_enable_scan_out;
    device->intel.base.surface.map_to_image = intel_surface_map_to_image;

    device->intel.base.device.flush = i915_device_flush;
    device->intel.base.device.throttle = i915_device_throttle;
    device->intel.base.device.destroy = i915_device_destroy;

    device->floats_per_vertex = 0;
    device->current_source = NULL;
    device->current_mask = NULL;
    device->current_clip = NULL;

    i915_device_reset (device);

    return _cairo_drm_device_init (&device->intel.base,
                                   fd, dev_id, vendor_id, chip_id,