/* Cairo - a vector graphics library with display and print output
 *
 * Copyright © 2009 Chris Wilson
 *
 * This library is free software; you can redistribute it and/or
 * modify it either under the terms of the GNU Lesser General Public
 * License version 2.1 as published by the Free Software Foundation
 * (the "LGPL") or, at your option, under the terms of the Mozilla
 * Public License Version 1.1 (the "MPL"). If you do not alter this
 * notice, a recipient may use your version of this file under either
 * the MPL or the LGPL.
 *
 * You should have received a copy of the LGPL along with this library
 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
 * You should have received a copy of the MPL along with this library
 * in the file COPYING-MPL-1.1
 *
 * The contents of this file are subject to the Mozilla Public License
 * Version 1.1 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
 * the specific language governing rights and limitations.
 *
 * **************************************************************************
 * This work was initially based upon xf86-video-intel/src/i915_render.c:
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Wang Zhenyu <zhenyu.z.wang@intel.com>
 *    Eric Anholt <eric@anholt.net>
 *
 * **************************************************************************
 * and also upon libdrm/intel/intel_bufmgr_gem.c:
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

/* XXX
 * - Per thread context? Would it actually avoid many locks?
 */
96 #include "cairo-drm-private.h"
97 #include "cairo-drm-ioctl-private.h"
98 #include "cairo-drm-intel-private.h"
99 #include "cairo-drm-intel-command-private.h"
100 #include "cairo-drm-intel-ioctl-private.h"
101 #include "cairo-drm-i915-private.h"
103 #include "cairo-boxes-private.h"
104 #include "cairo-cache-private.h"
105 #include "cairo-composite-rectangles-private.h"
106 #include "cairo-default-context-private.h"
107 #include "cairo-error-private.h"
108 #include "cairo-freelist-private.h"
109 #include "cairo-list-private.h"
110 #include "cairo-path-fixed-private.h"
111 #include "cairo-region-private.h"
112 #include "cairo-surface-offset-private.h"
114 #include <sys/ioctl.h>
115 #include <sys/mman.h>
static const uint32_t i915_batch_setup[] = {
    /* Disable line anti-aliasing */
    _3DSTATE_AA_CMD,

    /* Disable independent alpha blend */
    _3DSTATE_INDEPENDENT_ALPHA_BLEND_CMD |
	IAB_MODIFY_ENABLE |
	IAB_MODIFY_FUNC | (BLENDFUNC_ADD << IAB_FUNC_SHIFT) |
	IAB_MODIFY_SRC_FACTOR | (BLENDFACT_ONE << IAB_SRC_FACTOR_SHIFT) |
	IAB_MODIFY_DST_FACTOR | (BLENDFACT_ZERO << IAB_DST_FACTOR_SHIFT),

    /* Disable texture crossbar */
    _3DSTATE_COORD_SET_BINDINGS |
	CSB_TCB (0, 0) |
	CSB_TCB (1, 1) |
	CSB_TCB (2, 2) |
	CSB_TCB (3, 3) |
	CSB_TCB (4, 4) |
	CSB_TCB (5, 5) |
	CSB_TCB (6, 6) |
	CSB_TCB (7, 7),

    _3DSTATE_MODES_4_CMD | ENABLE_LOGIC_OP_FUNC | LOGIC_OP_FUNC (LOGICOP_COPY),

    _3DSTATE_LOAD_STATE_IMMEDIATE_1 |
	I1_LOAD_S (2) |
	I1_LOAD_S (3) |
	I1_LOAD_S (4) |
	I1_LOAD_S (5) |
	I1_LOAD_S (6) |
	4,
    S2_TEXCOORD_NONE,
    0, /* Disable texture coordinate wrap-shortest */
    (1 << S4_POINT_WIDTH_SHIFT) |
	S4_LINE_WIDTH_ONE |
	S4_FLATSHADE_ALPHA |
	S4_FLATSHADE_FOG |
	S4_FLATSHADE_SPECULAR |
	S4_FLATSHADE_COLOR |
	S4_CULLMODE_NONE |
	S4_VFMT_XY,
    0, /* Disable stencil buffer */
    S6_COLOR_WRITE_ENABLE,

    _3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT,

    /* disable indirect state */
    _3DSTATE_LOAD_INDIRECT,
    0,
};
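
/* This fixed prologue puts the 3D pipe into a known state before any
 * per-operation commands are emitted: line antialiasing, independent alpha
 * blending, the texture crossbar, logic ops, scissoring and indirect state
 * are all forced off. A sketch of the intended use, assuming device
 * initialisation copies it into the per-batch header:
 *
 *     memcpy (device->batch_header,
 *             i915_batch_setup, sizeof (i915_batch_setup));
 */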
static const cairo_surface_backend_t i915_surface_backend;

static cairo_surface_t *
i915_surface_create_from_cacheable_image (cairo_drm_device_t *base_dev,
					  cairo_surface_t *source);
static cairo_status_t
i915_bo_exec (i915_device_t *device, intel_bo_t *bo, uint32_t offset)
{
    struct drm_i915_gem_execbuffer2 execbuf;
    int ret, i, cnt;

    /* Add the batch buffer to the validation list. */
    cnt = device->batch.exec_count;
    if (cnt > 0 && bo->base.handle == device->batch.exec[cnt-1].handle)
	i = cnt - 1;
    else
	i = device->batch.exec_count++;
    device->batch.exec[i].handle = bo->base.handle;
    device->batch.exec[i].relocation_count = device->batch.reloc_count;
    device->batch.exec[i].relocs_ptr = (uintptr_t) device->batch.reloc;
    device->batch.exec[i].alignment = 0;
    device->batch.exec[i].offset = 0;
    device->batch.exec[i].flags = 0;
    device->batch.exec[i].rsvd1 = 0;
    device->batch.exec[i].rsvd2 = 0;

    execbuf.buffers_ptr = (uintptr_t) device->batch.exec;
    execbuf.buffer_count = device->batch.exec_count;
    execbuf.batch_start_offset = offset;
    execbuf.batch_len = (device->batch.used << 2) + sizeof (device->batch_header);
    execbuf.DR1 = 0;
    execbuf.DR4 = 0;
    execbuf.num_cliprects = 0;
    execbuf.cliprects_ptr = 0;
    execbuf.flags = 0;
    execbuf.rsvd1 = 0;
    execbuf.rsvd2 = 0;

    /* Retry the submission if interrupted by a signal. */
    do {
	ret = ioctl (device->intel.base.fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    } while (ret != 0 && errno == EINTR);

    if (device->debug & I915_DEBUG_SYNC && ret == 0)
	ret = ! intel_bo_wait (&device->intel, bo);

    if (unlikely (ret)) {
	int n, m;

	fprintf (stderr, "Batch submission failed: %d\n", errno);
	fprintf (stderr, "  relocation entries: %d/%d\n",
		 device->batch.reloc_count, I915_MAX_RELOCS);
	fprintf (stderr, "  gtt size: (%zd/%zd), (%zd/%zd)\n",
		 device->batch.est_gtt_size, device->batch.gtt_avail_size,
		 device->batch.total_gtt_size, device->intel.gtt_avail_size);

	fprintf (stderr, "  buffers:\n");
	for (n = 0; n < device->batch.exec_count; n++) {
	    fprintf (stderr, "    exec[%d] = %d, %d/%d bytes, gtt = %qx\n",
		     n,
		     device->batch.exec[n].handle,
		     n == device->batch.exec_count - 1 ? bo->base.size : device->batch.target_bo[n]->base.size,
		     n == device->batch.exec_count - 1 ? bo->full_size : device->batch.target_bo[n]->full_size,
		     device->batch.exec[n].offset);
	}
	for (n = 0; n < device->batch.reloc_count; n++) {
	    for (m = 0; m < device->batch.exec_count; m++)
		if (device->batch.exec[m].handle == device->batch.reloc[n].target_handle)
		    break;

	    fprintf (stderr, "    reloc[%d] = %d @ %qx -> %qx + %qx\n", n,
		     device->batch.reloc[n].target_handle,
		     device->batch.reloc[n].offset,
		     (unsigned long long) device->batch.exec[m].offset,
		     (unsigned long long) device->batch.reloc[n].delta);

	    device->batch_base[(device->batch.reloc[n].offset - sizeof (device->batch_header)) / 4] =
		device->batch.exec[m].offset + device->batch.reloc[n].delta;
	}

	intel_dump_batchbuffer (device->batch_header,
				execbuf.batch_len,
				device->intel.base.chip_id);
    }

    VG (VALGRIND_MAKE_MEM_DEFINED (device->batch.exec, sizeof (device->batch.exec[0]) * i));

    bo->offset = device->batch.exec[i].offset;

    while (cnt--) {
	intel_bo_t *bo = device->batch.target_bo[cnt];

	bo->offset = device->batch.exec[cnt].offset;
	bo->exec = NULL;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;
	cairo_list_del (&bo->cache_list);

	intel_bo_destroy (&device->intel, bo);
    }
    assert (cairo_list_is_empty (&device->intel.bo_in_flight));

    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
    device->batch.fences = 0;

    device->batch.est_gtt_size = I915_BATCH_SIZE;
    device->batch.total_gtt_size = I915_BATCH_SIZE;

    return ret == 0 ? CAIRO_STATUS_SUCCESS : _cairo_error (CAIRO_STATUS_NO_MEMORY);
}
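
/* Track a buffer referenced by the batch: on first use the bo is appended
 * to the validation list, and every use records a relocation entry so the
 * kernel can rewrite the dword at 'pos' with the bo's final GTT address
 * (bo->offset is only the presumed location from the last submission).
 */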
static void
i915_batch_add_reloc (i915_device_t *device,
		      uint32_t pos,
		      intel_bo_t *bo,
		      uint32_t offset,
		      uint32_t read_domains,
		      uint32_t write_domain,
		      cairo_bool_t needs_fence)
{
    int index;

    assert (offset < bo->base.size);

    if (bo->exec == NULL) {
	device->batch.total_gtt_size += bo->base.size;

	if (! bo->busy)
	    device->batch.est_gtt_size += bo->base.size;

	assert (device->batch.exec_count < ARRAY_LENGTH (device->batch.exec));

	index = device->batch.exec_count++;
	device->batch.exec[index].handle = bo->base.handle;
	device->batch.exec[index].relocation_count = 0;
	device->batch.exec[index].relocs_ptr = 0;
	device->batch.exec[index].alignment = 0;
	device->batch.exec[index].offset = 0;
	device->batch.exec[index].flags = 0;
	device->batch.exec[index].rsvd1 = 0;
	device->batch.exec[index].rsvd2 = 0;

	device->batch.target_bo[index] = intel_bo_reference (bo);

	bo->exec = &device->batch.exec[index];
    }

    if (bo->tiling != I915_TILING_NONE) {
	uint32_t alignment;

#if 0
	/* We presume that we will want to use a fence with X tiled objects... */
	if (needs_fence || bo->tiling == I915_TILING_X)
	    alignment = bo->full_size;
	else
	    alignment = 2*((bo->stride + 4095) & -4096);
#else
	alignment = bo->full_size;
#endif
	if (bo->exec->alignment < alignment)
	    bo->exec->alignment = alignment;

	if (needs_fence && (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) {
	    bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE;
	    device->batch.fences++;

	    intel_bo_set_tiling (&device->intel, bo);
	}
    }

    assert (device->batch.reloc_count < ARRAY_LENGTH (device->batch.reloc));

    index = device->batch.reloc_count++;
    device->batch.reloc[index].offset = (pos << 2) + sizeof (device->batch_header);
    device->batch.reloc[index].delta = offset;
    device->batch.reloc[index].target_handle = bo->base.handle;
    device->batch.reloc[index].read_domains = read_domains;
    device->batch.reloc[index].write_domain = write_domain;
    device->batch.reloc[index].presumed_offset = bo->offset;

    assert (write_domain == 0 || bo->batch_write_domain == 0 || bo->batch_write_domain == write_domain);
    bo->batch_read_domains |= read_domains;
    bo->batch_write_domain |= write_domain;
}
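
/* Flush the accumulated vertices: emit the RECTLIST primitive for the
 * pending run, upload the staging buffer into a fresh vbo, and patch the
 * earlier S0 slot (via a relocation) to point at it. The slack at the tail
 * of the vbo is remembered so a small batch can later be squeezed into it.
 */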
static void
i915_vbo_finish (i915_device_t *device)
{
    intel_bo_t *vbo;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));
    assert (device->vbo_used);

    if (device->vertex_count) {
	if (device->vbo == 0) {
	    OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
		       I1_LOAD_S (0) |
		       I1_LOAD_S (1) |
		       1);
	    device->vbo = device->batch.used++;
	    device->vbo_max_index = device->batch.used;
	    OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
		       (device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	}

	OUT_DWORD (PRIM3D_RECTLIST |
		   PRIM3D_INDIRECT_SEQUENTIAL |
		   device->vertex_count);
	OUT_DWORD (device->vertex_index);
    }

    if (device->last_vbo != NULL) {
	intel_bo_in_flight_add (&device->intel, device->last_vbo);
	intel_bo_destroy (&device->intel, device->last_vbo);
    }

    device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

    /* will include a few bytes of inter-array padding */
    vbo = intel_bo_create (&device->intel,
			   device->vbo_used, device->vbo_used,
			   FALSE, I915_TILING_NONE, 0);
    i915_batch_fill_reloc (device, device->vbo, vbo, 0,
			   I915_GEM_DOMAIN_VERTEX, 0);
    intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
    device->last_vbo = vbo;
    device->last_vbo_offset = (device->vbo_used + 7) & -8;
    device->last_vbo_space = vbo->base.size - device->last_vbo_offset;

    device->vbo = 0;

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;

    if (! i915_check_aperture_size (device, 1, I915_VBO_SIZE, I915_VBO_SIZE)) {
	cairo_status_t status;

	status = i915_batch_flush (device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);

	status = i915_shader_commit (device->shader, device);
	if (unlikely (status))
	    longjmp (device->shader->unwind, status);
    }
}
/* XXX improve state tracker/difference and flush state on vertex emission */
static void
i915_device_reset (i915_device_t *device)
{
    if (device->current_source != NULL)
	*device->current_source = 0;
    if (device->current_mask != NULL)
	*device->current_mask = 0;
    if (device->current_clip != NULL)
	*device->current_clip = 0;

    device->current_target = NULL;
    device->current_size = 0;
    device->current_source = NULL;
    device->current_mask = NULL;
    device->current_clip = NULL;
    device->current_texcoords = ~0;
    device->current_blend = 0;
    device->current_n_constants = 0;
    device->current_n_samplers = 0;
    device->current_n_maps = 0;
    device->current_colorbuf = 0;
    device->current_diffuse = 0;
    device->current_program = ~0;
    device->clear_alpha = ~0;

    device->last_source_fragment = ~0;
}
static void
i915_batch_cleanup (i915_device_t *device)
{
    int i;

    for (i = 0; i < device->batch.exec_count; i++) {
	intel_bo_t *bo = device->batch.target_bo[i];

	bo->exec = NULL;
	bo->batch_read_domains = 0;
	bo->batch_write_domain = 0;
	cairo_list_del (&bo->cache_list);

	intel_bo_destroy (&device->intel, bo);
    }

    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
}
static void
i915_batch_vbo_finish (i915_device_t *device)
{
    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo || i915_batch_space (device) < (int32_t) device->vbo_used) {
	intel_bo_t *vbo;

	if (device->vertex_count) {
	    if (device->vbo == 0) {
		OUT_DWORD (_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
			   I1_LOAD_S (0) |
			   I1_LOAD_S (1) |
			   1);
		device->vbo = device->batch.used++;
		device->vbo_max_index = device->batch.used;
		OUT_DWORD ((device->floats_per_vertex << S1_VERTEX_WIDTH_SHIFT) |
			   (device->floats_per_vertex << S1_VERTEX_PITCH_SHIFT));
	    }

	    OUT_DWORD (PRIM3D_RECTLIST |
		       PRIM3D_INDIRECT_SEQUENTIAL |
		       device->vertex_count);
	    OUT_DWORD (device->vertex_index);
	}

	if (device->last_vbo != NULL)
	    intel_bo_destroy (&device->intel, device->last_vbo);

	device->batch_base[device->vbo_max_index] |= device->vertex_index + device->vertex_count;

	/* will include a few bytes of inter-array padding */
	vbo = intel_bo_create (&device->intel,
			       device->vbo_used, device->vbo_used,
			       FALSE, I915_TILING_NONE, 0);
	i915_batch_fill_reloc (device, device->vbo,
			       vbo, 0,
			       I915_GEM_DOMAIN_VERTEX, 0);
	intel_bo_write (&device->intel, vbo, 0, device->vbo_used, device->vbo_base);
	device->last_vbo = vbo;
	device->last_vbo_offset = (device->vbo_used + 7) & -8;
	device->last_vbo_space = vbo->base.size - device->last_vbo_offset;

	device->vbo = 0;
    } else {
	/* Only a single rectlist in this batch, and no active vertex buffer. */
	OUT_DWORD (PRIM3D_RECTLIST | (device->vbo_used / 4 - 1));

	memcpy (BATCH_PTR (device), device->vbo_base, device->vbo_used);
	device->batch.used += device->vbo_used >> 2;
    }

    device->vbo_used = device->vbo_offset = 0;
    device->vertex_index = device->vertex_count = 0;
}
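
/* Finalize and submit the batch: terminate it with MI_BATCH_BUFFER_END
 * (padded with MI_NOOP to an even dword count), then either reuse the
 * slack at the end of the last vertex buffer or allocate a dedicated bo
 * for the commands before handing them to the kernel.
 */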
cairo_status_t
i915_batch_flush (i915_device_t *device)
{
    intel_bo_t *batch;
    cairo_status_t status;
    uint32_t length, offset;
    int n;

    assert (CAIRO_MUTEX_IS_LOCKED (device->intel.base.base.mutex));

    if (device->vbo_used)
	i915_batch_vbo_finish (device);

    if (device->batch.used == 0)
	return CAIRO_STATUS_SUCCESS;

    i915_batch_emit_dword (device, MI_BATCH_BUFFER_END);
    if ((device->batch.used & 1) != ((sizeof (device->batch_header)>>2) & 1))
	i915_batch_emit_dword (device, MI_NOOP);

    length = (device->batch.used << 2) + sizeof (device->batch_header);

    /* NB: it is faster to copy the data then map/unmap the batch,
     * presumably because we frequently only use a small part of the buffer.
     */
    batch = NULL;
    if (device->last_vbo) {
	if (length <= device->last_vbo_space) {
	    batch = device->last_vbo;
	    offset = device->last_vbo_offset;

	    /* fixup the relocations */
	    for (n = 0; n < device->batch.reloc_count; n++)
		device->batch.reloc[n].offset += offset;
	} else
	    intel_bo_destroy (&device->intel, device->last_vbo);
	device->last_vbo = NULL;
    }
    if (batch == NULL) {
	batch = intel_bo_create (&device->intel,
				 length, length,
				 FALSE, I915_TILING_NONE, 0);
	if (unlikely (batch == NULL)) {
	    status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
	    i915_batch_cleanup (device);
	    goto BAIL;
	}

	offset = 0;
    }

    intel_bo_write (&device->intel, batch, offset, length, device->batch_header);
    status = i915_bo_exec (device, batch, offset);
    intel_bo_destroy (&device->intel, batch);

  BAIL:
    device->batch.used = 0;

    intel_glyph_cache_unpin (&device->intel);
    intel_snapshot_cache_thaw (&device->intel);

    i915_device_reset (device);

    return status;
}
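
/* Carve out room in the staging vertex buffer for up to num_rects
 * rectangles, flushing the current vbo first if even one more rectangle
 * would overflow it; returns a pointer to the reserved floats and stores
 * in *count how many rectangles actually fit.
 */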
static float *
i915_add_rectangles (i915_device_t *device, int num_rects, int *count)
{
    float *vertices;
    uint32_t size;
    int cnt;

    assert (device->floats_per_vertex);

    size = device->rectangle_size;
    if (unlikely (device->vbo_offset + size > I915_VBO_SIZE))
	i915_vbo_finish (device);

    vertices = (float *) (device->vbo_base + device->vbo_offset);
    cnt = (I915_VBO_SIZE - device->vbo_offset) / size;
    if (cnt > num_rects)
	cnt = num_rects;
    device->vbo_used = device->vbo_offset += size * cnt;
    device->vertex_count += 3 * cnt;
    *count = cnt;
    return vertices;
}
static cairo_surface_t *
i915_surface_create_similar (void *abstract_other,
			     cairo_content_t content,
			     int width, int height)
{
    i915_surface_t *other;
    cairo_format_t format;
    uint32_t tiling = I915_TILING_DEFAULT;

    other = abstract_other;
    if (content == other->intel.drm.base.content)
	format = other->intel.drm.format;
    else
	format = _cairo_format_from_content (content);

    if (width * _cairo_format_bits_per_pixel (format) > 8 * 32*1024 || height > 64*1024)
	return NULL;

    /* we presume that a similar surface will be used for blitting */
    if (i915_surface_needs_tiling (other))
	tiling = I915_TILING_X;

    return i915_surface_create_internal ((cairo_drm_device_t *) other->intel.drm.base.device,
					 format,
					 width, height,
					 tiling, TRUE);
}
static cairo_status_t
i915_surface_finish (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    i915_device_t *device = i915_device (surface);

    if (surface->stencil != NULL) {
	intel_bo_in_flight_add (&device->intel, surface->stencil);
	intel_bo_destroy (&device->intel, surface->stencil);
    }

    if (surface->is_current_texture) {
	if (surface->is_current_texture & CURRENT_SOURCE)
	    device->current_source = NULL;
	if (surface->is_current_texture & CURRENT_MASK)
	    device->current_mask = NULL;
	if (surface->is_current_texture & CURRENT_CLIP)
	    device->current_clip = NULL;
	device->current_n_samplers = 0;
    }

    if (surface == device->current_target)
	device->current_target = NULL;

    if (surface->cache != NULL) {
	i915_image_private_t *node = surface->cache;
	intel_buffer_cache_t *cache = node->container;

	if (--cache->ref_count == 0) {
	    intel_bo_in_flight_add (&device->intel, cache->buffer.bo);
	    intel_bo_destroy (&device->intel, cache->buffer.bo);
	    _cairo_rtree_fini (&cache->rtree);
	    cairo_list_del (&cache->link);
	    free (cache);
	} else {
	    node->node.state = CAIRO_RTREE_NODE_AVAILABLE;
	    cairo_list_move (&node->node.link, &cache->rtree.available);
	    _cairo_rtree_node_collapse (&cache->rtree, node->node.parent);
	}
    }

    return intel_surface_finish (&surface->intel);
}
static cairo_status_t
i915_surface_batch_flush (i915_surface_t *surface)
{
    cairo_status_t status;
    intel_bo_t *bo;

    assert (surface->intel.drm.fallback == NULL);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo == NULL || bo->batch_write_domain == 0)
	return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (surface->intel.drm.base.device);
    if (unlikely (status))
	return status;

    status = i915_batch_flush (i915_device (surface));
    cairo_device_release (surface->intel.drm.base.device);

    return status;
}
static cairo_status_t
i915_surface_flush (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    cairo_status_t status;

    if (surface->intel.drm.fallback == NULL) {
	if (surface->intel.drm.base.finished) {
	    /* Forgo flushing on finish as the user cannot access the surface directly. */
	    return CAIRO_STATUS_SUCCESS;
	}

	if (surface->deferred_clear) {
	    status = i915_surface_clear (surface);
	    if (unlikely (status))
		return status;
	}

	return i915_surface_batch_flush (surface);
    }

    return intel_surface_flush (abstract_surface);
}
static cairo_status_t
_composite_boxes_spans (void *closure,
			cairo_span_renderer_t *renderer,
			const cairo_rectangle_int_t *extents)
{
    cairo_boxes_t *boxes = closure;
    cairo_rectangular_scan_converter_t converter;
    struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    int i;

    _cairo_rectangular_scan_converter_init (&converter, extents);
    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    status = _cairo_rectangular_scan_converter_add_box (&converter, &box[i], 1);
	    if (unlikely (status))
		goto CLEANUP;
	}
    }

    status = converter.base.generate (&converter.base, renderer);

  CLEANUP:
    converter.base.destroy (&converter.base);
    return status;
}
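
/* For unbounded operators the area outside the composite extents must
 * still be updated: with an irreducible clip we paint DEST_OVER with white
 * under the clip surface, otherwise we simply CLEAR, covering either the
 * whole unbounded rectangle or the four borders around the bounded area.
 */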
static cairo_status_t
i915_fixup_unbounded (i915_surface_t *dst,
		      const cairo_composite_rectangles_t *extents,
		      cairo_clip_t *clip)
{
    i915_shader_t shader;
    i915_device_t *device;
    cairo_status_t status;

    if (clip != NULL) {
	cairo_region_t *clip_region = NULL;

	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	assert (clip_region == NULL);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    } else {
	if (extents->bounded.width == extents->unbounded.width &&
	    extents->bounded.height == extents->unbounded.height)
	{
	    return CAIRO_STATUS_SUCCESS;
	}
    }

    if (clip != NULL) {
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	i915_shader_set_clip (&shader, clip);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_white.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    } else {
	i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	status = i915_shader_acquire_pattern (&shader,
					      &shader.source,
					      &_cairo_pattern_clear.base,
					      &extents->unbounded);
	assert (status == CAIRO_STATUS_SUCCESS);
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status)) {
	i915_shader_fini (&shader);
	return status;
    }

    status = i915_shader_commit (&shader, device);
    if (unlikely (status)) {
	i915_shader_fini (&shader);
	goto BAIL;
    }

    if (extents->bounded.width == 0 || extents->bounded.height == 0) {
	shader.add_rectangle (&shader,
			      extents->unbounded.x,
			      extents->unbounded.y,
			      extents->unbounded.width,
			      extents->unbounded.height);
    } else {
	/* top */
	if (extents->bounded.y != extents->unbounded.y) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->unbounded.y,
				  extents->unbounded.width,
				  extents->bounded.y - extents->unbounded.y);
	}

	/* left */
	if (extents->bounded.x != extents->unbounded.x) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y,
				  extents->bounded.x - extents->unbounded.x,
				  extents->bounded.height);
	}

	/* right */
	if (extents->bounded.x + extents->bounded.width != extents->unbounded.x + extents->unbounded.width) {
	    shader.add_rectangle (&shader,
				  extents->bounded.x + extents->bounded.width,
				  extents->bounded.y,
				  extents->unbounded.x + extents->unbounded.width - (extents->bounded.x + extents->bounded.width),
				  extents->bounded.height);
	}

	/* bottom */
	if (extents->bounded.y + extents->bounded.height != extents->unbounded.y + extents->unbounded.height) {
	    shader.add_rectangle (&shader,
				  extents->unbounded.x,
				  extents->bounded.y + extents->bounded.height,
				  extents->unbounded.width,
				  extents->unbounded.y + extents->unbounded.height - (extents->bounded.y + extents->bounded.height));
	}
    }

    i915_shader_fini (&shader);

  BAIL:
    cairo_device_release (&device->intel.base.base);
    return status;
}
static cairo_status_t
i915_fixup_unbounded_boxes (i915_surface_t *dst,
			    const cairo_composite_rectangles_t *extents,
			    cairo_clip_t *clip,
			    cairo_boxes_t *boxes)
{
    cairo_boxes_t clear;
    cairo_box_t box;
    cairo_region_t *clip_region = NULL;
    cairo_status_t status;
    struct _cairo_boxes_chunk *chunk;
    int i;

    if (boxes->num_boxes <= 1)
	return i915_fixup_unbounded (dst, extents, clip);

    _cairo_boxes_init (&clear);

    box.p1.x = _cairo_fixed_from_int (extents->unbounded.x + extents->unbounded.width);
    box.p1.y = _cairo_fixed_from_int (extents->unbounded.y);
    box.p2.x = _cairo_fixed_from_int (extents->unbounded.x);
    box.p2.y = _cairo_fixed_from_int (extents->unbounded.y + extents->unbounded.height);

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    clip = NULL;
    }

    if (clip_region == NULL) {
	cairo_boxes_t tmp;

	_cairo_boxes_init (&tmp);

	status = _cairo_boxes_add (&tmp, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	tmp.chunks.next = &boxes->chunks;
	tmp.num_boxes += boxes->num_boxes;

	status = _cairo_bentley_ottmann_tessellate_boxes (&tmp,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
	tmp.chunks.next = NULL;
    } else {
	pixman_box32_t *pbox;

	pbox = pixman_region32_rectangles (&clip_region->rgn, &i);
	_cairo_boxes_limit (&clear, (cairo_box_t *) pbox, i);

	status = _cairo_boxes_add (&clear, &box);
	assert (status == CAIRO_STATUS_SUCCESS);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		status = _cairo_boxes_add (&clear, &chunk->base[i]);
		if (unlikely (status)) {
		    _cairo_boxes_fini (&clear);
		    return status;
		}
	    }
	}

	status = _cairo_bentley_ottmann_tessellate_boxes (&clear,
							  CAIRO_FILL_RULE_WINDING,
							  &clear);
    }

    if (likely (status == CAIRO_STATUS_SUCCESS && clear.num_boxes)) {
	i915_shader_t shader;
	i915_device_t *device;

	if (clip != NULL) {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_DEST_OVER, 1.);
	    i915_shader_set_clip (&shader, clip);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_white.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	} else {
	    i915_shader_init (&shader, dst, CAIRO_OPERATOR_CLEAR, 1.);
	    status = i915_shader_acquire_pattern (&shader,
						  &shader.source,
						  &_cairo_pattern_clear.base,
						  &extents->unbounded);
	    assert (status == CAIRO_STATUS_SUCCESS);
	}

	device = i915_device (dst);
	status = cairo_device_acquire (&device->intel.base.base);
	if (unlikely (status))
	    goto err_shader;

	status = i915_shader_commit (&shader, device);
	if (unlikely (status))
	    goto err_device;

	for (chunk = &clear.chunks; chunk != NULL; chunk = chunk->next) {
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_part (chunk->base[i].p1.x);
		int y1 = _cairo_fixed_integer_part (chunk->base[i].p1.y);
		int x2 = _cairo_fixed_integer_part (chunk->base[i].p2.x);
		int y2 = _cairo_fixed_integer_part (chunk->base[i].p2.y);

		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	    }
	}

      err_device:
	cairo_device_release (&device->intel.base.base);
      err_shader:
	i915_shader_fini (&shader);
    }

    _cairo_boxes_fini (&clear);

    return status;
}
static cairo_bool_t
i915_can_blt (i915_surface_t *dst,
	      const cairo_pattern_t *pattern)
{
    const cairo_surface_pattern_t *spattern;
    i915_surface_t *src;

    spattern = (const cairo_surface_pattern_t *) pattern;
    src = (i915_surface_t *) spattern->surface;

    if (src->intel.drm.base.device != dst->intel.drm.base.device)
	return FALSE;

    if (! i915_surface_needs_tiling (dst))
	return FALSE;

    if (! _cairo_matrix_is_translation (&pattern->matrix))
	return FALSE;

    if (! (pattern->filter == CAIRO_FILTER_NEAREST ||
	   pattern->filter == CAIRO_FILTER_FAST))
    {
	if (! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.x0)) ||
	    ! _cairo_fixed_is_integer (_cairo_fixed_from_double (pattern->matrix.y0)))
	{
	    return FALSE;
	}
    }

    return _cairo_format_bits_per_pixel (src->intel.drm.format) ==
	   _cairo_format_bits_per_pixel (dst->intel.drm.format);
}
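
/* Copy a single rectangle with the 2D blitter. The dwords emitted below
 * follow the XY_SRC_COPY_BLT layout: command, BR13 (ROP 0xCC, i.e. source
 * copy, alongside the destination pitch and depth), destination top-left
 * and bottom-right, destination reloc, source top-left, source pitch and
 * source reloc. An MI_FLUSH may be appended because the render cache is
 * not synchronised automatically between the 2D engine and the 3D sampler.
 */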
static cairo_status_t
i915_blt (i915_surface_t *src,
	  i915_surface_t *dst,
	  int src_x, int src_y,
	  int width, int height,
	  int dst_x, int dst_y,
	  cairo_bool_t flush)
{
    i915_device_t *device;
    intel_bo_t *bo_array[2];
    cairo_status_t status;
    int br13, cmd;

    bo_array[0] = to_intel_bo (dst->intel.drm.bo);
    bo_array[1] = to_intel_bo (src->intel.drm.bo);

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
	i915_batch_space (device) < 9)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto CLEANUP;
    }

    cmd = XY_SRC_COPY_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    OUT_DWORD (cmd);
    OUT_DWORD (br13);
    OUT_DWORD ((dst_y << 16) | dst_x);
    OUT_DWORD (((dst_y + height - 1) << 16) | (dst_x + width - 1));
    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
    OUT_DWORD ((src_y << 16) | src_x);
    OUT_DWORD (src->intel.drm.stride);
    OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
    /* require explicit RenderCache flush for 2D -> 3D sampler? */
    if (flush)
	OUT_DWORD (MI_FLUSH);

    status = CAIRO_STATUS_SUCCESS;
  CLEANUP:
    cairo_device_release (&device->intel.base.base);
    return status;
}
static cairo_status_t
i915_surface_copy_subimage (i915_device_t *device,
			    i915_surface_t *src,
			    const cairo_rectangle_int_t *extents,
			    cairo_bool_t flush,
			    i915_surface_t **clone_out)
{
    i915_surface_t *clone;
    cairo_status_t status;

    clone = (i915_surface_t *)
	i915_surface_create_internal (&device->intel.base,
				      src->intel.drm.format,
				      extents->width,
				      extents->height,
				      I915_TILING_X, TRUE);
    if (unlikely (clone->intel.drm.base.status))
	return clone->intel.drm.base.status;

    status = i915_blt (src, clone,
		       extents->x, extents->y,
		       extents->width, extents->height,
		       0, 0,
		       flush);
    if (unlikely (status)) {
	cairo_surface_destroy (&clone->intel.drm.base);
	return status;
    }

    *clone_out = clone;
    return CAIRO_STATUS_SUCCESS;
}
static cairo_status_t
i915_clear_boxes (i915_surface_t *dst,
		  const cairo_boxes_t *boxes)
{
    i915_device_t *device = i915_device (dst);
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };
    int cmd, br13, clear = 0, i;

    cmd = XY_COLOR_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
	clear = 0xff000000;
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	i915_batch_space (device) < 6 * boxes->num_boxes)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto RELEASE;
    }

    if (device->vertex_count)
	i915_vbo_flush (device);

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	const cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x2 <= x1 || y2 <= y1)
		continue;

	    OUT_DWORD (cmd);
	    OUT_DWORD (br13);
	    OUT_DWORD ((y1 << 16) | x1);
	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));
	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	    OUT_DWORD (clear);
	}
    }

  RELEASE:
    cairo_device_release (&device->intel.base.base);
    return status;
}
1201 static cairo_status_t
1202 i915_surface_extract_X_from_Y (i915_device_t *device,
1203 i915_surface_t *src,
1204 const cairo_rectangle_int_t *extents,
1205 i915_surface_t **clone_out)
1207 i915_surface_t *clone;
1208 i915_shader_t shader;
1209 cairo_surface_pattern_t pattern;
1210 cairo_rectangle_int_t rect;
1211 cairo_status_t status;
1213 status = i915_surface_fallback_flush (src);
1214 if (unlikely (status))
1217 clone = (i915_surface_t *)
1218 i915_surface_create_internal (&device->intel.base,
1219 src->intel.drm.format,
1222 I915_TILING_X, TRUE);
1223 if (unlikely (clone->intel.drm.base.status))
1224 return clone->intel.drm.base.status;
1226 i915_shader_init (&shader, clone, CAIRO_OPERATOR_SOURCE, 1.);
1228 _cairo_pattern_init_for_surface (&pattern, &src->intel.drm.base);
1229 pattern.base.filter = CAIRO_FILTER_NEAREST;
1230 cairo_matrix_init_translate (&pattern.base.matrix, extents->x, extents->y);
1232 rect.x = rect.y = 0;
1233 rect.width = extents->width;
1234 rect.height = extents->height;
1235 status = i915_shader_acquire_pattern (&shader, &shader.source, &pattern.base, &rect);
1236 _cairo_pattern_fini (&pattern.base);
1238 if (unlikely (status))
1241 status = cairo_device_acquire (&device->intel.base.base);
1242 if (unlikely (status))
1245 status = i915_shader_commit (&shader, device);
1246 if (unlikely (status))
1249 shader.add_rectangle (&shader, 0, 0, extents->width, extents->height);
1251 cairo_device_release (&device->intel.base.base);
1252 i915_shader_fini (&shader);
1255 return CAIRO_STATUS_SUCCESS;
1258 cairo_device_release (&device->intel.base.base);
1260 i915_shader_fini (&shader);
1261 cairo_surface_destroy (&clone->intel.drm.base);
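
/* Copy a set of boxes using the blitter, bearing in mind that it cannot
 * operate on Y-tiled surfaces: such sources are first pulled through the
 * 3D sampler into an X-tiled clone (see i915_surface_extract_X_from_Y
 * above) before being blitted box by box.
 */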
static cairo_status_t
i915_blt_boxes (i915_surface_t *dst,
		const cairo_pattern_t *pattern,
		const cairo_rectangle_int_t *extents,
		const cairo_boxes_t *boxes)
{
    const cairo_surface_pattern_t *spattern;
    i915_device_t *device;
    i915_surface_t *src;
    cairo_surface_t *free_me = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    int br13, cmd, tx, ty;
    intel_bo_t *bo_array[2];
    int i;

    if (! i915_can_blt (dst, pattern))
	return CAIRO_INT_STATUS_UNSUPPORTED;

    spattern = (const cairo_surface_pattern_t *) pattern;
    src = (i915_surface_t *) spattern->surface;

    if (src->intel.drm.base.is_clear)
	return i915_clear_boxes (dst, boxes);

    /* NB: the translation must be computed before it is used below. */
    tx = _cairo_lround (pattern->matrix.x0);
    ty = _cairo_lround (pattern->matrix.y0);

    if (pattern->extend != CAIRO_EXTEND_NONE &&
	(extents->x + tx < 0 ||
	 extents->y + ty < 0 ||
	 extents->x + tx + extents->width > src->intel.drm.width ||
	 extents->y + ty + extents->height > src->intel.drm.height))
    {
	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    status = i915_surface_fallback_flush (src);
    if (unlikely (status))
	return status;

    device = i915_device (dst);
    if (to_intel_bo (src->intel.drm.bo)->tiling == I915_TILING_Y) {
	cairo_rectangle_int_t extents;

	_cairo_boxes_extents (boxes, &extents);
	extents.x += tx;
	extents.y += ty;

	status = i915_surface_extract_X_from_Y (device, src, &extents, &src);
	if (unlikely (status))
	    return status;

	free_me = &src->intel.drm.base;
	tx = -extents.x;
	ty = -extents.y;
    }

    bo_array[0] = to_intel_bo (dst->intel.drm.bo);
    bo_array[1] = to_intel_bo (src->intel.drm.bo);

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto CLEANUP_SURFACE;

    if (! i915_check_aperture_and_fences (device, bo_array, 2) ||
	i915_batch_space (device) < 8 * boxes->num_boxes)
    {
	status = i915_batch_flush (device);
	if (unlikely (status))
	    goto CLEANUP_DEVICE;
    }

    cmd = XY_SRC_COPY_BLT_CMD;
    br13 = (0xCC << 16) | dst->intel.drm.stride;
    switch (dst->intel.drm.format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
	ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_A8:
	break;
    case CAIRO_FORMAT_RGB16_565:
	br13 |= BR13_565;
	break;
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_ARGB32:
	br13 |= BR13_8888;
	cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	break;
    }

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	const cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x1 + tx < 0)
		x1 = -tx;
	    if (x2 + tx > src->intel.drm.width)
		x2 = src->intel.drm.width - tx;

	    if (y1 + ty < 0)
		y1 = -ty;
	    if (y2 + ty > src->intel.drm.height)
		y2 = src->intel.drm.height - ty;

	    if (x2 <= x1 || y2 <= y1)
		continue;
	    if (x2 < 0 || y2 < 0)
		continue;
	    if (x1 >= dst->intel.drm.width || y2 >= dst->intel.drm.height)
		continue;

	    OUT_DWORD (cmd);
	    OUT_DWORD (br13);
	    OUT_DWORD ((y1 << 16) | x1);
	    OUT_DWORD (((y2 - 1) << 16) | (x2 - 1));
	    OUT_RELOC_FENCED (dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	    OUT_DWORD (((y1 + ty) << 16) | (x1 + tx));
	    OUT_DWORD (src->intel.drm.stride);
	    OUT_RELOC_FENCED (src, I915_GEM_DOMAIN_RENDER, 0);
	}
    }

    /* XXX fixup blank portions */

  CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
  CLEANUP_SURFACE:
    cairo_surface_destroy (free_me);
    return status;
}
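
/* Try to satisfy the composite by writing the image straight into the
 * destination bo. If the bo is busy on the GPU, a replacement bo is
 * allocated first (clearing it when only part of the surface is covered)
 * so the CPU upload never stalls behind outstanding rendering.
 */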
static cairo_status_t
_upload_image_inplace (i915_surface_t *surface,
		       const cairo_pattern_t *source,
		       const cairo_rectangle_int_t *extents,
		       const cairo_boxes_t *boxes)
{
    i915_device_t *device;
    const cairo_surface_pattern_t *pattern;
    cairo_image_surface_t *image;
    const struct _cairo_boxes_chunk *chunk;
    intel_bo_t *bo;
    int tx, ty, i;

    if (source->type != CAIRO_PATTERN_TYPE_SURFACE)
	return CAIRO_INT_STATUS_UNSUPPORTED;

    pattern = (const cairo_surface_pattern_t *) source;
    if (pattern->surface->type != CAIRO_SURFACE_TYPE_IMAGE)
	return CAIRO_INT_STATUS_UNSUPPORTED;

    if (! _cairo_matrix_is_integer_translation (&source->matrix, &tx, &ty))
	return CAIRO_INT_STATUS_UNSUPPORTED;

    image = (cairo_image_surface_t *) pattern->surface;
    if (source->extend != CAIRO_EXTEND_NONE &&
	(extents->x + tx < 0 ||
	 extents->y + ty < 0 ||
	 extents->x + tx + extents->width > image->width ||
	 extents->y + ty + extents->height > image->height))
    {
	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    device = i915_device (surface);
    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->exec != NULL || ! intel_bo_is_inactive (&device->intel, bo)) {
	intel_bo_t *new_bo;
	cairo_bool_t need_clear = FALSE;

	if (boxes->num_boxes != 1 ||
	    extents->width < surface->intel.drm.width ||
	    extents->height < surface->intel.drm.height)
	{
	    if (! surface->intel.drm.base.is_clear)
		return CAIRO_INT_STATUS_UNSUPPORTED;

	    need_clear = TRUE;
	}

	new_bo = intel_bo_create (&device->intel,
				  bo->full_size, bo->base.size,
				  FALSE, bo->tiling, bo->stride);
	if (unlikely (new_bo == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	intel_bo_in_flight_add (&device->intel, bo);
	intel_bo_destroy (&device->intel, bo);

	bo = new_bo;
	surface->intel.drm.bo = &bo->base;

	if (need_clear)
	    memset (intel_bo_map (&device->intel, bo), 0,
		    bo->stride * surface->intel.drm.height);
    }

    if (image->format == surface->intel.drm.format) {
	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    cairo_box_t *box = chunk->base;
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
		int y2 = _cairo_fixed_integer_round (box[i].p2.y);
		cairo_status_t status;

		if (x1 + tx < 0)
		    x1 = -tx;
		if (x2 + tx > image->width)
		    x2 = image->width - tx;

		if (y1 + ty < 0)
		    y1 = -ty;
		if (y2 + ty > image->height)
		    y2 = image->height - ty;

		if (x2 <= x1 || y2 <= y1)
		    continue;
		if (x2 < 0 || y2 < 0)
		    continue;
		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
		    continue;

		status = intel_bo_put_image (&device->intel,
					     bo,
					     image,
					     x1 + tx, y1 + ty,
					     x2 - x1, y2 - y1,
					     x1, y1);
		if (unlikely (status))
		    return status;
	    }
	}
    } else {
	pixman_image_t *dst;
	void *ptr;

	ptr = intel_bo_map (&device->intel, bo);
	if (unlikely (ptr == NULL))
	    return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);

	dst = pixman_image_create_bits (_cairo_format_to_pixman_format_code (surface->intel.drm.format),
					surface->intel.drm.width,
					surface->intel.drm.height,
					ptr,
					surface->intel.drm.stride);
	if (unlikely (dst == NULL))
	    return _cairo_error (CAIRO_STATUS_NO_MEMORY);

	for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	    cairo_box_t *box = chunk->base;
	    for (i = 0; i < chunk->count; i++) {
		int x1 = _cairo_fixed_integer_round (box[i].p1.x);
		int x2 = _cairo_fixed_integer_round (box[i].p2.x);
		int y1 = _cairo_fixed_integer_round (box[i].p1.y);
		int y2 = _cairo_fixed_integer_round (box[i].p2.y);

		if (x1 + tx < 0)
		    x1 = -tx;
		if (x2 + tx > image->width)
		    x2 = image->width - tx;

		if (y1 + ty < 0)
		    y1 = -ty;
		if (y2 + ty > image->height)
		    y2 = image->height - ty;

		if (x2 <= x1 || y2 <= y1)
		    continue;
		if (x2 < 0 || y2 < 0)
		    continue;
		if (x1 >= surface->intel.drm.width || y2 >= surface->intel.drm.height)
		    continue;

		pixman_image_composite32 (PIXMAN_OP_SRC,
					  image->pixman_image, NULL, dst,
					  x1 + tx, y1 + ty,
					  0, 0,
					  x1, y1,
					  x2 - x1, y2 - y1);
	    }
	}

	pixman_image_unref (dst);
    }

    return CAIRO_STATUS_SUCCESS;
}
static cairo_status_t
_composite_boxes (i915_surface_t *dst,
		  cairo_operator_t op,
		  const cairo_pattern_t *pattern,
		  cairo_boxes_t *boxes,
		  cairo_antialias_t antialias,
		  cairo_clip_t *clip,
		  double opacity,
		  const cairo_composite_rectangles_t *extents)
{
    cairo_bool_t need_clip_surface = FALSE;
    cairo_region_t *clip_region = NULL;
    const struct _cairo_boxes_chunk *chunk;
    cairo_status_t status;
    i915_shader_t shader;
    i915_device_t *device;
    int i;

    /* If the boxes are not pixel-aligned, we will need to compute a real mask */
    if (antialias != CAIRO_ANTIALIAS_NONE) {
	if (! boxes->is_pixel_aligned)
	    return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    if (clip == NULL && op == CAIRO_OPERATOR_SOURCE && opacity == 1.) {
	if (pattern->type == CAIRO_PATTERN_TYPE_SURFACE) {
	    status = i915_blt_boxes (dst, pattern, &extents->bounded, boxes);
	    if (status == CAIRO_INT_STATUS_UNSUPPORTED) {
		status = _upload_image_inplace (dst, pattern,
						&extents->bounded, boxes);
	    }
	    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
		return status;
	}
    }

    if (i915_surface_needs_tiling (dst)) {
	ASSERT_NOT_REACHED;
	return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    i915_shader_init (&shader, dst, op, opacity);

    status = i915_shader_acquire_pattern (&shader,
					  &shader.source,
					  pattern,
					  &extents->bounded);
    if (unlikely (status))
	return status;

    if (clip != NULL) {
	status = _cairo_clip_get_region (clip, &clip_region);
	assert (status == CAIRO_STATUS_SUCCESS || status == CAIRO_INT_STATUS_UNSUPPORTED);
	need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
	if (need_clip_surface)
	    i915_shader_set_clip (&shader, clip);
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	goto err_shader;

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
	goto err_device;

    for (chunk = &boxes->chunks; chunk != NULL; chunk = chunk->next) {
	cairo_box_t *box = chunk->base;
	for (i = 0; i < chunk->count; i++) {
	    int x1 = _cairo_fixed_integer_round (box[i].p1.x);
	    int y1 = _cairo_fixed_integer_round (box[i].p1.y);
	    int x2 = _cairo_fixed_integer_round (box[i].p2.x);
	    int y2 = _cairo_fixed_integer_round (box[i].p2.y);

	    if (x2 > x1 && y2 > y1)
		shader.add_rectangle (&shader, x1, y1, x2 - x1, y2 - y1);
	}
    }

    if (! extents->is_bounded)
	status = i915_fixup_unbounded_boxes (dst, extents, clip, boxes);

  err_device:
    cairo_device_release (&device->intel.base.base);
  err_shader:
    i915_shader_fini (&shader);

    return status;
}
static cairo_status_t
i915_surface_clear (i915_surface_t *dst)
{
    i915_device_t *device;
    cairo_status_t status;
    intel_bo_t *bo_array[1] = { to_intel_bo (dst->intel.drm.bo) };

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
	return status;

    if (i915_surface_needs_tiling (dst)) {
	int cmd, br13, clear = 0;

	if (! i915_check_aperture_and_fences (device, bo_array, 1) ||
	    i915_batch_space (device) < 6)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	if (device->vertex_count)
	    i915_vbo_flush (device);

	cmd = XY_COLOR_BLT_CMD;
	br13 = (0xCC << 16) | dst->intel.drm.stride;
	switch (dst->intel.drm.format) {
	default:
	case CAIRO_FORMAT_INVALID:
	case CAIRO_FORMAT_A1:
	    ASSERT_NOT_REACHED;
	case CAIRO_FORMAT_A8:
	    break;
	case CAIRO_FORMAT_RGB16_565:
	    br13 |= BR13_565;
	    break;
	case CAIRO_FORMAT_RGB24:
	    clear = 0xff000000;
	case CAIRO_FORMAT_ARGB32:
	    br13 |= BR13_8888;
	    cmd |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
	    break;
	}

	OUT_DWORD (cmd);
	OUT_DWORD (br13);
	OUT_DWORD (0);
	OUT_DWORD (((dst->intel.drm.height - 1) << 16) |
		   (dst->intel.drm.width - 1));
	OUT_RELOC_FENCED (dst,
			  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
	OUT_DWORD (clear);
    } else {
	if (! i915_check_aperture (device, bo_array, 1) ||
	    i915_batch_space (device) < 24)
	{
	    status = i915_batch_flush (device);
	    if (unlikely (status)) {
		cairo_device_release (&device->intel.base.base);
		return status;
	    }
	}

	if (device->vertex_count)
	    i915_vbo_flush (device);

	i915_set_dst (device, dst);

	/* set clear parameters */
	if (device->clear_alpha != (dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA)) {
	    device->clear_alpha = dst->intel.drm.base.content & CAIRO_CONTENT_ALPHA;
	    OUT_DWORD (_3DSTATE_CLEAR_PARAMETERS);
	    OUT_DWORD (CLEARPARAM_CLEAR_RECT | CLEARPARAM_WRITE_COLOR);
	    /* ZONE_INIT color */
	    if (device->clear_alpha) /* XXX depends on pixel format, 16bit needs replication, 8bit? */
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* ZONE_INIT depth */
	    /* CLEAR_RECT color */
	    if (device->clear_alpha)
		OUT_DWORD (0x00000000);
	    else
		OUT_DWORD (0xff000000);
	    OUT_DWORD (0); /* CLEAR_RECT depth */
	    OUT_DWORD (0); /* CLEAR_RECT stencil */
	}

	OUT_DWORD (PRIM3D_CLEAR_RECT | 5);
	OUT_DWORD (pack_float (dst->intel.drm.width));
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (pack_float (dst->intel.drm.height));
	OUT_DWORD (0);
	OUT_DWORD (0);
    }

    cairo_device_release (&device->intel.base.base);

    dst->deferred_clear = FALSE;
    return status;
}
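
/* Composite a set of pixel-aligned boxes: try the whole-surface inplace
 * upload and blitter fast paths first, then fall back to rendering the
 * boxes as spans through an implicit mask.
 */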
static cairo_status_t
_clip_and_composite_boxes (i915_surface_t *dst,
			   cairo_operator_t op,
			   const cairo_pattern_t *src,
			   cairo_boxes_t *boxes,
			   cairo_antialias_t antialias,
			   const cairo_composite_rectangles_t *extents,
			   cairo_clip_t *clip,
			   double opacity)
{
    cairo_status_t status;

    if (boxes->num_boxes == 0) {
	if (extents->is_bounded)
	    return CAIRO_STATUS_SUCCESS;

	return i915_fixup_unbounded (dst, extents, clip);
    }

    if (clip == NULL &&
	(op == CAIRO_OPERATOR_SOURCE || (op == CAIRO_OPERATOR_OVER && dst->intel.drm.base.is_clear)) &&
	opacity == 1. &&
	boxes->num_boxes == 1 &&
	extents->bounded.width == dst->intel.drm.width &&
	extents->bounded.height == dst->intel.drm.height)
    {
	op = CAIRO_OPERATOR_SOURCE;
	dst->deferred_clear = FALSE;

	status = _upload_image_inplace (dst, src,
					&extents->bounded, boxes);
	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    return status;
    }

    if (dst->deferred_clear) {
	status = i915_surface_clear (dst);
	if (unlikely (status))
	    return status;
    }

    /* Use a fast path if the boxes are pixel aligned */
    status = _composite_boxes (dst, op, src, boxes, antialias, clip, opacity, extents);
    if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	return status;

    /* Otherwise render the boxes via an implicit mask and composite in the
     * usual fashion.
     */
    return i915_clip_and_composite_spans (dst, op, src, antialias,
					  _composite_boxes_spans, boxes,
					  extents, clip, opacity);
}
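
/* Return the single clip path if every other element of the clip stack has
 * already been reduced to a box; NULL if there is more than one non-box
 * path, as the caller can then not substitute a fill for the clip.
 */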
static cairo_clip_path_t *
_clip_get_solitary_path (cairo_clip_t *clip)
{
    cairo_clip_path_t *iter = clip->path;
    cairo_clip_path_t *path = NULL;

    do {
	if ((iter->flags & CAIRO_CLIP_PATH_IS_BOX) == 0) {
	    if (path != NULL)
		return NULL;

	    path = iter;
	}
	iter = iter->prev;
    } while (iter != NULL);

    return path;
}
typedef struct {
    cairo_polygon_t polygon;
    cairo_fill_rule_t fill_rule;
    cairo_antialias_t antialias;
} composite_polygon_info_t;

static cairo_status_t
_composite_polygon_spans (void *closure,
			  cairo_span_renderer_t *renderer,
			  const cairo_rectangle_int_t *extents)
{
    composite_polygon_info_t *info = closure;
    cairo_botor_scan_converter_t converter;
    cairo_status_t status;
    cairo_box_t box;

    box.p1.x = _cairo_fixed_from_int (extents->x);
    box.p1.y = _cairo_fixed_from_int (extents->y);
    box.p2.x = _cairo_fixed_from_int (extents->x + extents->width);
    box.p2.y = _cairo_fixed_from_int (extents->y + extents->height);

    _cairo_botor_scan_converter_init (&converter, &box, info->fill_rule);

    status = converter.base.add_polygon (&converter.base, &info->polygon);
    if (likely (status == CAIRO_STATUS_SUCCESS))
	status = converter.base.generate (&converter.base, renderer);

    converter.base.destroy (&converter.base);

    return status;
}
static cairo_int_status_t
i915_surface_fill_with_alpha (void *abstract_dst,
			      cairo_operator_t op,
			      const cairo_pattern_t *source,
			      cairo_path_fixed_t *path,
			      cairo_fill_rule_t fill_rule,
			      double tolerance,
			      cairo_antialias_t antialias,
			      cairo_clip_t *clip,
			      double opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_fill (&extents,
							dst->intel.drm.width,
							dst->intel.drm.height,
							op, source, path,
							clip);
    if (unlikely (status))
	return status;

    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (extents.is_bounded && clip != NULL) {
	cairo_clip_path_t *clip_path;

	if (((clip_path = _clip_get_solitary_path (clip)) != NULL) &&
	    _cairo_path_fixed_equal (&clip_path->path, path))
	{
	    clip = NULL;
	}
    }

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    assert (! _cairo_path_fixed_fill_is_empty (path));

    if (_cairo_path_fixed_fill_is_rectilinear (path)) {
	cairo_boxes_t boxes;

	_cairo_boxes_init (&boxes);
	_cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
	status = _cairo_path_fixed_fill_rectilinear_to_boxes (path,
							      fill_rule,
							      &boxes);
	if (likely (status == CAIRO_STATUS_SUCCESS)) {
	    status = _clip_and_composite_boxes (dst, op, source,
						&boxes, antialias,
						&extents, clip, opacity);
	}

	_cairo_boxes_fini (&boxes);

	if (status != CAIRO_INT_STATUS_UNSUPPORTED)
	    goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_fill_to_polygon (path, tolerance, &info.polygon);
    if (unlikely (status))
	goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
	cairo_rectangle_int_t rect;

	_cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
	if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
	    goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
	if (! extents.is_bounded)
	    status = i915_fixup_unbounded (dst, &extents, clip);

	goto CLEANUP_POLYGON;
    }

    info.fill_rule = fill_rule;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
					    _composite_polygon_spans, &info,
					    &extents, clip, opacity);

  CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

  CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
static cairo_int_status_t
i915_surface_paint_with_alpha (void *abstract_dst,
			       cairo_operator_t op,
			       const cairo_pattern_t *source,
			       cairo_clip_t *clip,
			       double opacity)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_clip_path_t *clip_path;
    cairo_boxes_t boxes;
    int num_boxes = ARRAY_LENGTH (boxes.boxes_embedded);
    cairo_box_t *clip_boxes = boxes.boxes_embedded;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_paint (&extents,
							 dst->intel.drm.width,
							 dst->intel.drm.height,
							 op, source,
							 clip);
    if (unlikely (status))
	return status;

    if (_cairo_clip_contains_extents (clip, &extents))
	clip = NULL;

    if (clip != NULL) {
	clip = _cairo_clip_init_copy (&local_clip, clip);
	have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
	if (have_clip)
	    _cairo_clip_fini (&local_clip);

	return status;
    }

    /* If the clip cannot be reduced to a set of boxes, we will need to
     * use a clipmask. Paint is special as it is the only operation that
     * does not implicitly use a mask, so we may be able to reduce this
     * operation to a fill...
     */
    if (clip != NULL &&
	extents.is_bounded &&
	(clip_path = _clip_get_solitary_path (clip)) != NULL)
    {
	status = i915_surface_fill_with_alpha (dst, op, source,
					       &clip_path->path,
					       clip_path->fill_rule,
					       clip_path->tolerance,
					       clip_path->antialias,
					       NULL, opacity);
    } else {
	_cairo_boxes_init_for_array (&boxes, clip_boxes, num_boxes);
	status = _clip_and_composite_boxes (dst, op, source,
					    &boxes, CAIRO_ANTIALIAS_DEFAULT,
					    &extents, clip, opacity);
    }
    if (clip_boxes != boxes.boxes_embedded)
	free (clip_boxes);

    if (have_clip)
	_cairo_clip_fini (&local_clip);

    return status;
}
static cairo_int_status_t
i915_surface_paint (void *abstract_dst,
		    cairo_operator_t op,
		    const cairo_pattern_t *source,
		    cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;

    /* XXX unsupported operators? use pixel shader blending, eventually */

    if (op == CAIRO_OPERATOR_CLEAR && clip == NULL) {
	dst->deferred_clear = TRUE;
	return CAIRO_STATUS_SUCCESS;
    }

    return i915_surface_paint_with_alpha (dst, op, source, clip, 1.);
}
static cairo_int_status_t
i915_surface_mask (void *abstract_dst,
                   cairo_operator_t op,
                   const cairo_pattern_t *source,
                   const cairo_pattern_t *mask,
                   cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;
    i915_device_t *device;
    cairo_composite_rectangles_t extents;
    i915_shader_t shader;
    cairo_clip_t local_clip;
    cairo_region_t *clip_region = NULL;
    cairo_bool_t need_clip_surface = FALSE;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    if (mask->type == CAIRO_PATTERN_TYPE_SOLID) {
        const cairo_solid_pattern_t *solid = (cairo_solid_pattern_t *) mask;
        return i915_surface_paint_with_alpha (dst, op, source, clip, solid->color.alpha);
    }

    status = _cairo_composite_rectangles_init_for_mask (&extents,
                                                        dst->intel.drm.width,
                                                        dst->intel.drm.height,
                                                        op, source, mask, clip);
    if (unlikely (status))
        return status;

    if (_cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL && extents.is_bounded) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        status = _cairo_clip_rectangle (clip, &extents.bounded);
        if (unlikely (status)) {
            _cairo_clip_fini (&local_clip);
            return status;
        }

        have_clip = TRUE;
    }

    i915_shader_init (&shader, dst, op, 1.);

    status = i915_shader_acquire_pattern (&shader,
                                          &shader.source,
                                          source,
                                          &extents.bounded);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    status = i915_shader_acquire_pattern (&shader,
                                          &shader.mask,
                                          mask,
                                          &extents.bounded);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    if (clip != NULL) {
        status = _cairo_clip_get_region (clip, &clip_region);
        if (unlikely (_cairo_status_is_error (status) ||
                      status == CAIRO_INT_STATUS_NOTHING_TO_DO))
        {
            goto CLEANUP_SHADER;
        }

        need_clip_surface = status == CAIRO_INT_STATUS_UNSUPPORTED;
        if (need_clip_surface)
            i915_shader_set_clip (&shader, clip);

        if (clip_region != NULL) {
            cairo_rectangle_int_t rect;
            cairo_bool_t is_empty;

            status = CAIRO_STATUS_SUCCESS;
            cairo_region_get_extents (clip_region, &rect);
            is_empty = ! _cairo_rectangle_intersect (&extents.unbounded, &rect);
            if (unlikely (is_empty))
                goto CLEANUP_SHADER;

            is_empty = ! _cairo_rectangle_intersect (&extents.bounded, &rect);
            if (unlikely (is_empty && extents.is_bounded))
                goto CLEANUP_SHADER;

            if (cairo_region_num_rectangles (clip_region) == 1)
                clip_region = NULL;
        }
    }

    if (i915_surface_needs_tiling (dst)) {
        ASSERT_NOT_REACHED;
        return CAIRO_INT_STATUS_UNSUPPORTED;
    }

    device = i915_device (dst);
    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        goto CLEANUP_SHADER;

    if (dst->deferred_clear) {
        status = i915_surface_clear (dst);
        if (unlikely (status))
            goto CLEANUP_DEVICE;
    }

    status = i915_shader_commit (&shader, device);
    if (unlikely (status))
        goto CLEANUP_DEVICE;

    if (clip_region != NULL) {
        unsigned int n, num_rectangles;

        num_rectangles = cairo_region_num_rectangles (clip_region);
        for (n = 0; n < num_rectangles; n++) {
            cairo_rectangle_int_t rect;

            cairo_region_get_rectangle (clip_region, n, &rect);

            shader.add_rectangle (&shader,
                                  rect.x, rect.y,
                                  rect.x + rect.width, rect.y + rect.height);
        }
    } else {
        shader.add_rectangle (&shader,
                              extents.bounded.x, extents.bounded.y,
                              extents.bounded.x + extents.bounded.width,
                              extents.bounded.y + extents.bounded.height);
    }

    if (! extents.is_bounded)
        status = i915_fixup_unbounded (dst, &extents, clip);

  CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
  CLEANUP_SHADER:
    i915_shader_fini (&shader);

    if (have_clip)
        _cairo_clip_fini (&local_clip);

    return status;
}
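/* Example: a solid mask is folded into the paint opacity above, so
 *
 *     cairo_mask (cr, half_alpha);   (half_alpha: a solid pattern
 *                                     with rgba alpha of 0.5)
 *
 * reduces to i915_surface_paint_with_alpha (dst, op, source, clip, 0.5)
 * and never builds a dual-texture shader for the mask.
 */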
static cairo_int_status_t
i915_surface_stroke (void *abstract_dst,
                     cairo_operator_t op,
                     const cairo_pattern_t *source,
                     cairo_path_fixed_t *path,
                     const cairo_stroke_style_t *stroke_style,
                     const cairo_matrix_t *ctm,
                     const cairo_matrix_t *ctm_inverse,
                     double tolerance,
                     cairo_antialias_t antialias,
                     cairo_clip_t *clip)
{
    i915_surface_t *dst = abstract_dst;
    cairo_composite_rectangles_t extents;
    composite_polygon_info_t info;
    cairo_box_t boxes_stack[32], *clip_boxes = boxes_stack;
    int num_boxes = ARRAY_LENGTH (boxes_stack);
    cairo_clip_t local_clip;
    cairo_bool_t have_clip = FALSE;
    cairo_status_t status;

    status = _cairo_composite_rectangles_init_for_stroke (&extents,
                                                          dst->intel.drm.width,
                                                          dst->intel.drm.height,
                                                          op, source,
                                                          path, stroke_style, ctm,
                                                          clip);
    if (unlikely (status))
        return status;

    if (_cairo_clip_contains_extents (clip, &extents))
        clip = NULL;

    if (clip != NULL) {
        clip = _cairo_clip_init_copy (&local_clip, clip);
        have_clip = TRUE;
    }

    status = _cairo_clip_to_boxes (&clip, &extents, &clip_boxes, &num_boxes);
    if (unlikely (status)) {
        if (have_clip)
            _cairo_clip_fini (&local_clip);
        return status;
    }

    if (_cairo_path_fixed_stroke_is_rectilinear (path)) {
        cairo_boxes_t boxes;

        _cairo_boxes_init (&boxes);
        _cairo_boxes_limit (&boxes, clip_boxes, num_boxes);
        status = _cairo_path_fixed_stroke_rectilinear_to_boxes (path,
                                                                stroke_style,
                                                                ctm,
                                                                &boxes);
        if (likely (status == CAIRO_STATUS_SUCCESS)) {
            status = _clip_and_composite_boxes (dst, op, source,
                                                &boxes, antialias,
                                                &extents, clip, 1.);
        }

        _cairo_boxes_fini (&boxes);

        if (status != CAIRO_INT_STATUS_UNSUPPORTED)
            goto CLEANUP_BOXES;
    }

    _cairo_polygon_init (&info.polygon, clip_boxes, num_boxes);

    status = _cairo_path_fixed_stroke_to_polygon (path,
                                                  stroke_style,
                                                  ctm, ctm_inverse,
                                                  tolerance,
                                                  &info.polygon);
    if (unlikely (status))
        goto CLEANUP_POLYGON;

    if (extents.is_bounded) {
        cairo_rectangle_int_t rect;

        _cairo_box_round_to_rectangle (&info.polygon.extents, &rect);
        if (! _cairo_rectangle_intersect (&extents.bounded, &rect))
            goto CLEANUP_POLYGON;
    }

    if (info.polygon.num_edges == 0) {
        if (! extents.is_bounded)
            status = i915_fixup_unbounded (dst, &extents, clip);

        goto CLEANUP_POLYGON;
    }

    info.fill_rule = CAIRO_FILL_RULE_WINDING;
    info.antialias = antialias;
    status = i915_clip_and_composite_spans (dst, op, source, antialias,
                                            _composite_polygon_spans, &info,
                                            &extents, clip, 1.);

  CLEANUP_POLYGON:
    _cairo_polygon_fini (&info.polygon);

  CLEANUP_BOXES:
    if (clip_boxes != boxes_stack)
        free (clip_boxes);

    if (have_clip)
        _cairo_clip_fini (&local_clip);

    return status;
}
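/* Example: an axis-aligned, unrotated stroke such as
 *
 *     cairo_rectangle (cr, 10, 10, 100, 50);
 *     cairo_set_line_width (cr, 2);
 *     cairo_stroke (cr);
 *
 * is rectilinear, so the fast path above decomposes the pen outline
 * into boxes and composites them directly, skipping the polygon
 * tessellation and span rendering that a rotated or curved stroke
 * falls through to.
 */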
static cairo_int_status_t
i915_surface_fill (void *abstract_dst,
                   cairo_operator_t op,
                   const cairo_pattern_t *source,
                   cairo_path_fixed_t *path,
                   cairo_fill_rule_t fill_rule,
                   double tolerance,
                   cairo_antialias_t antialias,
                   cairo_clip_t *clip)
{
    return i915_surface_fill_with_alpha (abstract_dst, op, source, path,
                                         fill_rule, tolerance, antialias,
                                         clip, 1.);
}
static const cairo_surface_backend_t i915_surface_backend = {
    CAIRO_SURFACE_TYPE_DRM,
    _cairo_default_context_create,

    i915_surface_create_similar,
    i915_surface_finish,
    intel_surface_acquire_source_image,
    intel_surface_release_source_image,

    NULL, NULL, NULL, /* acquire/release dest, clone similar */
    NULL, /* composite */
    NULL, /* fill-rectangles */
    NULL, /* trapezoids */
    NULL, /* span */
    NULL, /* check-span */

    NULL, /* copy_page */
    NULL, /* show_page */
    _cairo_drm_surface_get_extents,
    NULL, /* old-glyphs */
    _cairo_drm_surface_get_font_options,

    i915_surface_flush,
    NULL, /* mark_dirty */
    intel_scaled_font_fini,
    intel_scaled_glyph_fini,

    i915_surface_paint,
    i915_surface_mask,
    i915_surface_stroke,
    i915_surface_fill,
    i915_surface_glyphs,
};
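/* Example: cairo core reaches this backend only through the vtable; a
 * cairo_paint() on a DRM surface ends up, roughly, as
 *
 *     surface->backend->paint (surface, op, source, clip);
 *
 * dispatching to i915_surface_paint above, while entries left NULL
 * fall back to cairo's generic software implementations.
 */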
static void
i915_surface_init (i915_surface_t *surface,
                   cairo_drm_device_t *device,
                   cairo_format_t format,
                   int width, int height)
{
    intel_surface_init (&surface->intel, &i915_surface_backend, device,
                        format, width, height);

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
        ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
        surface->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
        surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    case CAIRO_FORMAT_RGB24:
        surface->map0 = MAPSURF_32BIT | MT_32BIT_XRGB8888;
        surface->colorbuf = COLR_BUF_ARGB8888 | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    case CAIRO_FORMAT_RGB16_565:
        surface->map0 = MAPSURF_16BIT | MT_16BIT_RGB565;
        surface->colorbuf = COLR_BUF_RGB565;
        break;
    case CAIRO_FORMAT_A8:
        surface->map0 = MAPSURF_8BIT | MT_8BIT_A8;
        surface->colorbuf = COLR_BUF_8BIT | DEPTH_FRMT_24_FIXED_8_OTHER;
        break;
    }
    surface->colorbuf |= DSTORG_HORT_BIAS (0x8) | DSTORG_VERT_BIAS (0x8);
    surface->map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
                     ((width - 1) << MS3_WIDTH_SHIFT);

    surface->is_current_texture = 0;
    surface->deferred_clear = FALSE;

    surface->offset = 0;

    surface->stencil = NULL;
    surface->cache = NULL;
}
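/* Example: MS3 packs the texture dimensions minus one, so a 256x128
 * ARGB32 surface yields
 *
 *     map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888
 *          | (127 << MS3_HEIGHT_SHIFT)
 *          | (255 << MS3_WIDTH_SHIFT);
 *
 * i.e. the hardware reads width-1/height-1 fields, which is why the
 * shifts above use (width - 1) and (height - 1).
 */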
static cairo_surface_t *
i915_surface_create_internal (cairo_drm_device_t *base_dev,
                              cairo_format_t format,
                              int width, int height,
                              uint32_t tiling,
                              cairo_bool_t gpu_target)
{
    i915_surface_t *surface;
    cairo_status_t status_ignored;

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
        uint32_t size, stride;
        intel_bo_t *bo;

        width = (width + 3) & -4;
        stride = cairo_format_stride_for_width (surface->intel.drm.format, width);

        /* check for tiny surfaces for which tiling is irrelevant */
        if (height * stride <= 4096)
            tiling = I915_TILING_NONE;
        if (tiling != I915_TILING_NONE && stride <= 512)
            tiling = I915_TILING_NONE;
        if (tiling != I915_TILING_NONE) {
            if (height <= 8)
                tiling = I915_TILING_NONE;
            else if (height <= 16)
                tiling = I915_TILING_X;
        }

        /* large surfaces we need to blt, so force TILING_X */
        if (width > 2048 || height > 2048)
            tiling = I915_TILING_X;

        /* but there is a maximum limit to the tiling pitch */
        if (tiling != I915_TILING_NONE && stride > 8192)
            tiling = I915_TILING_NONE;

        stride = i915_tiling_stride (tiling, stride);
        assert (stride >= (uint32_t) cairo_format_stride_for_width (surface->intel.drm.format, width));
        assert (tiling == I915_TILING_NONE || stride <= 8192);

        height = i915_tiling_height (tiling, height);
        if (height > 64*1024) {
            free (surface);
            cairo_device_destroy (&base_dev->base);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
        }

        size = stride * height;
        bo = intel_bo_create (to_intel_device (&base_dev->base),
                              i915_tiling_size (tiling, size), size,
                              gpu_target, tiling, stride);
        if (unlikely (bo == NULL)) {
            status_ignored = _cairo_drm_surface_finish (&surface->intel.drm);
            free (surface);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
        }
        assert (bo->base.size >= size);

        surface->intel.drm.bo = &bo->base;
        surface->intel.drm.stride = stride;

        surface->map0 |= MS3_tiling (tiling);
        surface->map1 = (stride/4 - 1) << MS4_PITCH_SHIFT;
    }

    return &surface->intel.drm.base;
}
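/* Example: for a 64x64 A8 surface, stride = 64 and height * stride =
 * 4096, so the first test above already demotes it to I915_TILING_NONE;
 * a 1024x1024 ARGB32 surface (stride 4096) keeps the requested tiling,
 * while any stride above 8192 bytes is demoted again because the
 * tiling pitch cannot exceed that limit.
 */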
static cairo_surface_t *
i915_surface_create (cairo_drm_device_t *base_dev,
                     cairo_format_t format,
                     int width, int height)
{
    switch (format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    case CAIRO_FORMAT_INVALID:
    default:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    }

    return i915_surface_create_internal (base_dev, format, width, height,
                                         I915_TILING_DEFAULT, TRUE);
}
static cairo_surface_t *
i915_surface_create_for_name (cairo_drm_device_t *base_dev,
                              unsigned name,
                              cairo_format_t format,
                              int width, int height, int stride)
{
    i915_surface_t *surface;

    /* Vol I, p134: size restrictions for textures */
    /* Vol I, p129: destination surface stride must be a multiple of 32 bytes */
    if (stride < cairo_format_stride_for_width (format, (width + 3) & -4) ||
        stride & 31)
    {
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
    }

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB16_565:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_A8:
        break;
    }

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL))
        return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));

    i915_surface_init (surface, base_dev, format, width, height);

    if (width && height) {
        surface->intel.drm.stride = stride;
        surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

        surface->intel.drm.bo =
            &intel_bo_create_for_name (to_intel_device (&base_dev->base),
                                       name)->base;
        if (unlikely (surface->intel.drm.bo == NULL)) {
            free (surface);
            return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
        }
        to_intel_bo (surface->intel.drm.bo)->stride = stride;

        surface->map0 |= MS3_tiling (to_intel_bo (surface->intel.drm.bo)->tiling);
    }

    return &surface->intel.drm.base;
}
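/* Example: wrapping a 100-pixel-wide ARGB32 buffer by flink name
 * requires stride >= cairo_format_stride_for_width (ARGB32, 100)
 * = 400 bytes and stride % 32 == 0, so the smallest acceptable
 * stride is 416; passing 400 fails with CAIRO_STATUS_INVALID_STRIDE.
 */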
static cairo_status_t
i915_buffer_cache_init (intel_buffer_cache_t *cache,
                        i915_device_t *device,
                        cairo_format_t format,
                        int width, int height)
{
    const uint32_t tiling = I915_TILING_DEFAULT;
    uint32_t stride, size;

    assert ((width & 3) == 0);
    assert ((height & 1) == 0);

    cache->buffer.width = width;
    cache->buffer.height = height;

    switch (format) {
    default:
    case CAIRO_FORMAT_INVALID:
    case CAIRO_FORMAT_A1:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
        ASSERT_NOT_REACHED;
    case CAIRO_FORMAT_ARGB32:
        cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
        stride = width * 4;
        break;
    case CAIRO_FORMAT_A8:
        cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
        stride = width;
        break;
    }

    assert ((stride & 7) == 0);
    assert (i915_tiling_stride (tiling, stride) == stride);
    assert (i915_tiling_height (tiling, height) == height);

    size = height * stride;
    assert (i915_tiling_size (tiling, size) == size);

    cache->buffer.bo = intel_bo_create (&device->intel, size, size, FALSE, tiling, stride);
    if (unlikely (cache->buffer.bo == NULL))
        return _cairo_error (CAIRO_STATUS_NO_MEMORY);

    cache->buffer.stride = cache->buffer.bo->stride;

    cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
                          ((width - 1) << MS3_WIDTH_SHIFT);
    cache->buffer.map0 |= MS3_tiling (tiling);
    cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;

    cache->ref_count = 0;
    cairo_list_init (&cache->link);

    return CAIRO_STATUS_SUCCESS;
}
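/* Example: assuming, for illustration only, a cache geometry of
 * IMAGE_CACHE_WIDTH = IMAGE_CACHE_HEIGHT = 1024 (the actual constants
 * are defined earlier in this file), an ARGB32 cache buffer needs
 * stride = 1024 * 4 = 4096 bytes and size = 4 MiB; the asserts above
 * merely re-check that such a geometry is already tile-aligned, so no
 * rounding is hidden inside this helper.
 */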
static i915_surface_t *
i915_surface_create_from_cacheable_image_internal (i915_device_t *device,
                                                   cairo_image_surface_t *image)
{
    i915_surface_t *surface;
    cairo_status_t status;
    cairo_list_t *caches;
    intel_buffer_cache_t *cache;
    cairo_rtree_node_t *node;
    cairo_format_t format;
    int width, height, bpp;

    format = image->format;
    if (format == CAIRO_FORMAT_A1)
        format = CAIRO_FORMAT_A8;

    width = image->width;
    height = image->height;
    if (width > IMAGE_CACHE_WIDTH/2 || height > IMAGE_CACHE_HEIGHT/2) {
        surface = (i915_surface_t *)
            i915_surface_create_internal (&device->intel.base,
                                          format,
                                          width, height,
                                          I915_TILING_NONE, FALSE);
        if (unlikely (surface->intel.drm.base.status))
            return surface;

        status = intel_bo_put_image (&device->intel,
                                     to_intel_bo (surface->intel.drm.bo),
                                     image,
                                     0, 0,
                                     width, height,
                                     0, 0);
        if (unlikely (status)) {
            cairo_surface_destroy (&surface->intel.drm.base);
            return (i915_surface_t *) _cairo_surface_create_in_error (status);
        }

        return surface;
    }

    status = cairo_device_acquire (&device->intel.base.base);
    if (unlikely (status))
        return (i915_surface_t *) _cairo_surface_create_in_error (status);

    switch (image->format) {
    case CAIRO_FORMAT_ARGB32:
    case CAIRO_FORMAT_RGB24:
    case CAIRO_FORMAT_RGB16_565:
        caches = &device->image_caches[0];
        format = CAIRO_FORMAT_ARGB32;
        bpp = 4;
        break;
    case CAIRO_FORMAT_A8:
    case CAIRO_FORMAT_A1:
        caches = &device->image_caches[1];
        format = CAIRO_FORMAT_A8;
        bpp = 1;
        break;
    case CAIRO_FORMAT_INVALID:
    default:
        status = _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
        goto CLEANUP_DEVICE;
    }

    cairo_list_foreach_entry (cache, intel_buffer_cache_t, caches, link) {
        if (! intel_bo_is_inactive (&device->intel, cache->buffer.bo))
            continue;

        status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
        if (unlikely (_cairo_status_is_error (status)))
            goto CLEANUP_DEVICE;
        if (status == CAIRO_STATUS_SUCCESS)
            goto DONE;
    }

    cache = malloc (sizeof (intel_buffer_cache_t));
    if (unlikely (cache == NULL)) {
        status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
        goto CLEANUP_DEVICE;
    }

    status = i915_buffer_cache_init (cache, device, format,
                                     IMAGE_CACHE_WIDTH,
                                     IMAGE_CACHE_HEIGHT);
    if (unlikely (status)) {
        free (cache);
        goto CLEANUP_DEVICE;
    }

    _cairo_rtree_init (&cache->rtree,
                       IMAGE_CACHE_WIDTH,
                       IMAGE_CACHE_HEIGHT,
                       4,
                       sizeof (i915_image_private_t));

    status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
    assert (status == CAIRO_STATUS_SUCCESS);

    cairo_list_init (&cache->link);

  DONE:
    cairo_list_move (&cache->link, caches);
    ((i915_image_private_t *) node)->container = cache;

    status = intel_bo_put_image (&device->intel,
                                 cache->buffer.bo,
                                 image,
                                 0, 0,
                                 width, height,
                                 node->x, node->y);
    if (unlikely (status))
        goto CLEANUP_CACHE;

    surface = malloc (sizeof (i915_surface_t));
    if (unlikely (surface == NULL)) {
        status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
        goto CLEANUP_CACHE;
    }

    i915_surface_init (surface, &device->intel.base,
                       format, width, height);

    surface->intel.drm.stride = cache->buffer.stride;

    surface->map0 |= MS3_tiling (cache->buffer.bo->tiling);
    surface->map1 = (surface->intel.drm.stride/4 - 1) << MS4_PITCH_SHIFT;

    surface->intel.drm.bo = &intel_bo_reference (cache->buffer.bo)->base;
    surface->offset = node->y * cache->buffer.stride + bpp * node->x;

    surface->cache = (i915_image_private_t *) node;
    cache->ref_count++;

    cairo_device_release (&device->intel.base.base);

    return surface;

  CLEANUP_CACHE:
    _cairo_rtree_node_destroy (&cache->rtree, node);
    if (cache->ref_count == 0) {
        intel_bo_destroy (&device->intel, cache->buffer.bo);
        _cairo_rtree_fini (&cache->rtree);
        cairo_list_del (&cache->link);
        free (cache);
    }
  CLEANUP_DEVICE:
    cairo_device_release (&device->intel.base.base);
    return (i915_surface_t *) _cairo_surface_create_in_error (status);
}
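/* Example: a small A8 image placed by the rtree at node (x, y) =
 * (64, 32) in a cache buffer lands at
 *
 *     offset = 32 * cache->buffer.stride + 1 * 64;
 *
 * i.e. sub-image addressing within the shared bo is pure pointer
 * arithmetic, with bpp = 1 for A8 atlases and bpp = 4 for ARGB32.
 */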
static cairo_surface_t *
i915_surface_create_from_cacheable_image (cairo_drm_device_t *device,
                                          cairo_surface_t *source)
{
    i915_surface_t *surface;
    cairo_image_surface_t *image;
    void *image_extra;
    cairo_status_t status;

    status = _cairo_surface_acquire_source_image (source, &image, &image_extra);
    if (unlikely (status))
        return _cairo_surface_create_in_error (status);

    surface = i915_surface_create_from_cacheable_image_internal ((i915_device_t *) device, image);

    _cairo_surface_release_source_image (source, image, image_extra);

    return &surface->intel.drm.base;
}
static cairo_status_t
i915_surface_enable_scan_out (void *abstract_surface)
{
    i915_surface_t *surface = abstract_surface;
    intel_bo_t *bo;
    cairo_status_t status;

    if (unlikely (surface->intel.drm.bo == NULL))
        return _cairo_error (CAIRO_STATUS_INVALID_SIZE);

    bo = to_intel_bo (surface->intel.drm.bo);
    if (bo->tiling == I915_TILING_Y) {
        status = i915_surface_batch_flush (surface);
        if (unlikely (status))
            return status;

        bo->tiling = I915_TILING_X;
        surface->map0 &= ~MS3_tiling (I915_TILING_Y);
        surface->map0 |= MS3_tiling (I915_TILING_X);
    }

    return CAIRO_STATUS_SUCCESS;
}
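/* The display engine on these chips can scan out linear and X-tiled
 * buffers but not Y-tiled ones, hence the retile above. A sketch of a
 * hypothetical KMS caller (not part of this file), going through the
 * hook installed in _cairo_drm_i915_device_create below:
 *
 *     status = device->intel.base.surface.enable_scan_out (surface);
 *     if (status == CAIRO_STATUS_SUCCESS)
 *         drmModeSetCrtc (fd, crtc_id, fb_id, 0, 0, &conn_id, 1, &mode);
 *
 * after which a framebuffer wrapping the bo can be attached to a CRTC.
 */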
static cairo_int_status_t
i915_device_flush (cairo_drm_device_t *device)
{
    cairo_status_t status;

    if (unlikely (device->base.finished))
        return CAIRO_STATUS_SUCCESS;

    status = cairo_device_acquire (&device->base);
    if (likely (status == CAIRO_STATUS_SUCCESS)) {
        status = i915_batch_flush ((i915_device_t *) device);
        cairo_device_release (&device->base);
    }

    return status;
}
static cairo_int_status_t
i915_device_throttle (cairo_drm_device_t *device)
{
    cairo_status_t status;

    status = cairo_device_acquire (&device->base);
    if (unlikely (status))
        return status;

    status = i915_batch_flush ((i915_device_t *) device);
    intel_throttle ((intel_device_t *) device);

    cairo_device_release (&device->base);

    return status;
}
static void
i915_device_destroy (void *data)
{
    i915_device_t *device = data;

    if (device->last_vbo)
        intel_bo_destroy (&device->intel, device->last_vbo);

    i915_batch_cleanup (device);

    intel_device_fini (&device->intel);

    free (device);
}
COMPILE_TIME_ASSERT (sizeof (i915_batch_setup) == sizeof (((i915_device_t *)0)->batch_header));
COMPILE_TIME_ASSERT (offsetof (i915_device_t, batch_base) == offsetof (i915_device_t, batch_header) + sizeof (i915_batch_setup));
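/* These asserts pin the batch layout: batch_base must immediately
 * follow batch_header so that the setup dwords copied in below and the
 * emitted commands form one contiguous buffer. A typical definition of
 * the macro (cairoint.h's may differ in detail) is
 *
 *     #define COMPILE_TIME_ASSERT(E) \
 *         ((void) sizeof (char [1 - 2 * !(E)]))
 *
 * which fails to compile, via a negative array size, whenever E is
 * false.
 */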
cairo_drm_device_t *
_cairo_drm_i915_device_create (int fd, dev_t dev_id, int vendor_id, int chip_id)
{
    i915_device_t *device;
    cairo_status_t status;
    uint64_t gtt_size;
    int n;

    if (! intel_info (fd, &gtt_size))
        return NULL;

    device = malloc (sizeof (i915_device_t));
    if (device == NULL)
        return (cairo_drm_device_t *) _cairo_device_create_in_error (CAIRO_STATUS_NO_MEMORY);

    status = intel_device_init (&device->intel, fd);
    if (unlikely (status)) {
        free (device);
        return (cairo_drm_device_t *) _cairo_device_create_in_error (status);
    }

    device->debug = 0;
    if (getenv ("CAIRO_DEBUG_DRM") != NULL)
        device->debug = I915_DEBUG_SYNC;

    n = intel_get (fd, I915_PARAM_NUM_FENCES_AVAIL);
    if (n == 0)
        n = 8;
    device->batch.fences_avail = n - 2; /* conservative */

    device->batch.gtt_avail_size = device->intel.gtt_avail_size / 4;
    device->batch.est_gtt_size = I915_BATCH_SIZE;
    device->batch.total_gtt_size = I915_BATCH_SIZE;
    device->batch.exec_count = 0;
    device->batch.reloc_count = 0;
    device->batch.used = 0;
    device->batch.fences = 0;

    memcpy (device->batch_header, i915_batch_setup, sizeof (i915_batch_setup));

    device->vbo_offset = 0;
    device->vbo_used = 0;
    device->vertex_index = 0;
    device->vertex_count = 0;
    device->last_vbo = NULL;

    for (n = 0; n < ARRAY_LENGTH (device->image_caches); n++)
        cairo_list_init (&device->image_caches[n]);

    device->intel.base.surface.create = i915_surface_create;
    device->intel.base.surface.create_for_name = i915_surface_create_for_name;
    device->intel.base.surface.create_from_cacheable_image = i915_surface_create_from_cacheable_image;

    device->intel.base.surface.flink = _cairo_drm_surface_flink;
    device->intel.base.surface.enable_scan_out = i915_surface_enable_scan_out;
    device->intel.base.surface.map_to_image = intel_surface_map_to_image;

    device->intel.base.device.flush = i915_device_flush;
    device->intel.base.device.throttle = i915_device_throttle;
    device->intel.base.device.destroy = i915_device_destroy;

    device->floats_per_vertex = 0;
    device->current_source = NULL;
    device->current_mask = NULL;
    device->current_clip = NULL;

    i915_device_reset (device);

    return _cairo_drm_device_init (&device->intel.base,
                                   fd, dev_id, vendor_id, chip_id,
                                   2048);
}
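/* Example: this constructor is not called directly by applications;
 * the cairo-drm core probes the udev device and dispatches on the PCI
 * ids, roughly
 *
 *     cairo_device_t *dev = cairo_drm_device_get (udev_device);
 *
 * where vendor_id 0x8086 plus an i915-class chip_id routes to
 * _cairo_drm_i915_device_create, so user code only ever sees the
 * generic cairo_drm_device_t.
 */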