1 /* Cairo - a vector graphics library with display and print output
3 * Copyright © 2009 Chris Wilson
5 * This library is free software; you can redistribute it and/or
6 * modify it either under the terms of the GNU Lesser General Public
7 * License version 2.1 as published by the Free Software Foundation
8 * (the "LGPL") or, at your option, under the terms of the Mozilla
9 * Public License Version 1.1 (the "MPL"). If you do not alter this
10 * notice, a recipient may use your version of this file under either
11 * the MPL or the LGPL.
13 * You should have received a copy of the LGPL along with this library
14 * in the file COPYING-LGPL-2.1; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA
16 * You should have received a copy of the MPL along with this library
17 * in the file COPYING-MPL-1.1
19 * The contents of this file are subject to the Mozilla Public License
20 * Version 1.1 (the "License"); you may not use this file except in
21 * compliance with the License. You may obtain a copy of the License at
22 * http://www.mozilla.org/MPL/
24 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
25 * OF ANY KIND, either express or implied. See the LGPL or the MPL for
26 * the specific language governing rights and limitations.
32 #include "cairo-drm-private.h"
33 #include "cairo-drm-ioctl-private.h"
34 #include "cairo-drm-intel-private.h"
35 #include "cairo-drm-intel-ioctl-private.h"
37 #include "cairo-error-private.h"
38 #include "cairo-freelist-private.h"
40 #include <sys/ioctl.h>
/* Dimensions (in pixels) of the device-wide glyph and image caches,
 * and the per-glyph size bounds accepted into the glyph cache. */
44 #define GLYPH_CACHE_WIDTH 1024
45 #define GLYPH_CACHE_HEIGHT 1024
46 #define GLYPH_CACHE_MIN_SIZE 1
47 #define GLYPH_CACHE_MAX_SIZE 128
49 #define IMAGE_CACHE_WIDTH 1024
50 #define IMAGE_CACHE_HEIGHT 1024
/* Query a single i915 driver parameter via DRM_IOCTL_I915_GETPARAM.
 * NOTE(review): interior lines are elided in this excerpt — the failure
 * fallback value returned when the ioctl fails is not visible; confirm. */
53 intel_get (int fd, int param)
55 struct intel_getparam gp;
60 if (ioctl (fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
63 VG (VALGRIND_MAKE_MEM_DEFINED (&value, sizeof (value)));
/* Probe whether fd is an i915 device usable by this backend: it must
 * support GEM and EXECBUF2.  On success reports the aperture size
 * through *gtt_size. */
69 intel_info (int fd, uint64_t *gtt_size)
71 struct drm_i915_gem_get_aperture info;
73 if (! intel_get (fd, I915_PARAM_HAS_GEM))
76 if (! intel_get (fd, I915_PARAM_HAS_EXECBUF2))
79 if (ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info) < 0)
82 VG (VALGRIND_MAKE_MEM_DEFINED (&info, sizeof (info)));
85 *gtt_size = info.aper_size;
/* Upload `size` bytes from `data` into the bo at byte `offset` using
 * GEM pwrite.  The bo must be linear (I915_TILING_NONE) both before
 * and after intel_bo_set_tiling resolves any pending tiling change.
 * The ioctl is retried on EINTR. */
91 intel_bo_write (const intel_device_t *device,
97 struct drm_i915_gem_pwrite pwrite;
100 assert (bo->tiling == I915_TILING_NONE);
102 assert (offset < bo->base.size);
103 assert (size+offset <= bo->base.size);
105 intel_bo_set_tiling (device, bo);
107 assert (bo->_tiling == I915_TILING_NONE);
109 memset (&pwrite, 0, sizeof (pwrite));
110 pwrite.handle = bo->base.handle;
111 pwrite.offset = offset;
113 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
115 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
116 } while (ret == -1 && errno == EINTR);
/* Mirror of intel_bo_write: read `size` bytes starting at `offset`
 * out of a linear bo into `data` via GEM pread, retrying on EINTR. */
123 intel_bo_read (const intel_device_t *device,
125 unsigned long offset,
129 struct drm_i915_gem_pread pread;
132 assert (bo->tiling == I915_TILING_NONE);
134 assert (offset < bo->base.size);
135 assert (size+offset <= bo->base.size);
137 intel_bo_set_tiling (device, bo);
139 assert (bo->_tiling == I915_TILING_NONE);
141 memset (&pread, 0, sizeof (pread));
142 pread.handle = bo->base.handle;
143 pread.offset = offset;
145 pread.data_ptr = (uint64_t) (uintptr_t) data;
147 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
148 } while (ret == -1 && errno == EINTR);
/* Map the bo into this process and move it to a coherent domain.
 * Linear bos marked cpu-mappable get a direct CPU mmap
 * (DRM_IOCTL_I915_GEM_MMAP, domain CPU); everything else is mapped
 * through the GTT (DRM_IOCTL_I915_GEM_MMAP_GTT + mmap of the fake
 * offset, domain GTT), which transparently handles tiling.  Finally
 * SET_DOMAIN synchronizes with any outstanding GPU rendering.
 * NOTE(review): excerpt elides the success return; presumably returns
 * bo->virtual, NULL on failure — confirm against the full file. */
156 intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
158 struct drm_i915_gem_set_domain set_domain;
162 intel_bo_set_tiling (device, bo);
164 if (bo->virtual != NULL)
167 if (bo->cpu && bo->tiling == I915_TILING_NONE) {
168 struct drm_i915_gem_mmap mmap_arg;
170 mmap_arg.handle = bo->base.handle;
172 mmap_arg.size = bo->base.size;
173 mmap_arg.addr_ptr = 0;
176 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
177 } while (ret == -1 && errno == EINTR);
178 if (unlikely (ret != 0)) {
179 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
183 bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
184 domain = I915_GEM_DOMAIN_CPU;
186 struct drm_i915_gem_mmap_gtt mmap_arg;
189 /* Get the fake offset back... */
190 mmap_arg.handle = bo->base.handle;
192 ret = ioctl (device->base.fd,
193 DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
194 } while (ret == -1 && errno == EINTR);
195 if (unlikely (ret != 0)) {
196 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
201 ptr = mmap (0, bo->base.size, PROT_READ | PROT_WRITE,
202 MAP_SHARED, device->base.fd,
204 if (unlikely (ptr == MAP_FAILED)) {
205 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
210 domain = I915_GEM_DOMAIN_GTT;
213 VG (VALGRIND_MAKE_MEM_DEFINED (bo->virtual, bo->base.size));
215 set_domain.handle = bo->base.handle;
216 set_domain.read_domains = domain;
217 set_domain.write_domain = domain;
220 ret = ioctl (device->base.fd,
221 DRM_IOCTL_I915_GEM_SET_DOMAIN,
223 } while (ret == -1 && errno == EINTR);
227 _cairo_error_throw (CAIRO_STATUS_DEVICE_ERROR);
/* Tear down the mapping created by intel_bo_map. */
236 intel_bo_unmap (intel_bo_t *bo)
238 munmap (bo->virtual, bo->base.size);
/* Ask the kernel whether the bo still has GPU work outstanding
 * (DRM_IOCTL_I915_GEM_BUSY) and cache the answer in bo->busy. */
243 intel_bo_is_inactive (const intel_device_t *device, intel_bo_t *bo)
245 struct drm_i915_gem_busy busy;
250 /* Is this buffer busy for our intended usage pattern? */
251 busy.handle = bo->base.handle;
253 ioctl (device->base.fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
255 bo->busy = busy.busy;
/* Block until the GPU has finished with the bo: moving it into the
 * GTT read domain (write_domain == 0) makes the kernel wait for any
 * outstanding rendering.  Retries on EINTR. */
260 intel_bo_wait (const intel_device_t *device, const intel_bo_t *bo)
262 struct drm_i915_gem_set_domain set_domain;
265 set_domain.handle = bo->base.handle;
266 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
267 set_domain.write_domain = 0;
270 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
271 } while (ret == -1 && errno == EINTR);
/* Advise the kernel about the bo's backing pages (madvise ioctl).
 * madv.retained is an in/out field: initialized TRUE, the kernel
 * clears it if the pages were already discarded.  Returns whether
 * the backing store is still intact. */
290 intel_bo_madvise (intel_device_t *device,
294 struct drm_i915_gem_madvise madv;
296 madv.handle = bo->base.handle;
298 madv.retained = TRUE;
299 ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
300 return madv.retained;
/* Inform the kernel of the portion of the allocation actually in use
 * (non-standard DRM_IOCTL_I915_GEM_REAL_SIZE) and update
 * bo->base.size to match.  No-op when the size is unchanged;
 * growing the bo requires it not to be queued for execution. */
304 intel_bo_set_real_size (intel_device_t *device,
308 struct drm_i915_gem_real_size arg;
313 if (size == bo->base.size)
316 arg.handle = bo->base.handle;
319 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_REAL_SIZE, &arg);
320 } while (ret == -1 && errno == EINTR);
323 if (size > bo->base.size) {
324 assert (bo->exec == NULL);
329 bo->base.size = size;
/* Allocate (or recycle) a buffer object.  Sizes are rounded up to
 * 4 KiB pages; the allocation is bucketed by power-of-two size so
 * retired buffers can be reused.  Before allocating fresh, the
 * in-flight list is scanned for a compatible bo (matching tiling and
 * stride, large enough but not wastefully so).  Falls through to a
 * GEM_CREATE of cache_size bytes trimmed to real_size.
 * NOTE(review): several branches are elided in this excerpt; the
 * reuse/ordering details should be confirmed against the full file. */
334 intel_bo_create (intel_device_t *device,
337 cairo_bool_t gpu_target,
343 struct drm_i915_gem_create create;
347 max_size = (max_size + 4095) & -4096;
348 real_size = (real_size + 4095) & -4096;
349 cache_size = pot (max_size);
350 bucket = ffs (cache_size / 4096) - 1;
351 if (bucket >= INTEL_BO_CACHE_BUCKETS)
352 cache_size = max_size;
355 intel_bo_t *first = NULL;
357 cairo_list_foreach_entry (bo, intel_bo_t,
358 &device->bo_in_flight,
361 assert (bo->exec != NULL);
/* skip candidates whose committed tiling/stride conflicts with the request */
362 if (tiling && bo->_tiling &&
363 (bo->_tiling != tiling || bo->_stride != stride))
368 if (real_size <= bo->base.size) {
369 if (real_size >= bo->base.size/2) {
370 cairo_list_del (&bo->cache_list);
371 bo = intel_bo_reference (bo);
381 cairo_list_del (&first->cache_list);
382 bo = intel_bo_reference (first);
387 /* no cached buffer available, allocate fresh */
388 bo = _cairo_freepool_alloc (&device->bo_pool);
389 if (unlikely (bo == NULL)) {
390 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
394 cairo_list_init (&bo->cache_list);
402 bo->_tiling = I915_TILING_NONE;
411 bo->batch_read_domains = 0;
412 bo->batch_write_domain = 0;
413 cairo_list_init (&bo->link);
415 create.size = cache_size;
417 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_CREATE, &create);
418 if (unlikely (ret != 0)) {
419 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
420 _cairo_freepool_free (&device->bo_pool, bo);
424 bo->base.handle = create.handle;
425 bo->full_size = bo->base.size = create.size;
427 intel_bo_set_real_size (device, bo, real_size);
428 CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
/* Wrap an existing, externally-shared buffer identified by its flink
 * name in an intel_bo_t.  Opens the handle, initializes bookkeeping,
 * and queries the current tiling mode from the kernel.  On ioctl
 * failure the handle is closed and the bo returned to the freepool. */
436 intel_bo_create_for_name (intel_device_t *device, uint32_t name)
438 struct drm_i915_gem_get_tiling get_tiling;
439 cairo_status_t status;
443 bo = _cairo_freepool_alloc (&device->bo_pool);
444 if (unlikely (bo == NULL)) {
445 _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
449 status = _cairo_drm_bo_open_for_name (&device->base, &bo->base, name);
450 if (unlikely (status))
453 CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
454 cairo_list_init (&bo->cache_list);
456 bo->full_size = bo->base.size;
467 bo->batch_read_domains = 0;
468 bo->batch_write_domain = 0;
469 cairo_list_init (&bo->link);
471 memset (&get_tiling, 0, sizeof (get_tiling));
472 get_tiling.handle = bo->base.handle;
474 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
475 if (unlikely (ret != 0)) {
476 _cairo_error_throw (CAIRO_STATUS_DEVICE_ERROR);
477 _cairo_drm_bo_close (&device->base, &bo->base);
481 bo->_tiling = bo->tiling = get_tiling.tiling_mode;
482 // bo->stride = get_tiling.stride; /* XXX not available from get_tiling */
487 _cairo_freepool_free (&device->bo_pool, bo);
/* Final destructor callback for a bo: unmap any CPU mapping, close
 * the kernel handle, and return the wrapper to the device freepool.
 * The bo must not be queued for execution or on a cache list. */
492 intel_bo_release (void *_dev, void *_bo)
494 intel_device_t *device = _dev;
495 intel_bo_t *bo = _bo;
497 if (bo->virtual != NULL)
500 assert (bo->exec == NULL);
501 assert (cairo_list_is_empty (&bo->cache_list));
503 _cairo_drm_bo_close (&device->base, &bo->base);
504 _cairo_freepool_free (&device->bo_pool, bo);
/* Commit the requested tiling/stride (bo->tiling, bo->stride) to the
 * kernel.  The last values acknowledged by the kernel are shadowed in
 * bo->_tiling/_stride; if nothing changed the ioctl is skipped.
 * Retries on EINTR. */
508 intel_bo_set_tiling (const intel_device_t *device,
511 struct drm_i915_gem_set_tiling set_tiling;
514 if (bo->tiling == bo->_tiling &&
515 (bo->tiling == I915_TILING_NONE || bo->stride == bo->_stride))
519 set_tiling.handle = bo->base.handle;
520 set_tiling.tiling_mode = bo->tiling;
521 set_tiling.stride = bo->stride;
523 ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
524 } while (ret == -1 && errno == EINTR);
527 bo->_tiling = bo->tiling;
528 bo->_stride = bo->stride;
/* Download the surface's pixels from its bo into a freshly created
 * cairo image surface.  Fast path: a linear bo whose stride matches
 * the image can be fetched with a single pread; otherwise the bo is
 * mapped and copied row by row (row byte-count depends on format:
 * width bytes for A8, wider otherwise — widening factor is elided
 * from this excerpt). */
532 intel_bo_get_image (const intel_device_t *device,
534 const cairo_drm_surface_t *surface)
536 cairo_image_surface_t *image;
540 image = (cairo_image_surface_t *)
541 cairo_image_surface_create (surface->format,
544 if (unlikely (image->base.status))
547 intel_bo_set_tiling (device, bo);
549 if (bo->tiling == I915_TILING_NONE && image->stride == surface->stride) {
550 size = surface->stride * surface->height;
551 intel_bo_read (device, bo, 0, size, image->data);
555 src = intel_bo_map (device, bo);
556 if (unlikely (src == NULL))
557 return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
559 size = surface->width;
560 if (surface->format != CAIRO_FORMAT_A8)
563 row = surface->height;
566 memcpy (dst, src, size);
567 dst += image->stride;
568 src += surface->stride;
/* Upload an A1 (1 bit/pixel) image into the bo, expanding each bit to
 * an A8 byte (0x00/0xff) on the way.  A stack buffer is used for the
 * expanded scanline, spilling to a heap allocation when too wide.
 * Linear bos with matching stride take the pwrite path; otherwise the
 * bo is mapped and expanded row by row in place. */
575 static cairo_status_t
576 _intel_bo_put_a1_image (intel_device_t *device,
578 cairo_image_surface_t *src,
579 int src_x, int src_y,
580 int width, int height,
581 int dst_x, int dst_y)
583 uint8_t buf[CAIRO_STACK_BUFFER_SIZE];
588 data = src->data + src_y * src->stride;
590 if (bo->tiling == I915_TILING_NONE && width == bo->stride) {
594 size = bo->stride * height;
595 if (size > (int) sizeof (buf)) {
596 a8 = _cairo_malloc_ab (bo->stride, height);
598 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
603 for (x = 0; x < width; x++) {
/* expand bit x of the source scanline into a full alpha byte */
607 p[x] = data[byte] & (1 << bit) ? 0xff : 0x00;
614 intel_bo_write (device, bo,
615 dst_y * bo->stride + dst_x, /* XXX bo_offset */
620 if (width > (int) sizeof (buf)) {
623 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
626 dst = intel_bo_map (device, bo);
630 return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
633 dst += dst_y * bo->stride + dst_x; /* XXX bo_offset */
635 for (x = 0; x < width; x++) {
639 a8[x] = data[byte] & (1 << bit) ? 0xff : 0x00;
642 memcpy (dst, a8, width);
651 return CAIRO_STATUS_SUCCESS;
/* Upload a rectangle of a cairo image surface into the bo.  The
 * per-format cases (elided here) compute the byte offset and row
 * width; A1 is delegated to _intel_bo_put_a1_image.  A linear bo with
 * matching stride is written in one pwrite, otherwise the bo is
 * mapped and copied row by row. */
655 intel_bo_put_image (intel_device_t *device,
657 cairo_image_surface_t *src,
658 int src_x, int src_y,
659 int width, int height,
660 int dst_x, int dst_y)
666 intel_bo_set_tiling (device, bo);
668 offset = dst_y * bo->stride;
669 data = src->data + src_y * src->stride;
670 switch (src->format) {
671 case CAIRO_FORMAT_ARGB32:
672 case CAIRO_FORMAT_RGB24:
677 case CAIRO_FORMAT_RGB16_565:
682 case CAIRO_FORMAT_A8:
687 case CAIRO_FORMAT_A1:
688 return _intel_bo_put_a1_image (device, bo, src,
693 case CAIRO_FORMAT_INVALID:
694 return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
697 if (bo->tiling == I915_TILING_NONE && src->stride == bo->stride) {
698 intel_bo_write (device, bo, offset, bo->stride * height, data);
702 dst = intel_bo_map (device, bo);
703 if (unlikely (dst == NULL))
704 return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
708 memcpy (dst, data, size);
714 return CAIRO_STATUS_SUCCESS;
/* Cache eviction predicate for snapshot entries.
 * NOTE(review): body elided in this excerpt — eviction condition unknown. */
718 _intel_snapshot_cache_entry_can_remove (const void *closure)
/* Cache-entry destructor: recover the owning intel_surface_t from the
 * embedded entry and clear its hash to mark it as no longer cached. */
724 _intel_snapshot_cache_entry_destroy (void *closure)
726 intel_surface_t *surface = cairo_container_of (closure,
728 snapshot_cache_entry);
730 surface->snapshot_cache_entry.hash = 0;
/* Initialize the per-device state: query the GTT aperture, size the
 * snapshot cache from it (1/8 of aperture, max entry 1/4 of that),
 * reserve ~3% of the available GTT as headroom, and reset the glyph,
 * font and gradient caches. */
734 intel_device_init (intel_device_t *device, int fd)
736 struct drm_i915_gem_get_aperture aperture;
737 cairo_status_t status;
742 ret = ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
744 return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
746 CAIRO_MUTEX_INIT (device->mutex);
748 device->gtt_max_size = aperture.aper_size;
749 device->gtt_avail_size = aperture.aper_available_size;
750 device->gtt_avail_size -= device->gtt_avail_size >> 5;
752 size = aperture.aper_size / 8;
753 device->snapshot_cache_max_size = size / 4;
754 status = _cairo_cache_init (&device->snapshot_cache,
756 _intel_snapshot_cache_entry_can_remove,
757 _intel_snapshot_cache_entry_destroy,
759 if (unlikely (status))
762 for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++) {
763 device->glyph_cache[n].buffer.bo = NULL;
764 cairo_list_init (&device->glyph_cache[n].rtree.pinned);
766 cairo_list_init (&device->fonts);
768 device->gradient_cache.size = 0;
770 device->base.bo.release = intel_bo_release;
772 return CAIRO_STATUS_SUCCESS;
/* Release every cached gradient: finalize the stored pattern and drop
 * the bo holding the rendered ramp, if one was allocated. */
776 _intel_gradient_cache_fini (intel_device_t *device)
780 for (n = 0; n < device->gradient_cache.size; n++) {
781 _cairo_pattern_fini (&device->gradient_cache.cache[n].pattern.base);
782 if (device->gradient_cache.cache[n].buffer.bo != NULL)
783 cairo_drm_bo_destroy (&device->base.base,
784 &device->gradient_cache.cache[n].buffer.bo->base);
/* Tear down one glyph cache: destroy its backing bo (if allocated)
 * and its rtree allocator. */
789 _intel_glyph_cache_fini (intel_device_t *device, intel_buffer_cache_t *cache)
791 if (cache->buffer.bo == NULL)
794 intel_bo_destroy (device, cache->buffer.bo);
795 _cairo_rtree_fini (&cache->rtree);
/* Device teardown: revoke font ownership, free the glyph, snapshot
 * and gradient caches, drain the bo freepool and finish the base
 * DRM device. */
799 intel_device_fini (intel_device_t *device)
801 cairo_scaled_font_t *scaled_font, *next_scaled_font;
804 cairo_list_foreach_entry_safe (scaled_font,
810 _cairo_scaled_font_revoke_ownership (scaled_font);
813 for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++)
814 _intel_glyph_cache_fini (device, &device->glyph_cache[n]);
816 _cairo_cache_fini (&device->snapshot_cache);
818 _intel_gradient_cache_fini (device);
819 _cairo_freepool_fini (&device->bo_pool);
821 _cairo_drm_device_fini (&device->base);
/* Voluntarily throttle this client against the GPU via the i915
 * THROTTLE ioctl; return value deliberately ignored (best effort). */
825 intel_throttle (intel_device_t *device)
827 ioctl (device->base.fd, DRM_IOCTL_I915_GEM_THROTTLE);
/* Unpin all rtree nodes in every glyph cache, making their slots
 * eligible for eviction again. */
831 intel_glyph_cache_unpin (intel_device_t *device)
835 for (n = 0; n < ARRAY_LENGTH (device->glyph_cache); n++)
836 _cairo_rtree_unpin (&device->glyph_cache[n].rtree);
/* Copy a rendered glyph into the cache's texture atlas.  A slot is
 * obtained from the rtree (evicting a random unpinned node when the
 * tree is full), the glyph pixels are uploaded (A1 expanded to A8,
 * A8 and ARGB32 copied directly), and the node is annotated with the
 * glyph's texture coordinates for later compositing. */
839 static cairo_status_t
840 intel_glyph_cache_add_glyph (intel_device_t *device,
841 intel_buffer_cache_t *cache,
842 cairo_scaled_glyph_t *scaled_glyph)
844 cairo_image_surface_t *glyph_surface = scaled_glyph->surface;
845 intel_glyph_t *glyph;
846 cairo_rtree_node_t *node = NULL;
848 cairo_status_t status;
/* clamp the node size so degenerate glyphs still get a valid slot */
852 width = glyph_surface->width;
853 if (width < GLYPH_CACHE_MIN_SIZE)
854 width = GLYPH_CACHE_MIN_SIZE;
855 height = glyph_surface->height;
856 if (height < GLYPH_CACHE_MIN_SIZE)
857 height = GLYPH_CACHE_MIN_SIZE;
859 /* search for an available slot */
860 status = _cairo_rtree_insert (&cache->rtree, width, height, &node);
861 /* search for an unpinned slot */
862 if (status == CAIRO_INT_STATUS_UNSUPPORTED) {
863 status = _cairo_rtree_evict_random (&cache->rtree, width, height, &node);
864 if (status == CAIRO_STATUS_SUCCESS)
865 status = _cairo_rtree_node_insert (&cache->rtree, node, width, height, &node);
867 if (unlikely (status))
870 /* XXX streaming upload? */
872 height = glyph_surface->height;
873 src = glyph_surface->data;
874 dst = cache->buffer.bo->virtual;
876 dst = intel_bo_map (device, cache->buffer.bo);
877 if (unlikely (dst == NULL))
878 return _cairo_error (CAIRO_STATUS_DEVICE_ERROR);
881 dst += node->y * cache->buffer.stride;
882 switch (glyph_surface->format) {
883 case CAIRO_FORMAT_A1: {
884 uint8_t buf[CAIRO_STACK_BUFFER_SIZE];
888 if (width > (int) sizeof (buf)) {
890 if (unlikely (a8 == NULL))
891 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
895 width = glyph_surface->width;
/* expand each 1-bit pixel to a full 0x00/0xff alpha byte */
897 for (x = 0; x < width; x++)
898 a8[x] = src[x>>3] & (1 << (x&7)) ? 0xff : 0x00;
900 memcpy (dst, a8, width);
901 dst += cache->buffer.stride;
902 src += glyph_surface->stride;
910 case CAIRO_FORMAT_A8:
912 width = glyph_surface->width;
914 memcpy (dst, src, width);
915 dst += cache->buffer.stride;
916 src += glyph_surface->stride;
920 case CAIRO_FORMAT_ARGB32:
922 width = 4*glyph_surface->width;
924 memcpy (dst, src, width);
925 dst += cache->buffer.stride;
926 src += glyph_surface->stride;
930 case CAIRO_FORMAT_RGB16_565:
931 case CAIRO_FORMAT_RGB24:
932 case CAIRO_FORMAT_INVALID:
934 return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
937 scaled_glyph->surface_private = node;
939 glyph= (intel_glyph_t *) node;
940 glyph->node.owner = &scaled_glyph->surface_private;
941 glyph->cache = cache;
943 /* compute tex coords: bottom-right, bottom-left, top-left */
944 sf_x = 1. / cache->buffer.width;
945 sf_y = 1. / cache->buffer.height;
947 texcoord_2d_16 (sf_x * (node->x + glyph_surface->width),
948 sf_y * (node->y + glyph_surface->height));
950 texcoord_2d_16 (sf_x * node->x,
951 sf_y * (node->y + glyph_surface->height));
953 texcoord_2d_16 (sf_x * node->x,
956 glyph->width = glyph_surface->width;
957 glyph->height = glyph_surface->height;
959 return CAIRO_STATUS_SUCCESS;
/* Per-glyph destructor: detach the cache node from the dying glyph
 * and, unless the node is pinned by an in-flight batch, return its
 * slot to the rtree. */
963 intel_scaled_glyph_fini (cairo_scaled_glyph_t *scaled_glyph,
964 cairo_scaled_font_t *scaled_font)
966 intel_glyph_t *glyph;
968 glyph = scaled_glyph->surface_private;
970 /* XXX thread-safety? Probably ok due to the frozen scaled-font. */
971 glyph->node.owner = NULL;
972 if (! glyph->node.pinned)
973 _cairo_rtree_node_remove (&glyph->cache->rtree, &glyph->node);
/* Remove the font from the device's list of tracked fonts. */
978 intel_scaled_font_fini (cairo_scaled_font_t *scaled_font)
980 cairo_list_del (&scaled_font->link);
/* Select (and lazily create) the glyph cache for a pixel format:
 * slot 0 holds ARGB32 glyphs, slot 1 holds alpha-only glyphs (A1 is
 * promoted to A8); other formats are rejected.  On first use the
 * backing buffer and its rtree allocator are initialized. */
983 static cairo_status_t
984 intel_get_glyph_cache (intel_device_t *device,
985 cairo_format_t format,
986 intel_buffer_cache_t **out)
988 intel_buffer_cache_t *cache;
989 cairo_status_t status;
992 case CAIRO_FORMAT_ARGB32:
993 cache = &device->glyph_cache[0];
994 format = CAIRO_FORMAT_ARGB32;
996 case CAIRO_FORMAT_A8:
997 case CAIRO_FORMAT_A1:
998 cache = &device->glyph_cache[1];
999 format = CAIRO_FORMAT_A8;
1002 case CAIRO_FORMAT_RGB16_565:
1003 case CAIRO_FORMAT_RGB24:
1004 case CAIRO_FORMAT_INVALID:
1006 return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
1009 if (unlikely (cache->buffer.bo == NULL)) {
1010 status = intel_buffer_cache_init (cache, device, format,
1011 INTEL_GLYPH_CACHE_WIDTH,
1012 INTEL_GLYPH_CACHE_HEIGHT);
1013 if (unlikely (status))
1016 _cairo_rtree_init (&cache->rtree,
1017 INTEL_GLYPH_CACHE_WIDTH,
1018 INTEL_GLYPH_CACHE_HEIGHT,
1019 0, sizeof (intel_glyph_t));
1023 return CAIRO_STATUS_SUCCESS;
/* Ensure a glyph is present in the appropriate glyph cache.  Renders
 * the glyph surface on demand, rejects zero-sized and oversized
 * glyphs, and inserts it into the cache; when the cache is full the
 * whole cache is discarded and rebuilt (unless its bo is referenced
 * by the current batch) before retrying the insert.  On success the
 * CPU-side glyph image is released. */
1027 intel_get_glyph (intel_device_t *device,
1028 cairo_scaled_font_t *scaled_font,
1029 cairo_scaled_glyph_t *scaled_glyph)
1031 cairo_bool_t own_surface = FALSE;
1032 intel_buffer_cache_t *cache;
1033 cairo_status_t status;
1035 if (scaled_glyph->surface == NULL) {
1037 scaled_font->backend->scaled_glyph_init (scaled_font,
1039 CAIRO_SCALED_GLYPH_INFO_SURFACE);
1040 if (unlikely (status))
1043 if (unlikely (scaled_glyph->surface == NULL))
1044 return CAIRO_INT_STATUS_UNSUPPORTED;
1049 if (unlikely (scaled_glyph->surface->width == 0 ||
1050 scaled_glyph->surface->height == 0))
1052 return CAIRO_INT_STATUS_NOTHING_TO_DO;
1055 if (unlikely (scaled_glyph->surface->width > GLYPH_CACHE_MAX_SIZE ||
1056 scaled_glyph->surface->height > GLYPH_CACHE_MAX_SIZE))
1058 return CAIRO_INT_STATUS_UNSUPPORTED;
1061 status = intel_get_glyph_cache (device,
1062 scaled_glyph->surface->format,
1064 if (unlikely (status))
1067 status = intel_glyph_cache_add_glyph (device, cache, scaled_glyph);
1068 if (unlikely (_cairo_status_is_error (status)))
1071 if (unlikely (status == CAIRO_INT_STATUS_UNSUPPORTED)) {
1072 /* no room, replace entire cache */
1074 assert (cache->buffer.bo->exec != NULL);
1076 _cairo_rtree_reset (&cache->rtree);
1077 intel_bo_destroy (device, cache->buffer.bo);
1078 cache->buffer.bo = NULL;
1080 status = intel_buffer_cache_init (cache, device,
1081 scaled_glyph->surface->format,
1083 GLYPH_CACHE_HEIGHT);
1084 if (unlikely (status))
1087 status = intel_glyph_cache_add_glyph (device, cache, scaled_glyph);
1088 if (unlikely (status))
1093 /* and release the copy of the image from system memory */
1094 cairo_surface_destroy (&scaled_glyph->surface->base);
1095 scaled_glyph->surface = NULL;
1098 return CAIRO_STATUS_SUCCESS;
/* Allocate the Y-tiled backing buffer for a glyph/image cache and
 * precompute the hardware sampler state words (map0/map1 encode
 * format, tiling, width/height and pitch).  Only ARGB32 and A8 are
 * supported; width must be a multiple of 4 and height even, as
 * required by the tiled layout. */
1102 intel_buffer_cache_init (intel_buffer_cache_t *cache,
1103 intel_device_t *device,
1104 cairo_format_t format,
1105 int width, int height)
1107 const uint32_t tiling = I915_TILING_Y;
1108 uint32_t stride, size;
1110 assert ((width & 3) == 0);
1111 assert ((height & 1) == 0);
1112 cache->buffer.format = format;
1113 cache->buffer.width = width;
1114 cache->buffer.height = height;
1118 case CAIRO_FORMAT_A1:
1119 case CAIRO_FORMAT_RGB16_565:
1120 case CAIRO_FORMAT_RGB24:
1121 case CAIRO_FORMAT_INVALID:
1123 return _cairo_error (CAIRO_STATUS_INVALID_FORMAT);
1124 case CAIRO_FORMAT_ARGB32:
1125 cache->buffer.map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
1128 case CAIRO_FORMAT_A8:
1129 cache->buffer.map0 = MAPSURF_8BIT | MT_8BIT_I8;
1134 size = height * stride;
1135 cache->buffer.bo = intel_bo_create (device,
1137 FALSE, tiling, stride);
1138 if (unlikely (cache->buffer.bo == NULL))
1139 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1141 cache->buffer.stride = stride;
1143 cache->buffer.offset = 0;
1144 cache->buffer.map0 |= MS3_tiling (tiling);
1145 cache->buffer.map0 |= ((height - 1) << MS3_HEIGHT_SHIFT) |
1146 ((width - 1) << MS3_WIDTH_SHIFT);
1147 cache->buffer.map1 = ((stride / 4) - 1) << MS4_PITCH_SHIFT;
1149 cache->ref_count = 0;
1150 cairo_list_init (&cache->link);
1152 return CAIRO_STATUS_SUCCESS;
/* Register a surface snapshot in the device cache, keyed by the
 * surface pointer itself.  Oversized surfaces are silently skipped
 * (still a success).  The cache is frozen on first insert so entries
 * are not evicted mid-operation; failure resets the hash so the
 * surface is not considered cached. */
1156 intel_snapshot_cache_insert (intel_device_t *device,
1157 intel_surface_t *surface)
1159 cairo_status_t status;
1161 surface->snapshot_cache_entry.size = surface->drm.bo->size;
1162 if (surface->snapshot_cache_entry.size >
1163 device->snapshot_cache_max_size)
1165 return CAIRO_STATUS_SUCCESS;
1168 if (device->snapshot_cache.freeze_count == 0)
1169 _cairo_cache_freeze (&device->snapshot_cache);
1171 surface->snapshot_cache_entry.hash = (unsigned long) surface;
1172 status = _cairo_cache_insert (&device->snapshot_cache,
1173 &surface->snapshot_cache_entry);
1174 if (unlikely (status)) {
1175 surface->snapshot_cache_entry.hash = 0;
1179 return CAIRO_STATUS_SUCCESS;
/* Drop the surface's entry from the snapshot cache, if present.
 * Removal invokes the entry destructor, which clears the hash. */
1183 intel_surface_detach_snapshot (cairo_surface_t *abstract_surface)
1185 intel_surface_t *surface = (intel_surface_t *) abstract_surface;
1187 if (surface->snapshot_cache_entry.hash) {
1188 intel_device_t *device;
1190 device = (intel_device_t *) surface->drm.base.device;
1191 _cairo_cache_remove (&device->snapshot_cache,
1192 &surface->snapshot_cache_entry);
1193 assert (surface->snapshot_cache_entry.hash == 0);
/* Release the freeze taken by intel_snapshot_cache_insert, allowing
 * eviction to resume. */
1198 intel_snapshot_cache_thaw (intel_device_t *device)
1200 if (device->snapshot_cache.freeze_count)
1201 _cairo_cache_thaw (&device->snapshot_cache);
/* Compare two gradients' stop lists for equality, using fixed-point
 * offsets so comparison tolerance matches rendering precision.
 * Used as the gradient-cache lookup predicate. */
1205 _gradient_color_stops_equal (const cairo_gradient_pattern_t *a,
1206 const cairo_gradient_pattern_t *b)
1210 if (a->n_stops != b->n_stops)
1213 for (n = 0; n < a->n_stops; n++) {
1214 if (_cairo_fixed_from_double (a->stops[n].offset) !=
1215 _cairo_fixed_from_double (b->stops[n].offset))
1220 if (! _cairo_color_stop_equal (&a->stops[n].color, &b->stops[n].color))
1228 hars_petruska_f54_1_random (void)
1230 #define rol(x,k) ((x << k) | (x >> (32-k)))
1232 return x = (x ^ rol (x, 5) ^ rol (x, 24)) + 0x37798849;
/* Choose how many texels are needed to sample the gradient without
 * visible banding: for each stop pair, scale the steepest color-
 * channel delta by the offset span, pad to a multiple of 8 and clamp
 * to 1024 texels. */
1237 intel_gradient_sample_width (const cairo_gradient_pattern_t *gradient)
1243 for (n = 1; n < gradient->n_stops; n++) {
1244 double dx = gradient->stops[n].offset - gradient->stops[n-1].offset;
/* track the largest per-channel change across this stop interval */
1251 max = gradient->stops[n].color.red -
1252 gradient->stops[n-1].color.red;
1254 delta = gradient->stops[n].color.green -
1255 gradient->stops[n-1].color.green;
1259 delta = gradient->stops[n].color.blue -
1260 gradient->stops[n-1].color.blue;
1264 delta = gradient->stops[n].color.alpha -
1265 gradient->stops[n-1].color.alpha;
1269 ramp = 128 * max / dx;
1274 width = (width + 7) & -8;
1275 return MIN (width, 1024);
/* Render a gradient pattern into a 1-pixel-tall ARGB32 ramp texture,
 * caching the result on the device.  Cache hits return the existing
 * buffer; otherwise the ramp is rasterized with pixman into a linear
 * bo, the sampler words are filled in, and the result is stored in
 * the cache (evicting a random entry once GRADIENT_CACHE_SIZE is
 * reached). */
1279 intel_gradient_render (intel_device_t *device,
1280 const cairo_gradient_pattern_t *pattern,
1281 intel_buffer_t *buffer)
1283 pixman_image_t *gradient, *image;
1284 pixman_gradient_stop_t pixman_stops_stack[32];
1285 pixman_gradient_stop_t *pixman_stops;
1286 pixman_point_fixed_t p1, p2;
1289 cairo_status_t status;
1291 for (i = 0; i < device->gradient_cache.size; i++) {
1292 if (_gradient_color_stops_equal (pattern,
1293 &device->gradient_cache.cache[i].pattern.gradient.base)) {
1294 *buffer = device->gradient_cache.cache[i].buffer;
1295 return CAIRO_STATUS_SUCCESS;
/* convert the stops to pixman's fixed-point representation,
 * spilling to the heap beyond 32 stops */
1299 pixman_stops = pixman_stops_stack;
1300 if (unlikely (pattern->n_stops > ARRAY_LENGTH (pixman_stops_stack))) {
1301 pixman_stops = _cairo_malloc_ab (pattern->n_stops,
1302 sizeof (pixman_gradient_stop_t));
1303 if (unlikely (pixman_stops == NULL))
1304 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1307 for (i = 0; i < pattern->n_stops; i++) {
1308 pixman_stops[i].x = _cairo_fixed_16_16_from_double (pattern->stops[i].offset);
1309 pixman_stops[i].color.red = pattern->stops[i].color.red_short;
1310 pixman_stops[i].color.green = pattern->stops[i].color.green_short;
1311 pixman_stops[i].color.blue = pattern->stops[i].color.blue_short;
1312 pixman_stops[i].color.alpha = pattern->stops[i].color.alpha_short;
1315 width = intel_gradient_sample_width (pattern);
1322 gradient = pixman_image_create_linear_gradient (&p1, &p2,
1325 if (pixman_stops != pixman_stops_stack)
1326 free (pixman_stops);
1328 if (unlikely (gradient == NULL))
1329 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1331 pixman_image_set_filter (gradient, PIXMAN_FILTER_BILINEAR, NULL, 0);
1332 pixman_image_set_repeat (gradient, PIXMAN_REPEAT_PAD);
1334 image = pixman_image_create_bits (PIXMAN_a8r8g8b8, width, 1, NULL, 0);
1335 if (unlikely (image == NULL)) {
1336 pixman_image_unref (gradient);
1337 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1340 pixman_image_composite32 (PIXMAN_OP_SRC,
1341 gradient, NULL, image,
1347 pixman_image_unref (gradient);
1349 buffer->bo = intel_bo_create (device,
1351 FALSE, I915_TILING_NONE, 4*width);
1352 if (unlikely (buffer->bo == NULL)) {
1353 pixman_image_unref (image);
1354 return _cairo_error (CAIRO_STATUS_NO_MEMORY);
1357 intel_bo_write (device, buffer->bo, 0, 4*width, pixman_image_get_data (image));
1358 pixman_image_unref (image);
1361 buffer->width = width;
1363 buffer->stride = 4*width;
1364 buffer->format = CAIRO_FORMAT_ARGB32;
1365 buffer->map0 = MAPSURF_32BIT | MT_32BIT_ARGB8888;
1366 buffer->map0 |= ((width - 1) << MS3_WIDTH_SHIFT);
1367 buffer->map1 = (width - 1) << MS4_PITCH_SHIFT;
1369 if (device->gradient_cache.size < GRADIENT_CACHE_SIZE) {
1370 i = device->gradient_cache.size++;
1372 i = hars_petruska_f54_1_random () % GRADIENT_CACHE_SIZE;
1373 _cairo_pattern_fini (&device->gradient_cache.cache[i].pattern.base);
1374 intel_bo_destroy (device, device->gradient_cache.cache[i].buffer.bo);
1377 status = _cairo_pattern_init_copy (&device->gradient_cache.cache[i].pattern.base,
1379 if (unlikely (status)) {
1380 intel_bo_destroy (device, buffer->bo);
1381 /* Ensure the cache is correctly initialised for i965_device_destroy */
1382 _cairo_pattern_init_solid (&device->gradient_cache.cache[i].pattern.solid,
1383 CAIRO_COLOR_TRANSPARENT);
1387 device->gradient_cache.cache[i].buffer = *buffer;
1388 return CAIRO_STATUS_SUCCESS;