EXYNOS_SUBDIR = exynos
endif
-if HAVE_SLP
-SLP_SUBDIR = slp
-endif
-
-SUBDIRS = . $(LIBKMS_SUBDIR) $(INTEL_SUBDIR) $(NOUVEAU_SUBDIR) $(RADEON_SUBDIR) $(OMAP_SUBDIR) $(EXYNOS_SUBDIR) $(SLP_SUBDIR) tests include
+SUBDIRS = . $(LIBKMS_SUBDIR) $(INTEL_SUBDIR) $(NOUVEAU_SUBDIR) $(RADEON_SUBDIR) $(OMAP_SUBDIR) $(EXYNOS_SUBDIR) tests include
libdrm_la_LTLIBRARIES = libdrm.la
libdrm_ladir = $(libdir)
AC_PREREQ([2.63])
AC_INIT([libdrm],
- [2.4.37],
+ [2.4.39],
[https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
[libdrm])
[Enable support for EXYNOS's experimental API (default: disabled)]),
[EXYNOS=$enableval], [EXYNOS=no])
-AC_ARG_ENABLE(slp,
- AS_HELP_STRING([--disable-slp],
- [Enable support for slp's API (default: auto)]),
- [SLP=$enableval], [SLP=auto])
-
-AC_ARG_VAR([bufmgr_dir], [Directory of slp-bufmgr])
-
-if test "x$bufmgr_dir" = xyes; then
- AC_DEFINE_UNQUOTED(BUFMGR_DIR, "$bufmgr_dir", [Directory for the modules of slp_bufmgr])
-else
- AC_DEFINE(BUFMGR_DIR, "/usr/lib/bufmgr", [Directory for the modules of slp_bufmgr])
-fi
-
dnl ===========================================================================
dnl check compiler flags
AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
-Wpointer-arith -Wwrite-strings -Wstrict-prototypes \
-Wmissing-prototypes -Wmissing-declarations -Wnested-externs \
-Wpacked -Wswitch-enum -Wmissing-format-attribute \
--Wstrict-aliasing=2 -Winit-self -Wunsafe-loop-optimizations \
+-Wstrict-aliasing=2 -Winit-self \
-Wdeclaration-after-statement -Wold-style-definition \
-Wno-missing-field-initializers -Wno-unused-parameter \
-Wno-attributes -Wno-long-long -Winline"
fi
AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])
-if test "x$INTEL" != "xno" -o "x$RADEON" != "xno" -o "x$NOUVEAU" != "xno" -o "x$SLP" != "xno"; then
+if test "x$INTEL" != "xno" -o "x$RADEON" != "xno" -o "x$NOUVEAU" != "xno" -o "x$OMAP" != "xno"; then
# Check for atomic intrinsics
AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives,
[
fi
fi
-if test "x$SLP" != "xno"; then
- AC_DEFINE(HAVE_SLP, 1, [Have slp])
-fi
-
if test "x$INTEL" != "xno"; then
PKG_CHECK_MODULES(PCIACCESS, [pciaccess >= 0.10])
fi
AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
fi
-AM_CONDITIONAL(HAVE_SLP, [test "x$SLP" != "xno"])
AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" != "xno"])
AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" != "xno"])
AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" != "xno"])
Makefile
libkms/Makefile
libkms/libkms.pc
- slp/Makefile
- slp/libdrm_slp.pc
intel/Makefile
intel/libdrm_intel.pc
radeon/Makefile
echo " Nouveau API $NOUVEAU"
echo " OMAP API $OMAP"
echo " EXYNOS API $EXYNOS"
-echo " SLP API $SLP"
-echo " SLP bufmgr_dir $bufmgr_dir"
echo ""
* A structure for mapping buffer.
*
* @handle: a handle to gem object created.
+ * @pad: just padding to be 64-bit aligned.
* @size: memory size to be mapped.
* @mapped: having user virtual address mmaped.
* - this variable would be filled by exynos gem module
*/
struct drm_exynos_gem_mmap {
unsigned int handle;
- unsigned int size;
+ unsigned int pad;
+ uint64_t size;
uint64_t mapped;
};
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
+
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_SG_ALLOC DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
-#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
-#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
-
#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+#define DRM_CAP_PRIME 0x5
+
+#define DRM_PRIME_CAP_IMPORT 0x1
+#define DRM_PRIME_CAP_EXPORT 0x2
/* typedef area */
typedef struct drm_clip_rect drm_clip_rect_t;
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
+#define DRM_I915_GEM_SET_CACHEING 0x2f
+#define DRM_I915_GEM_GET_CACHEING 0x30
+#define DRM_I915_REG_READ 0x31
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_SET_CACHEING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHEING, struct drm_i915_gem_cacheing)
+#define DRM_IOCTL_I915_GEM_GET_CACHEING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHEING, struct drm_i915_gem_cacheing)
#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
+#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
/** Handle of the buffer to check for busy */
__u32 handle;
- /** Return busy status (1 if busy, 0 if idle) */
+ /** Return busy status (1 if busy, 0 if idle).
+ * The high word is used to indicate on which rings the object
+ * currently resides:
+ * 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+ */
__u32 busy;
};
+#define I915_CACHEING_NONE 0
+#define I915_CACHEING_CACHED 1
+
+struct drm_i915_gem_cacheing {
+ /**
+ * Handle of the buffer to set/get the cacheing level of. */
+ __u32 handle;
+
+ /**
+ * Cacheing level to apply or return value
+ *
+ * bits0-15 are for generic cacheing control (i.e. the above defined
+ * values). bits16-31 are reserved for platform-specific variations
+ * (e.g. l3$ caching on gen7). */
+ __u32 cacheing;
+};
+
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
__u32 pad;
};
+struct drm_i915_reg_read {
+ __u64 offset;
+ __u64 val; /* Return value */
+};
#endif /* _I915_DRM_H_ */
$(BATCHES:.batch=.batch-ref.txt) \
tests/test-batch.sh
-test_decode_LDADD = libdrm_intel.la
+test_decode_LDADD = libdrm_intel.la ../libdrm.la
pkgconfig_DATA = libdrm_intel.pc
int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags);
+int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
+drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
+ int prime_fd, int size);
+
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
void drm_intel_decode(struct drm_intel_decode *ctx);
+int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
+ uint32_t offset,
+ uint64_t *result);
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
return 0;
}
+drm_intel_bo *
+drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+ int ret;
+ uint32_t handle;
+ drm_intel_bo_gem *bo_gem;
+ struct drm_i915_gem_get_tiling get_tiling;
+
+ ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
+ if (ret) {
+ fprintf(stderr,"ret is %d %d\n", ret, errno);
+ return NULL;
+ }
+
+ bo_gem = calloc(1, sizeof(*bo_gem));
+ if (!bo_gem)
+ return NULL;
+
+ bo_gem->bo.size = size;
+ bo_gem->bo.handle = handle;
+ bo_gem->bo.bufmgr = bufmgr;
+
+ bo_gem->gem_handle = handle;
+
+ atomic_set(&bo_gem->refcount, 1);
+
+ bo_gem->name = "prime";
+ bo_gem->validate_index = -1;
+ bo_gem->reloc_tree_fences = 0;
+ bo_gem->used_as_reloc_target = false;
+ bo_gem->has_error = false;
+ bo_gem->reusable = false;
+
+ DRMINITLISTHEAD(&bo_gem->name_list);
+ DRMINITLISTHEAD(&bo_gem->vma_list);
+
+ VG_CLEAR(get_tiling);
+ get_tiling.handle = bo_gem->gem_handle;
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_TILING,
+ &get_tiling);
+ if (ret != 0) {
+ drm_intel_gem_bo_unreference(&bo_gem->bo);
+ return NULL;
+ }
+ bo_gem->tiling_mode = get_tiling.tiling_mode;
+ bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+ /* XXX stride is unknown */
+ drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
+
+ return &bo_gem->bo;
+}
+
+int
+drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ return drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle, DRM_CLOEXEC, prime_fd);
+}
+
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
struct drm_i915_gem_context_create create;
- drm_i915_getparam_t gp;
drm_intel_context *context = NULL;
- int tmp = 0, ret;
+ int ret;
+ VG_CLEAR(create);
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
- fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
- strerror(errno));
+ DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
+ strerror(errno));
return NULL;
}
if (ctx == NULL)
return;
+ VG_CLEAR(destroy);
+
bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
destroy.ctx_id = ctx->ctx_id;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
free(ctx);
}
+int
+drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
+ uint32_t offset,
+ uint64_t *result)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+ struct drm_i915_reg_read reg_read;
+ int ret;
+
+ VG_CLEAR(reg_read);
+ reg_read.offset = offset;
+
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
+
+ *result = reg_read.val;
+ return ret;
+}
+
/**
* Annotate the given bo for use in aub dumping.
else if (IS_GEN6(bufmgr_gem->pci_device))
bufmgr_gem->gen = 6;
else if (IS_GEN7(bufmgr_gem->pci_device))
- bufmgr_gem->gen = 7;
- else
- assert(0);
+ bufmgr_gem->gen = 7;
+ else {
+ free(bufmgr_gem);
+ return NULL;
+ }
if (IS_GEN3(bufmgr_gem->pci_device) &&
bufmgr_gem->gtt_size > 256*1024*1024) {
#define PCI_CHIP_HASWELL_GT1 0x0402 /* Desktop */
#define PCI_CHIP_HASWELL_GT2 0x0412
+#define PCI_CHIP_HASWELL_GT2_PLUS 0x0422
#define PCI_CHIP_HASWELL_M_GT1 0x0406 /* Mobile */
#define PCI_CHIP_HASWELL_M_GT2 0x0416
-#define PCI_CHIP_HASWELL_M_ULT_GT2 0x0A16 /* Mobile ULT */
+#define PCI_CHIP_HASWELL_M_GT2_PLUS 0x0426
+#define PCI_CHIP_HASWELL_S_GT1 0x040A /* Server */
+#define PCI_CHIP_HASWELL_S_GT2 0x041A
+#define PCI_CHIP_HASWELL_S_GT2_PLUS 0x042A
+#define PCI_CHIP_HASWELL_SDV_GT1 0x0C02 /* Desktop */
+#define PCI_CHIP_HASWELL_SDV_GT2 0x0C12
+#define PCI_CHIP_HASWELL_SDV_GT2_PLUS 0x0C22
+#define PCI_CHIP_HASWELL_SDV_M_GT1 0x0C06 /* Mobile */
+#define PCI_CHIP_HASWELL_SDV_M_GT2 0x0C16
+#define PCI_CHIP_HASWELL_SDV_M_GT2_PLUS 0x0C26
+#define PCI_CHIP_HASWELL_SDV_S_GT1 0x0C0A /* Server */
+#define PCI_CHIP_HASWELL_SDV_S_GT2 0x0C1A
+#define PCI_CHIP_HASWELL_SDV_S_GT2_PLUS 0x0C2A
+#define PCI_CHIP_HASWELL_ULT_GT1 0x0A02 /* Desktop */
+#define PCI_CHIP_HASWELL_ULT_GT2 0x0A12
+#define PCI_CHIP_HASWELL_ULT_GT2_PLUS 0x0A22
+#define PCI_CHIP_HASWELL_ULT_M_GT1 0x0A06 /* Mobile */
+#define PCI_CHIP_HASWELL_ULT_M_GT2 0x0A16
+#define PCI_CHIP_HASWELL_ULT_M_GT2_PLUS 0x0A26
+#define PCI_CHIP_HASWELL_ULT_S_GT1 0x0A0A /* Server */
+#define PCI_CHIP_HASWELL_ULT_S_GT2 0x0A1A
+#define PCI_CHIP_HASWELL_ULT_S_GT2_PLUS 0x0A2A
+#define PCI_CHIP_HASWELL_CRW_GT1 0x0D12 /* Desktop */
+#define PCI_CHIP_HASWELL_CRW_GT2 0x0D22
+#define PCI_CHIP_HASWELL_CRW_GT2_PLUS 0x0D32
+#define PCI_CHIP_HASWELL_CRW_M_GT1 0x0D16 /* Mobile */
+#define PCI_CHIP_HASWELL_CRW_M_GT2 0x0D26
+#define PCI_CHIP_HASWELL_CRW_M_GT2_PLUS 0x0D36
+#define PCI_CHIP_HASWELL_CRW_S_GT1 0x0D1A /* Server */
+#define PCI_CHIP_HASWELL_CRW_S_GT2 0x0D2A
+#define PCI_CHIP_HASWELL_CRW_S_GT2_PLUS 0x0D3A
#define IS_830(dev) (dev == 0x3577)
#define IS_845(dev) (dev == 0x2562)
dev == PCI_CHIP_IVYBRIDGE_S_GT2)
#define IS_HSW_GT1(devid) (devid == PCI_CHIP_HASWELL_GT1 || \
- devid == PCI_CHIP_HASWELL_M_GT1)
+ devid == PCI_CHIP_HASWELL_M_GT1 || \
+ devid == PCI_CHIP_HASWELL_S_GT1 || \
+ devid == PCI_CHIP_HASWELL_SDV_GT1 || \
+ devid == PCI_CHIP_HASWELL_SDV_M_GT1 || \
+ devid == PCI_CHIP_HASWELL_SDV_S_GT1 || \
+ devid == PCI_CHIP_HASWELL_ULT_GT1 || \
+ devid == PCI_CHIP_HASWELL_ULT_M_GT1 || \
+ devid == PCI_CHIP_HASWELL_ULT_S_GT1 || \
+ devid == PCI_CHIP_HASWELL_CRW_GT1 || \
+ devid == PCI_CHIP_HASWELL_CRW_M_GT1 || \
+ devid == PCI_CHIP_HASWELL_CRW_S_GT1)
#define IS_HSW_GT2(devid) (devid == PCI_CHIP_HASWELL_GT2 || \
devid == PCI_CHIP_HASWELL_M_GT2 || \
- devid == PCI_CHIP_HASWELL_M_ULT_GT2)
+ devid == PCI_CHIP_HASWELL_S_GT2 || \
+ devid == PCI_CHIP_HASWELL_SDV_GT2 || \
+ devid == PCI_CHIP_HASWELL_SDV_M_GT2 || \
+ devid == PCI_CHIP_HASWELL_SDV_S_GT2 || \
+ devid == PCI_CHIP_HASWELL_ULT_GT2 || \
+ devid == PCI_CHIP_HASWELL_ULT_M_GT2 || \
+ devid == PCI_CHIP_HASWELL_ULT_S_GT2 || \
+ devid == PCI_CHIP_HASWELL_CRW_GT2 || \
+ devid == PCI_CHIP_HASWELL_CRW_M_GT2 || \
+ devid == PCI_CHIP_HASWELL_CRW_S_GT2 || \
+ devid == PCI_CHIP_HASWELL_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_M_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_S_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_SDV_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_SDV_M_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_SDV_S_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_ULT_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_ULT_M_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_ULT_S_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_CRW_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_CRW_M_GT2_PLUS || \
+ devid == PCI_CHIP_HASWELL_CRW_S_GT2_PLUS)
#define IS_HASWELL(devid) (IS_HSW_GT1(devid) || \
IS_HSW_GT2(devid))
* IN THE SOFTWARE.
*/
+#define _GNU_SOURCE
+
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <errno.h>
#include <sys/mman.h>
+#include <fcntl.h>
#include <xf86drm.h>
#include <xf86atomic.h>
}
int
+nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
+ struct nouveau_bo **bo)
+{
+ int ret;
+ unsigned int handle;
+
+ ret = drmPrimeFDToHandle(dev->fd, prime_fd, &handle);
+ if (ret) {
+ nouveau_bo_ref(NULL, bo);
+ return ret;
+ }
+
+ ret = nouveau_bo_wrap(dev, handle, bo);
+ if (ret) {
+ nouveau_bo_ref(NULL, bo);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
+{
+ struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
+ int ret;
+
+ ret = drmPrimeHandleToFD(bo->device->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
struct nouveau_client *client)
{
struct nouveau_client *);
int nouveau_bo_wait(struct nouveau_bo *, uint32_t access,
struct nouveau_client *);
+int nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
+ struct nouveau_bo **);
+int nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd);
struct nouveau_bufref {
struct nouveau_list thead;
#include <stdlib.h>
#include <linux/stddef.h>
+#include <linux/types.h>
#include <errno.h>
#include <sys/mman.h>
#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
#include <xf86drm.h>
+#include <xf86atomic.h>
#include "omap_drm.h"
#include "omap_drmif.h"
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096
+static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+static void * dev_table;
+
struct omap_device {
int fd;
+ atomic_t refcnt;
+
+ /* The handle_table is used to track GEM bo handles associated w/
+ * this fd. This is needed, in particular, when importing
+ * dmabuf's because we don't want multiple 'struct omap_bo's
+ * floating around with the same handle. Otherwise, when the
+ * first one is omap_bo_del()'d the handle becomes no longer
+ * valid, and the remaining 'struct omap_bo's are left pointing
+ * to an invalid handle (and possible a GEM bo that is already
+ * free'd).
+ */
+ void *handle_table;
};
/* a GEM buffer object allocated from the DRM device */
uint32_t name; /* flink global handle (DRI2 name) */
uint64_t offset; /* offset to mmap() */
int fd; /* dmabuf handle */
+ atomic_t refcnt;
};
-struct omap_device * omap_device_new(int fd)
+static struct omap_device * omap_device_new_impl(int fd)
{
struct omap_device *dev = calloc(sizeof(*dev), 1);
if (!dev)
return NULL;
dev->fd = fd;
+ atomic_set(&dev->refcnt, 1);
+ dev->handle_table = drmHashCreate();
+ return dev;
+}
+
+struct omap_device * omap_device_new(int fd)
+{
+ struct omap_device *dev = NULL;
+
+ pthread_mutex_lock(&table_lock);
+
+ if (!dev_table)
+ dev_table = drmHashCreate();
+
+ if (drmHashLookup(dev_table, fd, (void **)&dev)) {
+ /* not found, create new device */
+ dev = omap_device_new_impl(fd);
+ drmHashInsert(dev_table, fd, dev);
+ } else {
+ /* found, just incr refcnt */
+ dev = omap_device_ref(dev);
+ }
+
+ pthread_mutex_unlock(&table_lock);
+
+ return dev;
+}
+
+struct omap_device * omap_device_ref(struct omap_device *dev)
+{
+ atomic_inc(&dev->refcnt);
return dev;
}
void omap_device_del(struct omap_device *dev)
{
+ if (!atomic_dec_and_test(&dev->refcnt))
+ return;
+ pthread_mutex_lock(&table_lock);
+ drmHashDestroy(dev->handle_table);
+ drmHashDelete(dev_table, dev->fd);
+ pthread_mutex_unlock(&table_lock);
free(dev);
}
return drmCommandWrite(dev->fd, DRM_OMAP_SET_PARAM, &req, sizeof(req));
}
+/* lookup a buffer from it's handle, call w/ table_lock held: */
+static struct omap_bo * lookup_bo(struct omap_device *dev,
+ uint32_t handle)
+{
+ struct omap_bo *bo = NULL;
+ if (!drmHashLookup(dev->handle_table, handle, (void **)&bo)) {
+ /* found, incr refcnt and return: */
+ bo = omap_bo_ref(bo);
+ }
+ return bo;
+}
+
+/* allocate a new buffer object, call w/ table_lock held */
+static struct omap_bo * bo_from_handle(struct omap_device *dev,
+ uint32_t handle)
+{
+ struct omap_bo *bo = calloc(sizeof(*bo), 1);
+ if (!bo) {
+ struct drm_gem_close req = {
+ .handle = handle,
+ };
+ drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ return NULL;
+ }
+ bo->dev = omap_device_ref(dev);
+ bo->handle = handle;
+ atomic_set(&bo->refcnt, 1);
+ /* add ourselves to the handle table: */
+ drmHashInsert(dev->handle_table, handle, bo);
+ return bo;
+}
+
/* allocate a new buffer object */
static struct omap_bo * omap_bo_new_impl(struct omap_device *dev,
union omap_gem_size size, uint32_t flags)
goto fail;
}
- bo = calloc(sizeof(*bo), 1);
- if (!bo) {
+ if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
goto fail;
}
- bo->dev = dev;
+ pthread_mutex_lock(&table_lock);
+ bo = bo_from_handle(dev, req.handle);
+ pthread_mutex_unlock(&table_lock);
if (flags & OMAP_BO_TILED) {
bo->size = round_up(size.tiled.width, PAGE_SIZE) * size.tiled.height;
bo->size = size.bytes;
}
- if (drmCommandWriteRead(dev->fd, DRM_OMAP_GEM_NEW, &req, sizeof(req))) {
- goto fail;
- }
-
- bo->handle = req.handle;
-
return bo;
fail:
return omap_bo_new_impl(dev, gsize, flags);
}
+struct omap_bo * omap_bo_ref(struct omap_bo *bo)
+{
+ atomic_inc(&bo->refcnt);
+ return bo;
+}
+
/* get buffer info */
static int get_buffer_info(struct omap_bo *bo)
{
/* import a buffer object from DRI2 name */
struct omap_bo * omap_bo_from_name(struct omap_device *dev, uint32_t name)
{
- struct omap_bo *bo;
+ struct omap_bo *bo = NULL;
struct drm_gem_open req = {
.name = name,
};
- bo = calloc(sizeof(*bo), 1);
- if (!bo) {
+ pthread_mutex_lock(&table_lock);
+
+ if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
goto fail;
}
- if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) {
+ bo = lookup_bo(dev, req.handle);
+ if (!bo) {
+ bo = bo_from_handle(dev, req.handle);
+ bo->name = name;
+ }
+
+ pthread_mutex_unlock(&table_lock);
+
+ return bo;
+
+fail:
+ free(bo);
+ return NULL;
+}
+
+/* import a buffer from dmabuf fd, does not take ownership of the
+ * fd so caller should close() the fd when it is otherwise done
+ * with it (even if it is still using the 'struct omap_bo *')
+ */
+struct omap_bo * omap_bo_from_dmabuf(struct omap_device *dev, int fd)
+{
+ struct omap_bo *bo = NULL;
+ struct drm_prime_handle req = {
+ .fd = fd,
+ };
+ int ret;
+
+ pthread_mutex_lock(&table_lock);
+
+ ret = drmIoctl(dev->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &req);
+ if (ret) {
goto fail;
}
- bo->dev = dev;
- bo->name = name;
- bo->handle = req.handle;
+ bo = lookup_bo(dev, req.handle);
+ if (!bo) {
+ bo = bo_from_handle(dev, req.handle);
+ }
+
+ pthread_mutex_unlock(&table_lock);
return bo;
return;
}
+ if (!atomic_dec_and_test(&bo->refcnt))
+ return;
+
if (bo->map) {
munmap(bo->map, bo->size);
}
+ if (bo->fd) {
+ close(bo->fd);
+ }
+
if (bo->handle) {
struct drm_gem_close req = {
.handle = bo->handle,
};
-
+ pthread_mutex_lock(&table_lock);
+ drmHashDelete(bo->dev->handle_table, bo->handle);
drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req);
+ pthread_mutex_unlock(&table_lock);
}
+ omap_device_del(bo->dev);
+
free(bo);
}
return bo->handle;
}
+/* caller owns the dmabuf fd that is returned and is responsible
+ * to close() it when done
+ */
int omap_bo_dmabuf(struct omap_bo *bo)
{
if (!bo->fd) {
bo->fd = req.fd;
}
- return bo->fd;
+ return dup(bo->fd);
}
uint32_t omap_bo_size(struct omap_bo *bo)
#ifndef __OMAP_DRM_H__
#define __OMAP_DRM_H__
-#include "drm.h"
+#include <stdint.h>
+#include <drm.h>
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
struct omap_device * omap_device_new(int fd);
+struct omap_device * omap_device_ref(struct omap_device *dev);
void omap_device_del(struct omap_device *dev);
int omap_get_param(struct omap_device *dev, uint64_t param, uint64_t *value);
int omap_set_param(struct omap_device *dev, uint64_t param, uint64_t value);
uint32_t size, uint32_t flags);
struct omap_bo * omap_bo_new_tiled(struct omap_device *dev,
uint32_t width, uint32_t height, uint32_t flags);
+struct omap_bo * omap_bo_ref(struct omap_bo *bo);
struct omap_bo * omap_bo_from_name(struct omap_device *dev, uint32_t name);
+struct omap_bo * omap_bo_from_dmabuf(struct omap_device *dev, int fd);
void omap_bo_del(struct omap_bo *bo);
int omap_bo_get_name(struct omap_bo *bo, uint32_t *name);
uint32_t omap_bo_handle(struct omap_bo *bo);
--- /dev/null
+From e2189a84d0da40b46c3406860fe087b7b09420b3 Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Wed, 10 Oct 2012 09:56:06 -0700
+Subject: [PATCH] Adding slp subpackage
+
+---
+ Makefile.am | 6 +-
+ libkms/Makefile.am | 5 +
+ libkms/slp.c | 222 +++++++++++++
+ slp/Makefile.am | 22 ++
+ slp/drm_slp_bufmgr.c | 847 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ slp/drm_slp_bufmgr.h | 201 ++++++++++++
+ slp/libdrm_slp.pc.in | 11 +
+ slp/list.h | 131 ++++++++
+ 8 files changed, 1444 insertions(+), 1 deletions(-)
+ create mode 100644 libkms/slp.c
+ create mode 100644 slp/Makefile.am
+ create mode 100644 slp/drm_slp_bufmgr.c
+ create mode 100644 slp/drm_slp_bufmgr.h
+ create mode 100644 slp/libdrm_slp.pc.in
+ create mode 100644 slp/list.h
+
+diff --git a/Makefile.am b/Makefile.am
+index 256a8cc..6e74607 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -49,7 +49,11 @@ if HAVE_EXYNOS
+ EXYNOS_SUBDIR = exynos
+ endif
+
+-SUBDIRS = . $(LIBKMS_SUBDIR) $(INTEL_SUBDIR) $(NOUVEAU_SUBDIR) $(RADEON_SUBDIR) $(OMAP_SUBDIR) $(EXYNOS_SUBDIR) tests include
++if HAVE_SLP
++SLP_SUBDIR = slp
++endif
++
++SUBDIRS = . $(LIBKMS_SUBDIR) $(INTEL_SUBDIR) $(NOUVEAU_SUBDIR) $(RADEON_SUBDIR) $(OMAP_SUBDIR) $(EXYNOS_SUBDIR) $(SLP_SUBDIR) tests include
+
+ libdrm_la_LTLIBRARIES = libdrm.la
+ libdrm_ladir = $(libdir)
+diff --git a/libkms/Makefile.am b/libkms/Makefile.am
+index fa379a4..df74b7e 100644
+--- a/libkms/Makefile.am
++++ b/libkms/Makefile.am
+@@ -31,6 +31,11 @@ if HAVE_RADEON
+ libkms_la_SOURCES += radeon.c
+ endif
+
++if HAVE_SLP
++libkms_la_SOURCES += slp.c
++AM_CFLAGS += -I$(top_srcdir)/exynos
++endif
++
+ libkmsincludedir = ${includedir}/libkms
+ libkmsinclude_HEADERS = libkms.h
+
+diff --git a/libkms/slp.c b/libkms/slp.c
+new file mode 100644
+index 0000000..263f2ab
+--- /dev/null
++++ b/libkms/slp.c
+@@ -0,0 +1,222 @@
++/**************************************************************************
++ *
++ * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ **************************************************************************/
++
++
++#define HAVE_STDINT_H
++#define _FILE_OFFSET_BITS 64
++
++#include <errno.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include "internal.h"
++
++#include <sys/mman.h>
++#include <sys/ioctl.h>
++#include "xf86drm.h"
++
++#include "exynos_drm.h"
++
++struct slp_bo
++{
++ struct kms_bo base;
++ unsigned map_count;
++};
++
++static int
++slp_get_prop(struct kms_driver *kms, unsigned key, unsigned *out)
++{
++ switch (key) {
++ case KMS_BO_TYPE:
++ *out = KMS_BO_TYPE_SCANOUT_X8R8G8B8 | KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8;
++ break;
++ default:
++ return -EINVAL;
++ }
++ return 0;
++}
++
++static int
++slp_destroy(struct kms_driver *kms)
++{
++ free(kms);
++ return 0;
++}
++
++static int
++slp_bo_create(struct kms_driver *kms,
++ const unsigned width, const unsigned height,
++ const enum kms_bo_type type, const unsigned *attr,
++ struct kms_bo **out)
++{
++ struct drm_exynos_gem_create arg;
++ unsigned size, pitch;
++ struct slp_bo *bo;
++ int i, ret;
++
++ for (i = 0; attr[i]; i += 2) {
++ switch (attr[i]) {
++ case KMS_WIDTH:
++ case KMS_HEIGHT:
++ case KMS_BO_TYPE:
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++
++ bo = calloc(1, sizeof(*bo));
++ if (!bo)
++ return -ENOMEM;
++
++ if (type == KMS_BO_TYPE_CURSOR_64X64_A8R8G8B8) {
++ pitch = 64 * 4;
++ size = 64 * 64 * 4;
++ } else if (type == KMS_BO_TYPE_SCANOUT_X8R8G8B8) {
++ pitch = width * 4;
++ pitch = (pitch + 512 - 1) & ~(512 - 1);
++ size = pitch * ((height + 4 - 1) & ~(4 - 1));
++ } else {
++ return -EINVAL;
++ }
++
++ memset(&arg, 0, sizeof(arg));
++ arg.size = size;
++
++ ret = drmCommandWriteRead(kms->fd, DRM_EXYNOS_GEM_CREATE, &arg, sizeof(arg));
++ if (ret)
++ goto err_free;
++
++ bo->base.kms = kms;
++ bo->base.handle = arg.handle;
++ bo->base.size = size;
++ bo->base.pitch = pitch;
++
++ *out = &bo->base;
++
++ return 0;
++
++err_free:
++ free(bo);
++ return ret;
++}
++
++static int
++slp_bo_get_prop(struct kms_bo *bo, unsigned key, unsigned *out)
++{
++ switch (key) {
++ default:
++ return -EINVAL;
++ }
++}
++
++static int
++slp_bo_map(struct kms_bo *_bo, void **out)
++{
++ struct slp_bo *bo = (struct slp_bo *)_bo;
++ struct drm_exynos_gem_map_off arg;
++ void *map = NULL;
++ int ret;
++
++ if (bo->base.ptr) {
++ bo->map_count++;
++ *out = bo->base.ptr;
++ return 0;
++ }
++
++ memset(&arg, 0, sizeof(arg));
++ arg.handle = bo->base.handle;
++
++ ret = drmCommandWriteRead(bo->base.kms->fd, DRM_EXYNOS_GEM_MAP_OFFSET, &arg, sizeof(arg));
++ if (ret)
++ return ret;
++
++ map = mmap(0, bo->base.size, PROT_READ | PROT_WRITE, MAP_SHARED, bo->base.kms->fd, arg.offset);
++ if (map == MAP_FAILED)
++ return -errno;
++
++ bo->base.ptr = map;
++ bo->map_count++;
++ *out = bo->base.ptr;
++
++ return 0;
++}
++
++static int
++slp_bo_unmap(struct kms_bo *_bo)
++{
++ struct slp_bo *bo = (struct slp_bo *)_bo;
++ bo->map_count--;
++ return 0;
++}
++
++static int
++slp_bo_destroy(struct kms_bo *_bo)
++{
++ struct slp_bo *bo = (struct slp_bo *)_bo;
++ struct drm_gem_close arg;
++ int ret;
++
++ if (bo->base.ptr) {
++ /* XXX Sanity check map_count */
++ munmap(bo->base.ptr, bo->base.size);
++ bo->base.ptr = NULL;
++ }
++
++ memset(&arg, 0, sizeof(arg));
++ arg.handle = bo->base.handle;
++
++ ret = drmIoctl(bo->base.kms->fd, DRM_IOCTL_GEM_CLOSE, &arg);
++ if (ret)
++ return -errno;
++
++ free(bo);
++ return 0;
++}
++
++int
++slp_create(int fd, struct kms_driver **out)
++{
++ struct kms_driver *kms;
++
++ kms = calloc(1, sizeof(*kms));
++ if (!kms)
++ return -ENOMEM;
++
++ kms->fd = fd;
++
++ kms->bo_create = slp_bo_create;
++ kms->bo_map = slp_bo_map;
++ kms->bo_unmap = slp_bo_unmap;
++ kms->bo_get_prop = slp_bo_get_prop;
++ kms->bo_destroy = slp_bo_destroy;
++ kms->get_prop = slp_get_prop;
++ kms->destroy = slp_destroy;
++ *out = kms;
++
++ return 0;
++}
+diff --git a/slp/Makefile.am b/slp/Makefile.am
+new file mode 100644
+index 0000000..132662b
+--- /dev/null
++++ b/slp/Makefile.am
+@@ -0,0 +1,22 @@
++SUBDIRS = .
++
++AM_CFLAGS = \
++ $(WARN_CFLAGS) \
++ -I$(top_srcdir) \
++ -I$(top_srcdir)/slp \
++ $(PTHREADSTUBS_CFLAGS) \
++ -I$(top_srcdir)/include/drm
++
++libdrm_slp_la_LTLIBRARIES = libdrm_slp.la
++libdrm_slp_ladir = $(libdir)
++libdrm_slp_la_LDFLAGS = -version-number 1:0:0 -no-undefined
++libdrm_slp_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@ @CLOCK_LIB@ -ldl
++
++libdrm_slp_la_SOURCES = \
++ drm_slp_bufmgr.c \
++ drm_slp_bufmgr.h
++
++libdrm_slpincludedir = ${includedir}/libdrm
++libdrm_slpinclude_HEADERS = drm_slp_bufmgr.h
++
++pkgconfig_DATA = libdrm_slp.pc
+diff --git a/slp/drm_slp_bufmgr.c b/slp/drm_slp_bufmgr.c
+new file mode 100644
+index 0000000..f723ded
+--- /dev/null
++++ b/slp/drm_slp_bufmgr.c
+@@ -0,0 +1,847 @@
++/**************************************************************************
++
++xserver-xorg-video-sec
++
++Copyright 2011 Samsung Electronics co., Ltd. All Rights Reserved.
++
++Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
++
++Permission is hereby granted, free of charge, to any person obtaining a
++copy of this software and associated documentation files (the
++"Software"), to deal in the Software without restriction, including
++without limitation the rights to use, copy, modify, merge, publish,
++distribute, sub license, and/or sell copies of the Software, and to
++permit persons to whom the Software is furnished to do so, subject to
++the following conditions:
++
++The above copyright notice and this permission notice (including the
++next paragraph) shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
++ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++
++**************************************************************************/
++
++#include "config.h"
++
++#include <unistd.h>
++#include <limits.h>
++#include <stdlib.h>
++#include <stdio.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <dlfcn.h>
++#include <dirent.h>
++#include <string.h>
++#include <errno.h>
++
++#include "drm_slp_bufmgr.h"
++#include "list.h"
++
++#define PREFIX_LIB "libdrm_slp_"
++#define SUFFIX_LIB ".so"
++#define DEFAULT_LIB PREFIX_LIB"default"SUFFIX_LIB
++
++#define NUM_TRY_LOCK 10
++#define SEM_NAME "pixmap_1"
++#define SEM_DEBUG 0
++
++#define DRM_RETURN_IF_FAIL(cond) {if (!(cond)) { fprintf (stderr, "[%s] : '%s' failed.\n", __FUNCTION__, #cond); return; }}
++#define DRM_RETURN_VAL_IF_FAIL(cond, val) {if (!(cond)) { fprintf (stderr, "[%s] : '%s' failed.\n", __FUNCTION__, #cond); return val; }}
++
++#define MGR_IS_VALID(mgr) (mgr && \
++ mgr->link.next && \
++ mgr->link.next->prev == &mgr->link)
++#define BO_IS_VALID(bo) (bo && \
++ MGR_IS_VALID(bo->bufmgr) && \
++ bo->list.next && \
++ bo->list.next->prev == &bo->list)
++
++typedef struct{
++ void* data;
++
++ int is_valid;
++ drm_data_free free_func ;
++}drm_slp_user_data;
++
++static struct list_head *gBufMgrs = NULL;
++
++static int
++_sem_wait_wrapper(sem_t* sem)
++{
++ int res = 0;
++ int num_try = NUM_TRY_LOCK;
++
++ do
++ {
++ res = sem_wait(sem);
++ num_try--;
++ } while((res == -1) && (errno == EINTR) && (num_try >= 0));
++
++ if(res == -1)
++ {
++ fprintf(stderr,
++ "[libdrm] error %s:%d(sem:%p, num_try:%d) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ sem,
++ num_try,
++ getpid());
++ return 0;
++ }
++#if SEM_DEBUG
++ else
++ {
++ fprintf(stderr,
++ "[libdrm] LOCK >> %s:%d(sem:%p, num_try:%d) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ sem,
++ num_try,
++ getpid());
++ }
++#endif
++
++ return 1;
++}
++
++static int
++_sem_post_wrapper(sem_t* sem)
++{
++ int res = 0;
++ int num_try = NUM_TRY_LOCK;
++
++ do
++ {
++ res = sem_post(sem);
++ num_try--;
++
++ } while((res == -1) && (errno == EINTR) && (num_try >= 0));
++
++ if(res == -1)
++ {
++ fprintf(stderr,
++ "[libdrm] error %s:%d(sem:%p, num_try:%d) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ sem,
++ num_try,
++ getpid());
++ return 0;
++ }
++#if SEM_DEBUG
++ else
++ {
++ fprintf(stderr,
++ "[libdrm] UNLOCK << %s:%d(sem:%p, num_try:%d) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ sem,
++ num_try,
++ getpid());
++ }
++#endif
++
++ return 1;
++}
++
++static int
++_sem_open(drm_slp_bufmgr bufmgr)
++{
++ bufmgr->semObj.handle = sem_open(SEM_NAME, O_CREAT, 0777, 1);
++ if(bufmgr->semObj.handle == SEM_FAILED)
++ {
++ fprintf(stderr,
++ "[libdrm] error %s:%d(name:%s) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ SEM_NAME,
++ getpid());
++ bufmgr->semObj.handle = NULL;
++ return 0;
++ }
++#if SEM_DEBUG
++ else
++ {
++ fprintf(stderr,
++ "[libdrm] OPEN %s:%d(sem:%p) PID:%04d\n",
++ __FUNCTION__,
++ __LINE__,
++ bufmgr->semObj.handle,
++ getpid());
++ }
++#endif
++
++ bufmgr->semObj.status = STATUS_UNLOCK;
++
++ return 1;
++}
++
++static int
++_sem_close(drm_slp_bufmgr bufmgr)
++{
++ _sem_wait_wrapper(bufmgr->semObj.handle);
++ sem_unlink(SEM_NAME);
++ return 1;
++}
++
++static int
++_sem_lock(drm_slp_bufmgr bufmgr)
++{
++ if(bufmgr->semObj.status != STATUS_UNLOCK) return 0;
++
++ if(!_sem_wait_wrapper(bufmgr->semObj.handle)) return 0;
++ bufmgr->semObj.status = STATUS_LOCK;
++ return 1;
++}
++
++static int
++_sem_unlock(drm_slp_bufmgr bufmgr)
++{
++ if(bufmgr->semObj.status != STATUS_LOCK) return 0;
++
++ _sem_post_wrapper(bufmgr->semObj.handle);
++ bufmgr->semObj.status = STATUS_UNLOCK;
++ return 1;
++}
++
++static drm_slp_bufmgr
++_load_bufmgr(int fd, const char *file, void *arg)
++{
++ char path[PATH_MAX] = {0,};
++ drm_slp_bufmgr bufmgr = NULL;
++ int (*bufmgr_init)(drm_slp_bufmgr bufmgr, int fd, void *arg);
++ void *module;
++
++ snprintf(path, sizeof(path), BUFMGR_DIR "/%s", file);
++
++ module = dlopen(path, RTLD_LAZY);
++ if (!module) {
++ fprintf(stderr,
++ "[libdrm] failed to load module: %s(%s)\n",
++ dlerror(), file);
++ return NULL;
++ }
++
++ bufmgr_init = dlsym(module, "init_slp_bufmgr");
++ if (!bufmgr_init) {
++ fprintf(stderr,
++ "[libdrm] failed to lookup init function: %s(%s)\n",
++ dlerror(), file);
++ return NULL;
++ }
++
++ bufmgr = calloc(sizeof(struct _drm_slp_bufmgr), 1);
++ if(!bufmgr)
++ {
++ return NULL;
++ }
++
++ if(!bufmgr_init(bufmgr, fd, arg))
++ {
++ fprintf(stderr,"[libdrm] Fail to init module(%s)\n", file);
++ free(bufmgr);
++ bufmgr = NULL;
++ return NULL;
++ }
++
++ fprintf(stderr,"[libdrm] Success to load module(%s)\n", file);
++
++ return bufmgr;
++}
++
++drm_slp_bufmgr
++drm_slp_bufmgr_init(int fd, void *arg)
++{
++ drm_slp_bufmgr bufmgr = NULL;
++ const char *p = NULL;
++
++ if (fd < 0)
++ return NULL;
++
++ if(gBufMgrs == NULL)
++ {
++ gBufMgrs = malloc(sizeof(struct list_head));
++ LIST_INITHEAD(gBufMgrs);
++ }
++ else
++ {
++ LIST_FOR_EACH_ENTRY(bufmgr, gBufMgrs, link)
++ {
++ if(bufmgr->drm_fd == fd)
++ {
++ bufmgr->ref_count++;
++ fprintf(stderr, "[libdrm] bufmgr ref: fd=%d, ref_count:%d\n", fd, bufmgr->ref_count);
++ return bufmgr;
++ }
++ }
++ bufmgr = NULL;
++ }
++ fprintf(stderr, "[libdrm] bufmgr init: fd=%d\n", fd);
++
++ p = getenv ("SLP_BUFMGR_MODULE");
++ if (p)
++ {
++ char file[PATH_MAX] = {0,};
++ snprintf(file, sizeof(file), PREFIX_LIB"%s"SUFFIX_LIB, p);
++ bufmgr = _load_bufmgr (fd, file, arg);
++ }
++
++ if (!bufmgr)
++ bufmgr = _load_bufmgr (fd, DEFAULT_LIB, arg);
++
++ if (!bufmgr)
++ {
++ struct dirent **namelist;
++ int found = 0;
++ int n;
++
++ n = scandir(BUFMGR_DIR, &namelist, 0, alphasort);
++ if (n < 0)
++ fprintf(stderr,"[libdrm] no files : %s\n", BUFMGR_DIR);
++ else
++ {
++ while(n--)
++ {
++ if (!found && strstr (namelist[n]->d_name, PREFIX_LIB))
++ {
++ char *p = strstr (namelist[n]->d_name, SUFFIX_LIB);
++ if (!strcmp (p, SUFFIX_LIB))
++ {
++ bufmgr = _load_bufmgr (fd, namelist[n]->d_name, arg);
++ if (bufmgr)
++ found = 1;
++ }
++ }
++ free(namelist[n]);
++ }
++ free(namelist);
++ }
++ }
++
++ if (!bufmgr)
++ {
++ fprintf(stderr,"[libdrm] backend is NULL.\n");
++ return NULL;
++ }
++
++ if (pthread_mutex_init(&bufmgr->lock, NULL) != 0)
++ {
++ bufmgr->bufmgr_destroy(bufmgr);
++ free(bufmgr);
++ return NULL;
++ }
++
++ bufmgr->ref_count = 1;
++ bufmgr->drm_fd = fd;
++
++ LIST_INITHEAD(&bufmgr->bos);
++ LIST_ADD(&bufmgr->link, gBufMgrs);
++
++ return bufmgr;
++}
++
++void
++drm_slp_bufmgr_destroy(drm_slp_bufmgr bufmgr)
++{
++ DRM_RETURN_IF_FAIL(MGR_IS_VALID(bufmgr));
++
++ fprintf(stderr, "[DRM] bufmgr destroy: bufmgr:%p, drm_fd:%d\n",
++ bufmgr, bufmgr->drm_fd);
++
++ /*Check and Free bos*/
++ if(!LIST_IS_EMPTY(&bufmgr->bos))
++ {
++ drm_slp_bo bo, tmp;
++
++ LIST_FOR_EACH_ENTRY_SAFE(bo, tmp, &bufmgr->bos, list)
++ {
++ fprintf(stderr, "[libdrm] Un-freed bo(%p, ref:%d) \n", bo, bo->ref_cnt);
++ bo->ref_cnt = 1;
++ drm_slp_bo_unref(bo);
++ }
++ }
++
++ LIST_DEL(&bufmgr->link);
++ bufmgr->bufmgr_destroy(bufmgr);
++
++ if(bufmgr->semObj.isOpened)
++ {
++ _sem_close(bufmgr);
++ }
++
++ pthread_mutex_destroy(&bufmgr->lock);
++ free(bufmgr);
++}
++
++int
++drm_slp_bufmgr_lock(drm_slp_bufmgr bufmgr)
++{
++ DRM_RETURN_VAL_IF_FAIL(MGR_IS_VALID(bufmgr), 0);
++
++ pthread_mutex_lock(&bufmgr->lock);
++
++ if(bufmgr->bufmgr_lock)
++ {
++ int ret;
++ ret = bufmgr->bufmgr_lock(bufmgr);
++ pthread_mutex_unlock(&bufmgr->lock);
++ return ret;
++ }
++
++ if(!bufmgr->semObj.isOpened)
++ {
++ if(_sem_open(bufmgr) != 1)
++ {
++ pthread_mutex_unlock(&bufmgr->lock);
++ return 0;
++ }
++ bufmgr->semObj.isOpened = 1;
++ }
++
++ if(_sem_lock(bufmgr) != 1)
++ {
++ pthread_mutex_unlock(&bufmgr->lock);
++ return 0;
++ }
++
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return 1;
++}
++
++int
++drm_slp_bufmgr_unlock(drm_slp_bufmgr bufmgr)
++{
++ DRM_RETURN_VAL_IF_FAIL(MGR_IS_VALID(bufmgr), 0);
++
++ pthread_mutex_lock(&bufmgr->lock);
++
++ if(bufmgr->bufmgr_unlock)
++ {
++ int ret;
++ ret = bufmgr->bufmgr_unlock(bufmgr);
++ pthread_mutex_unlock(&bufmgr->lock);
++ return ret;
++ }
++
++ if(_sem_unlock(bufmgr) != 1)
++ {
++ pthread_mutex_unlock(&bufmgr->lock);
++ return 0;
++ }
++
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return 1;
++}
++
++int
++drm_slp_bufmgr_cache_flush(drm_slp_bufmgr bufmgr, drm_slp_bo bo, int flags)
++{
++ int ret;
++
++ DRM_RETURN_VAL_IF_FAIL(MGR_IS_VALID(bufmgr) || BO_IS_VALID(bo), 0);
++
++ if (!bo)
++ flags |= DRM_SLP_CACHE_ALL;
++
++ if (bo)
++ {
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ if(!bo->bufmgr)
++ return 0;
++
++ pthread_mutex_lock(&bo->bufmgr->lock);
++ ret = bo->bufmgr->bufmgr_cache_flush(bufmgr, bo, flags);
++ pthread_mutex_unlock(&bo->bufmgr->lock);
++ }
++ else
++ {
++ pthread_mutex_lock(&bufmgr->lock);
++ ret = bufmgr->bufmgr_cache_flush(bufmgr, NULL, flags);
++ pthread_mutex_unlock(&bufmgr->lock);
++ }
++
++ return ret;
++}
++
++int
++drm_slp_bo_size(drm_slp_bo bo)
++{
++ int size;
++ drm_slp_bufmgr bufmgr;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ bufmgr = bo->bufmgr;
++
++ pthread_mutex_lock(&bufmgr->lock);
++ size = bo->bufmgr->bo_size(bo);
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return size;
++}
++
++drm_slp_bo
++drm_slp_bo_ref(drm_slp_bo bo)
++{
++ drm_slp_bufmgr bufmgr;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), NULL);
++
++ bufmgr = bo->bufmgr;
++
++ pthread_mutex_lock(&bufmgr->lock);
++
++ bo->ref_cnt++;
++
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return bo;
++}
++
++void
++drm_slp_bo_unref(drm_slp_bo bo)
++{
++ drm_slp_bufmgr bufmgr;
++
++ DRM_RETURN_IF_FAIL(BO_IS_VALID(bo));
++
++ bufmgr = bo->bufmgr;
++
++ if(0 >= bo->ref_cnt)
++ return;
++
++ pthread_mutex_lock(&bufmgr->lock);
++
++ bo->ref_cnt--;
++ if(bo->ref_cnt == 0)
++ {
++ if(bo->user_data)
++ {
++ void* rd;
++ drm_slp_user_data* old_data;
++ unsigned long key;
++
++ while(1==drmSLFirst(bo->user_data, &key, &rd))
++ {
++ old_data = (drm_slp_user_data*)rd;
++
++ if(old_data->is_valid && old_data->free_func)
++ {
++ if(old_data->data)
++ old_data->free_func(old_data->data);
++ old_data->data = NULL;
++ free(old_data);
++ }
++ drmSLDelete(bo->user_data, key);
++ }
++
++ drmSLDestroy(bo->user_data);
++ bo->user_data = (void*)0;
++ }
++
++ LIST_DEL(&bo->list);
++ bufmgr->bo_free(bo);
++
++ free(bo);
++ }
++
++ pthread_mutex_unlock(&bufmgr->lock);
++}
++
++drm_slp_bo
++drm_slp_bo_alloc(drm_slp_bufmgr bufmgr, const char * name, int size, int flags)
++{
++ drm_slp_bo bo=NULL;
++
++ DRM_RETURN_VAL_IF_FAIL( MGR_IS_VALID(bufmgr) && (size > 0), NULL);
++
++ bo = calloc(sizeof(struct _drm_slp_bo), 1);
++ if(!bo)
++ return NULL;
++
++ bo->bufmgr = bufmgr;
++
++ pthread_mutex_lock(&bufmgr->lock);
++ if(!bufmgr->bo_alloc(bo, name, size, flags))
++ {
++ free(bo);
++ pthread_mutex_unlock(&bufmgr->lock);
++ return NULL;
++ }
++ bo->ref_cnt = 1;
++ LIST_ADD(&bo->list, &bufmgr->bos);
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return bo;
++}
++
++drm_slp_bo
++drm_slp_bo_attach(drm_slp_bufmgr bufmgr,
++ const char* name,
++ int type,
++ int size,
++ unsigned int handle)
++{
++ drm_slp_bo bo;
++
++ DRM_RETURN_VAL_IF_FAIL(MGR_IS_VALID(bufmgr), NULL);
++
++ bo = calloc(sizeof(struct _drm_slp_bo), 1);
++ if(!bo)
++ return NULL;
++
++ bo->bufmgr = bufmgr;
++
++ pthread_mutex_lock(&bufmgr->lock);
++ if(!bufmgr->bo_attach(bo, name, type, size, handle))
++ {
++ free(bo);
++ pthread_mutex_unlock(&bufmgr->lock);
++ return NULL;
++ }
++ bo->ref_cnt = 1;
++ LIST_ADD(&bo->list, &bufmgr->bos);
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return bo;
++}
++
++drm_slp_bo
++drm_slp_bo_import(drm_slp_bufmgr bufmgr, unsigned int key)
++{
++ drm_slp_bo bo;
++
++ DRM_RETURN_VAL_IF_FAIL(MGR_IS_VALID(bufmgr), NULL);
++
++ bo = calloc(sizeof(struct _drm_slp_bo), 1);
++ if(!bo)
++ return NULL;
++
++ bo->bufmgr = bufmgr;
++
++ pthread_mutex_lock(&bufmgr->lock);
++ if(!bufmgr->bo_import(bo, key))
++ {
++ free(bo);
++ pthread_mutex_unlock(&bufmgr->lock);
++ return NULL;
++ }
++ bo->ref_cnt = 1;
++ LIST_ADD(&bo->list, &bufmgr->bos);
++ pthread_mutex_unlock(&bufmgr->lock);
++
++ return bo;
++}
++
++unsigned int
++drm_slp_bo_export(drm_slp_bo bo)
++{
++ int ret;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ pthread_mutex_lock(&bo->bufmgr->lock);
++ ret = bo->bufmgr->bo_export(bo);
++ pthread_mutex_unlock(&bo->bufmgr->lock);
++
++ return ret;
++}
++
++unsigned int
++drm_slp_bo_get_handle(drm_slp_bo bo, int device)
++{
++ unsigned int ret;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ pthread_mutex_lock(&bo->bufmgr->lock);
++ ret = bo->bufmgr->bo_get_handle(bo, device);
++ pthread_mutex_unlock(&bo->bufmgr->lock);
++
++ return ret;
++}
++
++unsigned int
++drm_slp_bo_map(drm_slp_bo bo, int device, int opt)
++{
++ unsigned int ret;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ pthread_mutex_lock(&bo->bufmgr->lock);
++ if(bo->bufmgr->bo_lock)
++ {
++ bo->bufmgr->bo_lock(bo, 0, (void*)0);
++ }
++
++ ret = bo->bufmgr->bo_map(bo, device, opt);
++ pthread_mutex_unlock(&bo->bufmgr->lock);
++
++ return ret;
++}
++
++int
++drm_slp_bo_unmap(drm_slp_bo bo, int device)
++{
++ int ret;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ pthread_mutex_lock(&bo->bufmgr->lock);
++ ret = bo->bufmgr->bo_unmap(bo, device);
++
++ if(bo->bufmgr->bo_unlock)
++ {
++ bo->bufmgr->bo_unlock(bo);
++ }
++ pthread_mutex_unlock(&bo->bufmgr->lock);
++
++ return 0;
++}
++
++int
++drm_slp_bo_swap(drm_slp_bo bo1, drm_slp_bo bo2)
++{
++ void* temp;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo1), 0);
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo2), 0);
++
++ if(bo1->bufmgr->bo_size(bo1) != bo2->bufmgr->bo_size(bo2))
++ return 0;
++
++ pthread_mutex_lock(&bo1->bufmgr->lock);
++ temp = bo1->priv;
++ bo1->priv = bo2->priv;
++ bo2->priv = temp;
++ pthread_mutex_unlock(&bo1->bufmgr->lock);
++
++ return 1;
++}
++
++int
++drm_slp_bo_add_user_data(drm_slp_bo bo, unsigned long key, drm_data_free data_free_func)
++{
++ int ret;
++ drm_slp_user_data* data;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ if(!bo->user_data)
++ bo->user_data = drmSLCreate();
++
++ data = calloc(1, sizeof(drm_slp_user_data));
++ if(!data)
++ return 0;
++
++ data->free_func = data_free_func;
++ data->data = (void*)0;
++ data->is_valid = 0;
++
++ ret = drmSLInsert(bo->user_data, key, data);
++ if(ret == 1) /* Already in list */
++ {
++ free(data);
++ return 0;
++ }
++
++ return 1;
++}
++
++int
++drm_slp_bo_set_user_data(drm_slp_bo bo, unsigned long key, void* data)
++{
++ void *rd;
++ drm_slp_user_data* old_data;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo), 0);
++
++ if(!bo->user_data)
++ return 0;
++
++ if(drmSLLookup(bo->user_data, key, &rd))
++ return 0;
++
++ old_data = (drm_slp_user_data*)rd;
++ if (!old_data)
++ return 0;
++
++ if(old_data->is_valid)
++ {
++ if(old_data->free_func)
++ {
++ if(old_data->data)
++ old_data->free_func(old_data->data);
++ old_data->data = NULL;
++ }
++ }
++ else
++ old_data->is_valid = 1;
++
++ old_data->data = data;
++
++ return 1;
++}
++
++int
++drm_slp_bo_get_user_data(drm_slp_bo bo, unsigned long key, void** data)
++{
++ void *rd;
++ drm_slp_user_data* old_data;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo) && data && bo->user_data, 0);
++
++ if(drmSLLookup(bo->user_data, key, &rd))
++ {
++ *data = NULL;
++ return 0;
++ }
++
++ old_data = (drm_slp_user_data*)rd;
++ if (!old_data)
++ {
++ *data = NULL;
++ return 0;
++ }
++
++ *data = old_data->data;
++
++ return 1;
++}
++
++int
++drm_slp_bo_delete_user_data(drm_slp_bo bo, unsigned long key)
++{
++ void *rd;
++ drm_slp_user_data* old_data=(void*)0;
++
++ DRM_RETURN_VAL_IF_FAIL(BO_IS_VALID(bo) && bo->user_data, 0);
++
++ if(drmSLLookup(bo->user_data, key, &rd))
++ return 0;
++
++ old_data = (drm_slp_user_data*)rd;
++ if (!old_data)
++ return 0;
++
++ if(old_data->is_valid && old_data->free_func)
++ {
++ if(old_data->data)
++ old_data->free_func(old_data->data);
++ free(old_data);
++ }
++ drmSLDelete(bo->user_data, key);
++
++ return 1;
++}
+diff --git a/slp/drm_slp_bufmgr.h b/slp/drm_slp_bufmgr.h
+new file mode 100644
+index 0000000..a4adef5
+--- /dev/null
++++ b/slp/drm_slp_bufmgr.h
+@@ -0,0 +1,201 @@
++/**************************************************************************
++
++xserver-xorg-video-sec
++
++Copyright 2011 Samsung Electronics co., Ltd. All Rights Reserved.
++
++Contact: SooChan Lim <sc1.lim@samsung.com>, Sangjin Lee <lsj119@samsung.com>
++
++Permission is hereby granted, free of charge, to any person obtaining a
++copy of this software and associated documentation files (the
++"Software"), to deal in the Software without restriction, including
++without limitation the rights to use, copy, modify, merge, publish,
++distribute, sub license, and/or sell copies of the Software, and to
++permit persons to whom the Software is furnished to do so, subject to
++the following conditions:
++
++The above copyright notice and this permission notice (including the
++next paragraph) shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
++OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
++MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
++IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
++ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
++TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
++SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
++
++**************************************************************************/
++
++#ifndef _DRM_SLP_BUFMGR_H_
++#define _DRM_SLP_BUFMGR_H_
++
++#include <semaphore.h>
++#include <pthread.h>
++#include <xf86drm.h>
++
++typedef struct _drm_slp_bo * drm_slp_bo;
++typedef struct _drm_slp_bufmgr * drm_slp_bufmgr;
++
++struct list_head
++{
++ struct list_head *prev;
++ struct list_head *next;
++};
++
++struct _drm_slp_bo
++{
++ struct list_head list;
++ drm_slp_bufmgr bufmgr;
++ int ref_cnt; /*atomic count*/
++ void *user_data;
++
++ /* private data */
++ void *priv;
++};
++
++typedef enum
++{
++ STATUS_UNLOCK,
++ STATUS_READY_TO_LOCK,
++ STATUS_LOCK,
++} lock_status;
++
++struct _drm_slp_bufmgr
++{
++ struct list_head bos; /*list head of bo*/
++
++ pthread_mutex_t lock;
++ struct {
++ int isOpened;
++ lock_status status;
++ sem_t* handle;
++ } semObj;
++
++ void (*bufmgr_destroy)(drm_slp_bufmgr bufmgr);
++ int (*bufmgr_cache_flush)(drm_slp_bufmgr bufmgr, drm_slp_bo bo, int flags);
++
++ int (*bo_size)(drm_slp_bo bo);
++
++ void (*bo_free)(drm_slp_bo bo);
++ int (*bo_alloc)(drm_slp_bo bo,
++ const char* name,
++ int size,
++ int flags);
++ int (*bo_attach)(drm_slp_bo bo,
++ const char* name,
++ int type,
++ int size,
++ unsigned int handle);
++ int (*bo_import)(drm_slp_bo bo, unsigned int key);
++ unsigned int (*bo_export)(drm_slp_bo bo);
++
++ unsigned int (*bo_get_handle)(drm_slp_bo bo, int device);
++ unsigned int (*bo_map)(drm_slp_bo bo, int device, int opt);
++ int (*bo_unmap)(drm_slp_bo bo, int device);
++
++
++ /* Padding for future extension */
++ int (*bufmgr_lock) (drm_slp_bufmgr bufmgr);
++ int (*bufmgr_unlock) (drm_slp_bufmgr bufmgr);
++ int (*bo_lock) (drm_slp_bo bo, unsigned int checkOnly, unsigned int* isLocked);
++ int (*bo_unlock) (drm_slp_bo bo);
++ void (*reserved5) (void);
++ void (*reserved6) (void);
++
++ /* private data */
++ void *priv;
++
++ struct list_head link; /*link of bufmgr*/
++
++ int drm_fd;
++ int ref_count;
++};
++
++/* DRM_SLP_MEM_TYPE */
++#define DRM_SLP_MEM_GEM 0
++#define DRM_SLP_MEM_USERPTR 1
++#define DRM_SLP_MEM_DMABUF 2
++#define DRM_SLP_MEM_GPU 3
++
++/* DRM_SLP_DEVICE_TYPE */
++#define DRM_SLP_DEVICE_DEFAULT 0 //Default handle
++#define DRM_SLP_DEVICE_CPU 1
++#define DRM_SLP_DEVICE_2D 2
++#define DRM_SLP_DEVICE_3D 3
++#define DRM_SLP_DEVICE_MM 4
++
++/* DRM_SLP_OPTION */
++#define DRM_SLP_OPTION_READ (1 << 0)
++#define DRM_SLP_OPTION_WRITE (1 << 1)
++
++/* DRM_SLP_CACHE */
++#define DRM_SLP_CACHE_INV 0x01
++#define DRM_SLP_CACHE_CLN 0x02
++#define DRM_SLP_CACHE_ALL 0x10
++#define DRM_SLP_CACHE_FLUSH (DRM_SLP_CACHE_INV|DRM_SLP_CACHE_CLN)
++#define DRM_SLP_CACHE_FLUSH_ALL (DRM_SLP_CACHE_FLUSH|DRM_SLP_CACHE_ALL)
++
++enum DRM_SLP_BO_FLAGS{
++ DRM_SLP_BO_DEFAULT = 0,
++ DRM_SLP_BO_SCANOUT = (1<<0),
++ DRM_SLP_BO_NONCACHABLE = (1<<1),
++ DRM_SLP_BO_WC = (1<<2),
++};
++
++/* Functions for buffer mnager */
++drm_slp_bufmgr
++drm_slp_bufmgr_init(int fd, void * arg);
++void
++drm_slp_bufmgr_destroy(drm_slp_bufmgr bufmgr);
++int
++drm_slp_bufmgr_lock(drm_slp_bufmgr bufmgr);
++int
++drm_slp_bufmgr_unlock(drm_slp_bufmgr bufmgr);
++int
++drm_slp_bufmgr_cache_flush(drm_slp_bufmgr bufmgr, drm_slp_bo bo, int flags);
++
++
++/*Functions for bo*/
++int
++drm_slp_bo_size (drm_slp_bo bo);
++drm_slp_bo
++drm_slp_bo_ref(drm_slp_bo bo);
++void
++drm_slp_bo_unref(drm_slp_bo bo);
++drm_slp_bo
++drm_slp_bo_alloc(drm_slp_bufmgr bufmgr,
++ const char* name,
++ int size,
++ int flags);
++drm_slp_bo
++drm_slp_bo_attach(drm_slp_bufmgr bufmgr,
++ const char* name,
++ int type,
++ int size,
++ unsigned int handle);
++drm_slp_bo
++drm_slp_bo_import(drm_slp_bufmgr bufmgr, unsigned int key);
++unsigned int
++drm_slp_bo_export(drm_slp_bo bo);
++unsigned int
++drm_slp_bo_get_handle(drm_slp_bo, int device);
++unsigned int
++drm_slp_bo_map(drm_slp_bo bo, int device, int opt);
++int
++drm_slp_bo_unmap(drm_slp_bo bo, int device);
++int
++drm_slp_bo_swap(drm_slp_bo bo1, drm_slp_bo bo2);
++
++/*Functions for userdata of bo*/
++typedef void (*drm_data_free)(void *);
++int
++drm_slp_bo_add_user_data(drm_slp_bo bo, unsigned long key, drm_data_free data_free_func);
++int
++drm_slp_bo_delete_user_data(drm_slp_bo bo, unsigned long key);
++int
++drm_slp_bo_set_user_data(drm_slp_bo bo, unsigned long key, void* data);
++int
++drm_slp_bo_get_user_data(drm_slp_bo bo, unsigned long key, void** data);
++#endif /* _DRM_SLP_BUFMGR_H_ */
+diff --git a/slp/libdrm_slp.pc.in b/slp/libdrm_slp.pc.in
+new file mode 100644
+index 0000000..220d38b
+--- /dev/null
++++ b/slp/libdrm_slp.pc.in
+@@ -0,0 +1,11 @@
++prefix=@prefix@
++exec_prefix=@exec_prefix@
++libdir=@libdir@
++includedir=@includedir@
++
++Name: libdrm
++Description: Userspace interface to kernel DRM services
++Version: @PACKAGE_VERSION@
++Requires: libdrm
++Libs: -L${libdir} -ldrm_slp
++Cflags: -I${includedir} -I${includedir}/libdrm
+diff --git a/slp/list.h b/slp/list.h
+new file mode 100644
+index 0000000..e967b93
+--- /dev/null
++++ b/slp/list.h
+@@ -0,0 +1,131 @@
++/*
++ *
++ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
++ * All Rights Reserved.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a
++ * copy of this software and associated documentation files (the
++ * "Software"), to deal in the Software without restriction, including
++ * without limitation the rights to use, copy, modify, merge, publish,
++ * distribute, sub license, and/or sell copies of the Software, and to
++ * permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
++ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
++ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
++ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
++ * USE OR OTHER DEALINGS IN THE SOFTWARE.
++ *
++ * The above copyright notice and this permission notice (including the
++ * next paragraph) shall be included in all copies or substantial portions
++ * of the Software.
++ *
++ */
++
++/**
++ * \file
++ * List macros heavily inspired by the Linux kernel
++ * list handling. No list looping yet.
++ *
++ * Is not threadsafe, so common operations need to
++ * be protected using an external mutex.
++ */
++#ifndef _U_DOUBLE_LIST_H_
++#define _U_DOUBLE_LIST_H_
++
++#include <stddef.h>
++
++static void list_inithead(struct list_head *item)
++{
++ item->prev = item;
++ item->next = item;
++}
++
++static inline void list_add(struct list_head *item, struct list_head *list)
++{
++ item->prev = list;
++ item->next = list->next;
++ list->next->prev = item;
++ list->next = item;
++}
++
++static inline void list_addtail(struct list_head *item, struct list_head *list)
++{
++ item->next = list;
++ item->prev = list->prev;
++ list->prev->next = item;
++ list->prev = item;
++}
++
++static inline void list_replace(struct list_head *from, struct list_head *to)
++{
++ to->prev = from->prev;
++ to->next = from->next;
++ from->next->prev = to;
++ from->prev->next = to;
++}
++
++static inline void list_del(struct list_head *item)
++{
++ item->prev->next = item->next;
++ item->next->prev = item->prev;
++}
++
++static inline void list_delinit(struct list_head *item)
++{
++ item->prev->next = item->next;
++ item->next->prev = item->prev;
++ item->next = item;
++ item->prev = item;
++}
++
++#define LIST_INITHEAD(__item) list_inithead(__item)
++#define LIST_ADD(__item, __list) list_add(__item, __list)
++#define LIST_ADDTAIL(__item, __list) list_addtail(__item, __list)
++#define LIST_REPLACE(__from, __to) list_replace(__from, __to)
++#define LIST_DEL(__item) list_del(__item)
++#define LIST_DELINIT(__item) list_delinit(__item)
++
++#define LIST_ENTRY(__type, __item, __field) \
++ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
++
++#define LIST_IS_EMPTY(__list) \
++ ((__list)->next == (__list))
++
++#ifndef container_of
++#define container_of(ptr, sample, member) \
++ (void *)((char *)(ptr) \
++ - ((char *)&(sample)->member - (char *)(sample)))
++#endif
++
++#define LIST_FOR_EACH_ENTRY(pos, head, member) \
++ for (pos = container_of((head)->next, pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.next, pos, member))
++
++#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
++ for (pos = container_of((head)->next, pos, member), \
++ storage = container_of(pos->member.next, pos, member); \
++ &pos->member != (head); \
++ pos = storage, storage = container_of(storage->member.next, storage, member))
++
++#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
++ for (pos = container_of((head)->prev, pos, member), \
++ storage = container_of(pos->member.prev, pos, member); \
++ &pos->member != (head); \
++ pos = storage, storage = container_of(storage->member.prev, storage, member))
++
++#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
++ for (pos = container_of((start), pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.next, pos, member))
++
++#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
++ for (pos = container_of((start), pos, member); \
++ &pos->member != (head); \
++ pos = container_of(pos->member.prev, pos, member))
++
++#endif /*_U_DOUBLE_LIST_H_*/
+--
+1.7.3.4
+
--- /dev/null
+From a770b7404b7611043701e56332eede3f19f4d7c5 Mon Sep 17 00:00:00 2001
+From: Prajwal Mohan <prajwal.karur.mohan@intel.com>
+Date: Wed, 10 Oct 2012 10:10:45 -0700
+Subject: [PATCH 2/2] Adding changes to configure.ac
+
+---
+ configure.ac | 24 +++++++++++++++++++++++-
+ 1 files changed, 23 insertions(+), 1 deletions(-)
+
+diff --git a/configure.ac b/configure.ac
+index 9506827..97f62dc 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -93,6 +93,19 @@ AC_ARG_ENABLE(exynos-experimental-api,
+ [Enable support for EXYNOS's experimental API (default: disabled)]),
+ [EXYNOS=$enableval], [EXYNOS=no])
+
++AC_ARG_ENABLE(slp,
++ AS_HELP_STRING([--disable-slp],
++ [Enable support for slp's API (default: auto)]),
++ [SLP=$enableval], [SLP=auto])
++
++AC_ARG_VAR([bufmgr_dir], [Directory of slp-bufmgr])
++
++if test "x$bufmgr_dir" = xyes; then
++ AC_DEFINE_UNQUOTED(BUFMGR_DIR, "$bufmgr_dir", [Directory for the modules of slp_bufmgr])
++else
++ AC_DEFINE(BUFMGR_DIR, "/usr/lib/bufmgr", [Directory for the modules of slp_bufmgr])
++fi
++
+ dnl ===========================================================================
+ dnl check compiler flags
+ AC_DEFUN([LIBDRM_CC_TRY_FLAG], [
+@@ -214,7 +227,7 @@ if test "x$HAVE_LIBUDEV" = xyes; then
+ fi
+ AM_CONDITIONAL(HAVE_LIBUDEV, [test "x$HAVE_LIBUDEV" = xyes])
+
+-if test "x$INTEL" != "xno" -o "x$RADEON" != "xno" -o "x$NOUVEAU" != "xno" -o "x$OMAP" != "xno"; then
++if test "x$INTEL" != "xno" -o "x$RADEON" != "xno" -o "x$NOUVEAU" != "xno" -o "x$OMAP" != "xno" -o "x$SLP" != "xno"; then
+ # Check for atomic intrinsics
+ AC_CACHE_CHECK([for native atomic primitives], drm_cv_atomic_primitives,
+ [
+@@ -286,6 +299,10 @@ if test "x$INTEL" != "xno" -o "x$RADEON" != "xno" -o "x$NOUVEAU" != "xno" -o "x$
+ fi
+ fi
+
++if test "x$SLP" != "xno"; then
++ AC_DEFINE(HAVE_SLP, 1, [Have slp])
++fi
++
+ if test "x$INTEL" != "xno"; then
+ PKG_CHECK_MODULES(PCIACCESS, [pciaccess >= 0.10])
+ fi
+@@ -297,6 +314,7 @@ if test "x$have_valgrind" = "xyes"; then
+ AC_DEFINE([HAVE_VALGRIND], 1, [Use valgrind intrinsics to suppress false warnings])
+ fi
+
++AM_CONDITIONAL(HAVE_SLP, [test "x$SLP" != "xno"])
+ AM_CONDITIONAL(HAVE_INTEL, [test "x$INTEL" != "xno"])
+ AM_CONDITIONAL(HAVE_RADEON, [test "x$RADEON" != "xno"])
+ AM_CONDITIONAL(HAVE_NOUVEAU, [test "x$NOUVEAU" != "xno"])
+@@ -315,6 +333,8 @@ AC_CONFIG_FILES([
+ Makefile
+ libkms/Makefile
+ libkms/libkms.pc
++ slp/Makefile
++ slp/libdrm_slp.pc
+ intel/Makefile
+ intel/libdrm_intel.pc
+ radeon/Makefile
+@@ -346,4 +366,6 @@ echo " Radeon API $RADEON"
+ echo " Nouveau API $NOUVEAU"
+ echo " OMAP API $OMAP"
+ echo " EXYNOS API $EXYNOS"
++echo " SLP API $SLP"
++echo " SLP bufmgr_dir $bufmgr_dir"
+ echo ""
+--
+1.7.3.4
+
-* Mon Oct 01 2012 Rusty Lynch <rusty.lynch@intel.com> submit/trunk/20120813.1312@44dd45c
-- Adding slp buffer support
-- Adjusting spec to build in a Tizen 2.0 environment
-
-* Mon Aug 13 20:11:59 UTC 2012 - tracy.graydon@intel.com
-- Tagging
+* Mon Oct 22 23:58:52 UTC 2012 - tracy.graydon@intel.com
+- Branching for 1.0alpha
* Mon Aug 13 2012 Quanxian Wang <quanxian.wang@intel.com> - 2.4.37
- Updating to 2.4.37
-Name: libdrm
-Version: 2.4.37
-Release: 1
-License: MIT
-Summary: Userspace interface to kernel DRM services
-Group: System/Libraries
-Source0: %{name}-%{version}.tar.gz
-BuildRequires: kernel-headers
-BuildRequires: pkgconfig(xorg-macros)
+Name: libdrm
+Summary: Userspace interface to kernel DRM services -- runtime
+Version: 2.4.39
+Release: 1
+Group: libs
+License: MIT
+Source0: libdrm-%{version}.tar.bz2
+Patch0: 0001-Adding-slp-subpackage.patch
+Patch1: 0002-Adding-changes-to-configure.ac.patch
+Requires(post): /sbin/ldconfig
+Requires(postun): /sbin/ldconfig
+BuildRequires: pkgconfig(x11)
BuildRequires: pkgconfig(pthread-stubs)
BuildRequires: pkgconfig(pciaccess)
-BuildRequires: libatomic_ops-devel
+BuildRequires: automake
+BuildRequires: libtool
+
%description
-Description: %{summary}
+Userspace interface to kernel DRM services -- runtime
+ This library implements the userspace interface to the kernel DRM
+ services. DRM stands for "Direct Rendering Manager", which is the
+ kernelspace portion of the "Direct Rendering Infrastructure" (DRI).
+ The DRI is currently used on Linux to provide hardware-accelerated
+ OpenGL drivers.
+ .
+ This package provides the runtime environment for libdrm..
-%package devel
-Summary: Userspace interface to kernel DRM services
-Group: Development/Libraries
-Requires: kernel-headers
-Requires: libdrm2
-Requires: libdrm-slp1
-Requires: libkms1
-%description devel
-Userspace interface to kernel DRM services
-%package -n libdrm2
-Summary: Userspace interface to kernel DRM services
-Group: Development/Libraries
+%package devel
+Summary: Userspace interface to kernel DRM services -- development files
+Group: libdevel
+Requires: libdrm = %{version}-%{release}
+Obsoletes: linux-libc-dev >= 2.6.29
-%description -n libdrm2
-Userspace interface to kernel DRM services
+%description devel
+Userspace interface to kernel DRM services -- development files
+ This library implements the userspace interface to the kernel DRM
+ services. DRM stands for "Direct Rendering Manager", which is the
+ kernelspace portion of the "Direct Rendering Infrastructure" (DRI).
+ The DRI is currently used on Linux to provide hardware-accelerated
+ OpenGL drivers.
+ .
+ This package provides the development environment for libdrm.
%package slp1
Summary: Userspace interface to slp-specific kernel DRM services
%description slp1
Userspace interface to slp-specific kernel DRM services
-%package -n libkms1
-Summary: Userspace interface to kernel DRM buffer management
-Group: Development/Libraries
-%description -n libkms1
+%package -n libkms
+Summary: Userspace interface to kernel DRM buffer management
+Group: libs
+Requires(post): /sbin/ldconfig
+Requires(postun): /sbin/ldconfig
+
+%description -n libkms
Userspace interface to kernel DRM buffer management
+ This library implements a unified userspace interface to the different buffer
+ management interfaces of the kernel DRM hardware drivers.
%prep
-%setup -q
+%setup -q -n %{name}-%{version}
+%patch0 -p1
+%patch1 -p1
%build
-%reconfigure --prefix=%{_prefix} --mandir=%{_prefix}/share/man --infodir=%{_prefix}/share/info \
- --enable-static=yes --enable-udev --enable-libkms --enable-exynos-experimental-api \
- --disable-nouveau
-
-make %{?_smp_mflags}
+%configure --disable-static --enable-nouveau-experimental-api
+make %{?jobs:-j%jobs}
%install
+rm -rf %{buildroot}
%make_install
%post -p /sbin/ldconfig
-%postun -p /sbin/ldconfig
-%post -n libdrm2 -p /sbin/ldconfig
-%postun -n libdrm2 -p /sbin/ldconfig
+%postun -p /sbin/ldconfig
-%post slp1 -p /sbin/ldconfig
-%postun slp1 -p /sbin/ldconfig
+%post -n libkms -p /sbin/ldconfig
-%post -n libkms1 -p /sbin/ldconfig
-%postun -n libkms1 -p /sbin/ldconfig
+%postun -n libkms -p /sbin/ldconfig
+%files
+%defattr(-,root,root,-)
+%{_libdir}/libdrm.so.*
+%{_libdir}/libdrm_intel.so.*
+%{_libdir}/libdrm_radeon.so.*
+%{_libdir}/libdrm_nouveau.so.*
%files devel
-%dir %{_includedir}/libdrm
-%{_includedir}/*
-%{_includedir}/exynos/*
-%{_libdir}/libdrm.so
-%{_libdir}/libdrm_*.so
-%{_libdir}/libkms.so
-%{_libdir}/pkgconfig/*
+%defattr(-,root,root,-)
+%{_includedir}/libdrm/*
+%{_includedir}/xf86drmMode.h
+%{_includedir}/xf86drm.h
+%{_includedir}/libkms/*
-%files -n libdrm2
-%{_libdir}/libdrm.so.*
-%{_libdir}/libdrm_*.so.*
+%{_libdir}/lib*.so
+%{_libdir}/pkgconfig/*
%files slp1
%{_libdir}/libdrm_slp*.so.*
-%files -n libkms1
+%files -n libkms
+%defattr(-,root,root,-)
%{_libdir}/libkms.so.*
+
libdrm_radeon_la_LTLIBRARIES = libdrm_radeon.la
libdrm_radeon_ladir = $(libdir)
-libdrm_radeon_la_LDFLAGS = -version-number 1:0:0 -no-undefined
+libdrm_radeon_la_LDFLAGS = -version-number 1:0:1 -no-undefined
libdrm_radeon_la_LIBADD = ../libdrm.la @PTHREADSTUBS_LIBS@
libdrm_radeon_la_SOURCES = \
CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
+CHIPSET(0x68C7, REDWOOD_68C7, REDWOOD)
CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
+CHIPSET(0x688C, CYPRESS_688C, CYPRESS)
+CHIPSET(0x688D, CYPRESS_688D, CYPRESS)
CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
CHIPSET(0x6800, PITCAIRN_6800, PITCAIRN)
CHIPSET(0x6801, PITCAIRN_6801, PITCAIRN)
CHIPSET(0x6802, PITCAIRN_6802, PITCAIRN)
+CHIPSET(0x6806, PITCAIRN_6806, PITCAIRN)
CHIPSET(0x6808, PITCAIRN_6808, PITCAIRN)
CHIPSET(0x6809, PITCAIRN_6809, PITCAIRN)
CHIPSET(0x6810, PITCAIRN_6810, PITCAIRN)
+CHIPSET(0x6816, PITCAIRN_6816, PITCAIRN)
+CHIPSET(0x6817, PITCAIRN_6817, PITCAIRN)
CHIPSET(0x6818, PITCAIRN_6818, PITCAIRN)
CHIPSET(0x6819, PITCAIRN_6819, PITCAIRN)
CHIPSET(0x684C, PITCAIRN_684C, PITCAIRN)
#include "radeon_bo.h"
#include "radeon_bo_int.h"
#include "radeon_bo_gem.h"
-
+#include <fcntl.h>
struct radeon_bo_gem {
struct radeon_bo_int base;
uint32_t name;
sizeof(args));
return r;
}
+
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ int ret;
+
+ ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
+ return ret;
+}
+
+struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
+ int fd_handle,
+ uint32_t size)
+{
+ struct radeon_bo_gem *bo;
+ int r;
+ uint32_t handle;
+
+ bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ bo->base.bom = bom;
+ bo->base.handle = 0;
+ bo->base.size = size;
+ bo->base.alignment = 0;
+ bo->base.domains = RADEON_GEM_DOMAIN_GTT;
+ bo->base.flags = 0;
+ bo->base.ptr = NULL;
+ atomic_set(&bo->reloc_in_cs, 0);
+ bo->map_count = 0;
+
+ r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
+ if (r != 0) {
+ free(bo);
+ return NULL;
+ }
+
+ bo->base.handle = handle;
+ bo->name = handle;
+
+ radeon_bo_ref((struct radeon_bo *)bo);
+ return (struct radeon_bo *)bo;
+
+}
void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo);
int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name);
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle);
+struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
+ int fd_handle,
+ uint32_t size);
#endif
surf->level[level].nblk_x = (surf->level[level].npix_x + surf->blk_w - 1) / surf->blk_w;
surf->level[level].nblk_y = (surf->level[level].npix_y + surf->blk_h - 1) / surf->blk_h;
surf->level[level].nblk_z = (surf->level[level].npix_z + surf->blk_d - 1) / surf->blk_d;
- if (surf->level[level].mode == RADEON_SURF_MODE_2D) {
+ if (surf->nsamples == 1 && surf->level[level].mode == RADEON_SURF_MODE_2D) {
if (surf->level[level].nblk_x < xalign || surf->level[level].nblk_y < yalign) {
surf->level[level].mode = RADEON_SURF_MODE_1D;
return;
surf->level[level].nblk_z = ALIGN(surf->level[level].nblk_z, zalign);
surf->level[level].offset = offset;
- surf->level[level].pitch_bytes = surf->level[level].nblk_x * surf->bpe;
+ surf->level[level].pitch_bytes = surf->level[level].nblk_x * surf->bpe * surf->nsamples;
surf->level[level].slice_size = surf->level[level].pitch_bytes * surf->level[level].nblk_y;
surf->bo_size = offset + surf->level[level].slice_size * surf->level[level].nblk_z * surf->array_size;
surf->bo_alignment =
MAX2(surf_man->hw_info.num_pipes *
surf_man->hw_info.num_banks *
- surf->bpe * 64,
+ surf->nsamples * surf->bpe * 64,
xalign * yalign * surf->nsamples * surf->bpe);
}
unsigned mode;
int r;
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
/* tiling mode */
mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
- /* always enable z & stencil together */
- if (surf->flags & RADEON_SURF_ZBUFFER) {
- surf->flags |= RADEON_SURF_SBUFFER;
- }
- if (surf->flags & RADEON_SURF_SBUFFER) {
- surf->flags |= RADEON_SURF_ZBUFFER;
- }
- if (surf->flags & RADEON_SURF_ZBUFFER) {
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
/* zbuffer only support 1D or 2D tiled surface */
switch (mode) {
case RADEON_SURF_MODE_1D:
/* force 1d on kernel that can't do 2d */
if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
mode = RADEON_SURF_MODE_1D;
surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
surf->flags |= RADEON_SURF_SET(mode, MODE);
surf->level[level].nblk_x = (surf->level[level].npix_x + surf->blk_w - 1) / surf->blk_w;
surf->level[level].nblk_y = (surf->level[level].npix_y + surf->blk_h - 1) / surf->blk_h;
surf->level[level].nblk_z = (surf->level[level].npix_z + surf->blk_d - 1) / surf->blk_d;
- if (surf->level[level].mode == RADEON_SURF_MODE_2D) {
+ if (surf->nsamples == 1 && surf->level[level].mode == RADEON_SURF_MODE_2D) {
if (surf->level[level].nblk_x < mtilew || surf->level[level].nblk_y < mtileh) {
surf->level[level].mode = RADEON_SURF_MODE_1D;
return;
}
}
- if (surf->flags & RADEON_SURF_SBUFFER) {
+ /* The depth and stencil buffers are in separate resources on evergreen.
+ * We allocate them in one buffer next to each other to simplify
+ * communication between the DDX and the Mesa driver. */
+ if ((surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) ==
+ (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
surf->stencil_offset = ALIGN(surf->bo_size, surf->bo_alignment);
surf->bo_size = surf->stencil_offset + surf->bo_size / 4;
}
}
}
- if (surf->flags & RADEON_SURF_SBUFFER) {
+ if ((surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) ==
+ (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
surf->stencil_offset = ALIGN(surf->bo_size, surf->bo_alignment);
surf->bo_size = surf->stencil_offset + surf->bo_size / 4;
}
/* force 1d on kernel that can't do 2d */
if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
mode = RADEON_SURF_MODE_1D;
surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
surf->flags |= RADEON_SURF_SET(mode, MODE);
unsigned mode;
int r;
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
/* tiling mode */
mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
- /* for some reason eg need to have room for stencil right after depth */
- if (surf->flags & RADEON_SURF_ZBUFFER) {
- surf->flags |= RADEON_SURF_SBUFFER;
- }
- if (surf->flags & RADEON_SURF_SBUFFER) {
- surf->flags |= RADEON_SURF_ZBUFFER;
- }
- if (surf->flags & RADEON_SURF_ZBUFFER) {
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
/* zbuffer only support 1D or 2D tiled surface */
switch (mode) {
case RADEON_SURF_MODE_1D:
/* tiling mode */
mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
- /* for some reason eg need to have room for stencil right after depth */
- if (surf->flags & RADEON_SURF_ZBUFFER) {
- surf->flags |= RADEON_SURF_SBUFFER;
- }
-
/* set some default value to avoid sanity check choking on them */
surf->tile_split = 1024;
surf->bankw = 1;
return 0;
}
- /* set tile split to row size, optimize latter for multi-sample surface
- * tile split >= 256 for render buffer surface. Also depth surface want
- * smaller value for optimal performances.
- */
- surf->tile_split = surf_man->hw_info.row_size;
- surf->stencil_tile_split = surf_man->hw_info.row_size / 2;
+ /* Tweak TILE_SPLIT for performance here. */
+ if (surf->nsamples > 1) {
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ switch (surf->nsamples) {
+ case 2:
+ surf->tile_split = 128;
+ break;
+ case 4:
+ surf->tile_split = 128;
+ break;
+ case 8:
+ surf->tile_split = 256;
+ break;
+ case 16: /* cayman only */
+ surf->tile_split = 512;
+ break;
+ default:
+ fprintf(stderr, "radeon: Wrong number of samples %i (%i)\n",
+ surf->nsamples, __LINE__);
+ return -EINVAL;
+ }
+ surf->stencil_tile_split = 64;
+ } else {
+ /* tile split must be >= 256 for colorbuffer surfaces */
+ surf->tile_split = MAX2(surf->nsamples * surf->bpe * 64, 256);
+ }
+ } else {
+ /* set tile split to row size */
+ surf->tile_split = surf_man->hw_info.row_size;
+ surf->stencil_tile_split = surf_man->hw_info.row_size / 2;
+ }
/* bankw or bankh greater than 1 increase alignment requirement, not
* sure if it's worth using smaller bankw & bankh to stick with 2D
modetest
modetest_SOURCES = \
- modetest.c
+ buffers.c modetest.c buffers.h
+
modetest_LDADD = \
$(top_builddir)/libdrm.la \
$(top_builddir)/libkms/libkms.la \
--- /dev/null
+/*
+ * DRM based mode setting test program
+ * Copyright 2008 Tungsten Graphics
+ * Jakob Bornecrantz <jakob@tungstengraphics.com>
+ * Copyright 2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "config.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "drm_fourcc.h"
+#include "libkms.h"
+
+#include "buffers.h"
+
+#ifdef HAVE_CAIRO
+#include <math.h>
+#include <cairo.h>
+#endif
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+struct color_component {
+ unsigned int length;
+ unsigned int offset;
+};
+
+struct rgb_info {
+ struct color_component red;
+ struct color_component green;
+ struct color_component blue;
+ struct color_component alpha;
+};
+
+enum yuv_order {
+ YUV_YCbCr = 1,
+ YUV_YCrCb = 2,
+ YUV_YC = 4,
+ YUV_CY = 8,
+};
+
+struct yuv_info {
+ enum yuv_order order;
+ unsigned int xsub;
+ unsigned int ysub;
+ unsigned int chroma_stride;
+};
+
+struct format_info {
+ unsigned int format;
+ const char *name;
+ const struct rgb_info rgb;
+ const struct yuv_info yuv;
+};
+
+#define MAKE_RGB_INFO(rl, ro, bl, bo, gl, go, al, ao) \
+ .rgb = { { (rl), (ro) }, { (bl), (bo) }, { (gl), (go) }, { (al), (ao) } }
+
+#define MAKE_YUV_INFO(order, xsub, ysub, chroma_stride) \
+ .yuv = { (order), (xsub), (ysub), (chroma_stride) }
+
+static const struct format_info format_info[] = {
+ /* YUV packed */
+ { DRM_FORMAT_UYVY, "UYVY", MAKE_YUV_INFO(YUV_YCbCr | YUV_CY, 2, 2, 2) },
+ { DRM_FORMAT_VYUY, "VYUY", MAKE_YUV_INFO(YUV_YCrCb | YUV_CY, 2, 2, 2) },
+ { DRM_FORMAT_YUYV, "YUYV", MAKE_YUV_INFO(YUV_YCbCr | YUV_YC, 2, 2, 2) },
+ { DRM_FORMAT_YVYU, "YVYU", MAKE_YUV_INFO(YUV_YCrCb | YUV_YC, 2, 2, 2) },
+ /* YUV semi-planar */
+ { DRM_FORMAT_NV12, "NV12", MAKE_YUV_INFO(YUV_YCbCr, 2, 2, 2) },
+ { DRM_FORMAT_NV21, "NV21", MAKE_YUV_INFO(YUV_YCrCb, 2, 2, 2) },
+ { DRM_FORMAT_NV16, "NV16", MAKE_YUV_INFO(YUV_YCbCr, 2, 1, 2) },
+ { DRM_FORMAT_NV61, "NV61", MAKE_YUV_INFO(YUV_YCrCb, 2, 1, 2) },
+ /* YUV planar */
+ { DRM_FORMAT_YVU420, "YV12", MAKE_YUV_INFO(YUV_YCrCb, 2, 2, 1) },
+ /* RGB16 */
+ { DRM_FORMAT_ARGB1555, "AR15", MAKE_RGB_INFO(5, 10, 5, 5, 5, 0, 1, 15) },
+ { DRM_FORMAT_XRGB1555, "XR15", MAKE_RGB_INFO(5, 10, 5, 5, 5, 0, 0, 0) },
+ { DRM_FORMAT_RGB565, "RG16", MAKE_RGB_INFO(5, 11, 6, 5, 5, 0, 0, 0) },
+ /* RGB24 */
+ { DRM_FORMAT_BGR888, "BG24", MAKE_RGB_INFO(8, 0, 8, 8, 8, 16, 0, 0) },
+ { DRM_FORMAT_RGB888, "RG24", MAKE_RGB_INFO(8, 16, 8, 8, 8, 0, 0, 0) },
+ /* RGB32 */
+ { DRM_FORMAT_ARGB8888, "AR24", MAKE_RGB_INFO(8, 16, 8, 8, 8, 0, 8, 24) },
+ { DRM_FORMAT_BGRA8888, "BA24", MAKE_RGB_INFO(8, 8, 8, 16, 8, 24, 8, 0) },
+ { DRM_FORMAT_XRGB8888, "XR24", MAKE_RGB_INFO(8, 16, 8, 8, 8, 0, 0, 0) },
+ { DRM_FORMAT_BGRX8888, "BX24", MAKE_RGB_INFO(8, 8, 8, 16, 8, 24, 0, 0) },
+};
+
+unsigned int format_fourcc(const char *name)
+{
+ unsigned int i;
+ for (i = 0; i < ARRAY_SIZE(format_info); i++) {
+ if (!strcmp(format_info[i].name, name))
+ return format_info[i].format;
+ }
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * Test patterns
+ */
+
+struct color_rgb24 {
+ unsigned int value:24;
+} __attribute__((__packed__));
+
+struct color_yuv {
+ unsigned char y;
+ unsigned char u;
+ unsigned char v;
+};
+
+#define MAKE_YUV_601_Y(r, g, b) \
+ ((( 66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
+#define MAKE_YUV_601_U(r, g, b) \
+ (((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
+#define MAKE_YUV_601_V(r, g, b) \
+ (((112 * (r) - 94 * (g) - 18 * (b) + 128) >> 8) + 128)
+
+#define MAKE_YUV_601(r, g, b) \
+ { .y = MAKE_YUV_601_Y(r, g, b), \
+ .u = MAKE_YUV_601_U(r, g, b), \
+ .v = MAKE_YUV_601_V(r, g, b) }
+
+#define MAKE_RGBA(rgb, r, g, b, a) \
+ ((((r) >> (8 - (rgb)->red.length)) << (rgb)->red.offset) | \
+ (((g) >> (8 - (rgb)->green.length)) << (rgb)->green.offset) | \
+ (((b) >> (8 - (rgb)->blue.length)) << (rgb)->blue.offset) | \
+ (((a) >> (8 - (rgb)->alpha.length)) << (rgb)->alpha.offset))
+
+#define MAKE_RGB24(rgb, r, g, b) \
+ { .value = MAKE_RGBA(rgb, r, g, b, 0) }
+
+static void
+fill_smpte_yuv_planar(const struct yuv_info *yuv,
+ unsigned char *y_mem, unsigned char *u_mem,
+ unsigned char *v_mem, unsigned int width,
+ unsigned int height, unsigned int stride)
+{
+ const struct color_yuv colors_top[] = {
+		MAKE_YUV_601(192, 192, 192),	/* grey */
+ MAKE_YUV_601(192, 192, 0), /* yellow */
+ MAKE_YUV_601(0, 192, 192), /* cyan */
+ MAKE_YUV_601(0, 192, 0), /* green */
+ MAKE_YUV_601(192, 0, 192), /* magenta */
+ MAKE_YUV_601(192, 0, 0), /* red */
+ MAKE_YUV_601(0, 0, 192), /* blue */
+ };
+ const struct color_yuv colors_middle[] = {
+ MAKE_YUV_601(0, 0, 192), /* blue */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(192, 0, 192), /* magenta */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(0, 192, 192), /* cyan */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(192, 192, 192), /* grey */
+ };
+ const struct color_yuv colors_bottom[] = {
+ MAKE_YUV_601(0, 33, 76), /* in-phase */
+ MAKE_YUV_601(255, 255, 255), /* super white */
+ MAKE_YUV_601(50, 0, 106), /* quadrature */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(9, 9, 9), /* 3.5% */
+ MAKE_YUV_601(19, 19, 19), /* 7.5% */
+ MAKE_YUV_601(29, 29, 29), /* 11.5% */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ };
+ unsigned int cs = yuv->chroma_stride;
+ unsigned int xsub = yuv->xsub;
+ unsigned int ysub = yuv->ysub;
+ unsigned int x;
+ unsigned int y;
+
+ /* Luma */
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ y_mem[x] = colors_top[x * 7 / width].y;
+ y_mem += stride;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ y_mem[x] = colors_middle[x * 7 / width].y;
+ y_mem += stride;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; ++x)
+ y_mem[x] = colors_bottom[x * 4 / (width * 5 / 7)].y;
+ for (; x < width * 6 / 7; ++x)
+ y_mem[x] = colors_bottom[(x - width * 5 / 7) * 3
+ / (width / 7) + 4].y;
+ for (; x < width; ++x)
+ y_mem[x] = colors_bottom[7].y;
+ y_mem += stride;
+ }
+
+ /* Chroma */
+ for (y = 0; y < height / ysub * 6 / 9; ++y) {
+ for (x = 0; x < width; x += xsub) {
+ u_mem[x*cs/xsub] = colors_top[x * 7 / width].u;
+ v_mem[x*cs/xsub] = colors_top[x * 7 / width].v;
+ }
+ u_mem += stride * cs / xsub;
+ v_mem += stride * cs / xsub;
+ }
+
+ for (; y < height / ysub * 7 / 9; ++y) {
+ for (x = 0; x < width; x += xsub) {
+ u_mem[x*cs/xsub] = colors_middle[x * 7 / width].u;
+ v_mem[x*cs/xsub] = colors_middle[x * 7 / width].v;
+ }
+ u_mem += stride * cs / xsub;
+ v_mem += stride * cs / xsub;
+ }
+
+ for (; y < height / ysub; ++y) {
+ for (x = 0; x < width * 5 / 7; x += xsub) {
+ u_mem[x*cs/xsub] =
+ colors_bottom[x * 4 / (width * 5 / 7)].u;
+ v_mem[x*cs/xsub] =
+ colors_bottom[x * 4 / (width * 5 / 7)].v;
+ }
+ for (; x < width * 6 / 7; x += xsub) {
+ u_mem[x*cs/xsub] = colors_bottom[(x - width * 5 / 7) *
+ 3 / (width / 7) + 4].u;
+ v_mem[x*cs/xsub] = colors_bottom[(x - width * 5 / 7) *
+ 3 / (width / 7) + 4].v;
+ }
+ for (; x < width; x += xsub) {
+ u_mem[x*cs/xsub] = colors_bottom[7].u;
+ v_mem[x*cs/xsub] = colors_bottom[7].v;
+ }
+ u_mem += stride * cs / xsub;
+ v_mem += stride * cs / xsub;
+ }
+}
+
+static void
+fill_smpte_yuv_packed(const struct yuv_info *yuv, unsigned char *mem,
+ unsigned int width, unsigned int height,
+ unsigned int stride)
+{
+ const struct color_yuv colors_top[] = {
+		MAKE_YUV_601(192, 192, 192),	/* grey */
+ MAKE_YUV_601(192, 192, 0), /* yellow */
+ MAKE_YUV_601(0, 192, 192), /* cyan */
+ MAKE_YUV_601(0, 192, 0), /* green */
+ MAKE_YUV_601(192, 0, 192), /* magenta */
+ MAKE_YUV_601(192, 0, 0), /* red */
+ MAKE_YUV_601(0, 0, 192), /* blue */
+ };
+ const struct color_yuv colors_middle[] = {
+ MAKE_YUV_601(0, 0, 192), /* blue */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(192, 0, 192), /* magenta */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(0, 192, 192), /* cyan */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(192, 192, 192), /* grey */
+ };
+ const struct color_yuv colors_bottom[] = {
+ MAKE_YUV_601(0, 33, 76), /* in-phase */
+ MAKE_YUV_601(255, 255, 255), /* super white */
+ MAKE_YUV_601(50, 0, 106), /* quadrature */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ MAKE_YUV_601(9, 9, 9), /* 3.5% */
+ MAKE_YUV_601(19, 19, 19), /* 7.5% */
+ MAKE_YUV_601(29, 29, 29), /* 11.5% */
+ MAKE_YUV_601(19, 19, 19), /* black */
+ };
+ unsigned char *y_mem = (yuv->order & YUV_YC) ? mem : mem + 1;
+ unsigned char *c_mem = (yuv->order & YUV_CY) ? mem : mem + 1;
+ unsigned int u = (yuv->order & YUV_YCrCb) ? 2 : 0;
+ unsigned int v = (yuv->order & YUV_YCbCr) ? 2 : 0;
+ unsigned int x;
+ unsigned int y;
+
+ /* Luma */
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ y_mem[2*x] = colors_top[x * 7 / width].y;
+ y_mem += stride * 2;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ y_mem[2*x] = colors_middle[x * 7 / width].y;
+ y_mem += stride * 2;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; ++x)
+ y_mem[2*x] = colors_bottom[x * 4 / (width * 5 / 7)].y;
+ for (; x < width * 6 / 7; ++x)
+ y_mem[2*x] = colors_bottom[(x - width * 5 / 7) * 3
+ / (width / 7) + 4].y;
+ for (; x < width; ++x)
+ y_mem[2*x] = colors_bottom[7].y;
+ y_mem += stride * 2;
+ }
+
+ /* Chroma */
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; x += 2) {
+ c_mem[2*x+u] = colors_top[x * 7 / width].u;
+ c_mem[2*x+v] = colors_top[x * 7 / width].v;
+ }
+ c_mem += stride * 2;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; x += 2) {
+ c_mem[2*x+u] = colors_middle[x * 7 / width].u;
+ c_mem[2*x+v] = colors_middle[x * 7 / width].v;
+ }
+ c_mem += stride * 2;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; x += 2) {
+ c_mem[2*x+u] = colors_bottom[x * 4 / (width * 5 / 7)].u;
+ c_mem[2*x+v] = colors_bottom[x * 4 / (width * 5 / 7)].v;
+ }
+ for (; x < width * 6 / 7; x += 2) {
+ c_mem[2*x+u] = colors_bottom[(x - width * 5 / 7) *
+ 3 / (width / 7) + 4].u;
+ c_mem[2*x+v] = colors_bottom[(x - width * 5 / 7) *
+ 3 / (width / 7) + 4].v;
+ }
+ for (; x < width; x += 2) {
+ c_mem[2*x+u] = colors_bottom[7].u;
+ c_mem[2*x+v] = colors_bottom[7].v;
+ }
+ c_mem += stride * 2;
+ }
+}
+
+static void
+fill_smpte_rgb16(const struct rgb_info *rgb, unsigned char *mem,
+ unsigned int width, unsigned int height, unsigned int stride)
+{
+ const uint16_t colors_top[] = {
+ MAKE_RGBA(rgb, 192, 192, 192, 255), /* grey */
+ MAKE_RGBA(rgb, 192, 192, 0, 255), /* yellow */
+ MAKE_RGBA(rgb, 0, 192, 192, 255), /* cyan */
+ MAKE_RGBA(rgb, 0, 192, 0, 255), /* green */
+ MAKE_RGBA(rgb, 192, 0, 192, 255), /* magenta */
+ MAKE_RGBA(rgb, 192, 0, 0, 255), /* red */
+ MAKE_RGBA(rgb, 0, 0, 192, 255), /* blue */
+ };
+ const uint16_t colors_middle[] = {
+ MAKE_RGBA(rgb, 0, 0, 192, 255), /* blue */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 192, 0, 192, 255), /* magenta */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 0, 192, 192, 255), /* cyan */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 192, 192, 192, 255), /* grey */
+ };
+ const uint16_t colors_bottom[] = {
+ MAKE_RGBA(rgb, 0, 33, 76, 255), /* in-phase */
+ MAKE_RGBA(rgb, 255, 255, 255, 255), /* super white */
+ MAKE_RGBA(rgb, 50, 0, 106, 255), /* quadrature */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 9, 9, 9, 255), /* 3.5% */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* 7.5% */
+ MAKE_RGBA(rgb, 29, 29, 29, 255), /* 11.5% */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ };
+ unsigned int x;
+ unsigned int y;
+
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((uint16_t *)mem)[x] = colors_top[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((uint16_t *)mem)[x] = colors_middle[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; ++x)
+ ((uint16_t *)mem)[x] =
+ colors_bottom[x * 4 / (width * 5 / 7)];
+ for (; x < width * 6 / 7; ++x)
+ ((uint16_t *)mem)[x] =
+ colors_bottom[(x - width * 5 / 7) * 3
+ / (width / 7) + 4];
+ for (; x < width; ++x)
+ ((uint16_t *)mem)[x] = colors_bottom[7];
+ mem += stride;
+ }
+}
+
+static void
+fill_smpte_rgb24(const struct rgb_info *rgb, void *mem,
+ unsigned int width, unsigned int height, unsigned int stride)
+{
+ const struct color_rgb24 colors_top[] = {
+ MAKE_RGB24(rgb, 192, 192, 192), /* grey */
+ MAKE_RGB24(rgb, 192, 192, 0), /* yellow */
+ MAKE_RGB24(rgb, 0, 192, 192), /* cyan */
+ MAKE_RGB24(rgb, 0, 192, 0), /* green */
+ MAKE_RGB24(rgb, 192, 0, 192), /* magenta */
+ MAKE_RGB24(rgb, 192, 0, 0), /* red */
+ MAKE_RGB24(rgb, 0, 0, 192), /* blue */
+ };
+ const struct color_rgb24 colors_middle[] = {
+ MAKE_RGB24(rgb, 0, 0, 192), /* blue */
+ MAKE_RGB24(rgb, 19, 19, 19), /* black */
+ MAKE_RGB24(rgb, 192, 0, 192), /* magenta */
+ MAKE_RGB24(rgb, 19, 19, 19), /* black */
+ MAKE_RGB24(rgb, 0, 192, 192), /* cyan */
+ MAKE_RGB24(rgb, 19, 19, 19), /* black */
+ MAKE_RGB24(rgb, 192, 192, 192), /* grey */
+ };
+ const struct color_rgb24 colors_bottom[] = {
+ MAKE_RGB24(rgb, 0, 33, 76), /* in-phase */
+ MAKE_RGB24(rgb, 255, 255, 255), /* super white */
+ MAKE_RGB24(rgb, 50, 0, 106), /* quadrature */
+ MAKE_RGB24(rgb, 19, 19, 19), /* black */
+ MAKE_RGB24(rgb, 9, 9, 9), /* 3.5% */
+ MAKE_RGB24(rgb, 19, 19, 19), /* 7.5% */
+ MAKE_RGB24(rgb, 29, 29, 29), /* 11.5% */
+ MAKE_RGB24(rgb, 19, 19, 19), /* black */
+ };
+ unsigned int x;
+ unsigned int y;
+
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((struct color_rgb24 *)mem)[x] =
+ colors_top[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((struct color_rgb24 *)mem)[x] =
+ colors_middle[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; ++x)
+ ((struct color_rgb24 *)mem)[x] =
+ colors_bottom[x * 4 / (width * 5 / 7)];
+ for (; x < width * 6 / 7; ++x)
+ ((struct color_rgb24 *)mem)[x] =
+ colors_bottom[(x - width * 5 / 7) * 3
+ / (width / 7) + 4];
+ for (; x < width; ++x)
+ ((struct color_rgb24 *)mem)[x] = colors_bottom[7];
+ mem += stride;
+ }
+}
+
+static void
+fill_smpte_rgb32(const struct rgb_info *rgb, unsigned char *mem,
+ unsigned int width, unsigned int height, unsigned int stride)
+{
+ const uint32_t colors_top[] = {
+ MAKE_RGBA(rgb, 192, 192, 192, 255), /* grey */
+ MAKE_RGBA(rgb, 192, 192, 0, 255), /* yellow */
+ MAKE_RGBA(rgb, 0, 192, 192, 255), /* cyan */
+ MAKE_RGBA(rgb, 0, 192, 0, 255), /* green */
+ MAKE_RGBA(rgb, 192, 0, 192, 255), /* magenta */
+ MAKE_RGBA(rgb, 192, 0, 0, 255), /* red */
+ MAKE_RGBA(rgb, 0, 0, 192, 255), /* blue */
+ };
+ const uint32_t colors_middle[] = {
+ MAKE_RGBA(rgb, 0, 0, 192, 255), /* blue */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 192, 0, 192, 255), /* magenta */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 0, 192, 192, 255), /* cyan */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 192, 192, 192, 255), /* grey */
+ };
+ const uint32_t colors_bottom[] = {
+ MAKE_RGBA(rgb, 0, 33, 76, 255), /* in-phase */
+ MAKE_RGBA(rgb, 255, 255, 255, 255), /* super white */
+ MAKE_RGBA(rgb, 50, 0, 106, 255), /* quadrature */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ MAKE_RGBA(rgb, 9, 9, 9, 255), /* 3.5% */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* 7.5% */
+ MAKE_RGBA(rgb, 29, 29, 29, 255), /* 11.5% */
+ MAKE_RGBA(rgb, 19, 19, 19, 255), /* black */
+ };
+ unsigned int x;
+ unsigned int y;
+
+ for (y = 0; y < height * 6 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((uint32_t *)mem)[x] = colors_top[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height * 7 / 9; ++y) {
+ for (x = 0; x < width; ++x)
+ ((uint32_t *)mem)[x] = colors_middle[x * 7 / width];
+ mem += stride;
+ }
+
+ for (; y < height; ++y) {
+ for (x = 0; x < width * 5 / 7; ++x)
+ ((uint32_t *)mem)[x] =
+ colors_bottom[x * 4 / (width * 5 / 7)];
+ for (; x < width * 6 / 7; ++x)
+ ((uint32_t *)mem)[x] =
+ colors_bottom[(x - width * 5 / 7) * 3
+ / (width / 7) + 4];
+ for (; x < width; ++x)
+ ((uint32_t *)mem)[x] = colors_bottom[7];
+ mem += stride;
+ }
+}
+
+static void
+fill_smpte(const struct format_info *info, void *planes[3], unsigned int width,
+ unsigned int height, unsigned int stride)
+{
+ unsigned char *u, *v;
+
+ switch (info->format) {
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ return fill_smpte_yuv_packed(&info->yuv, planes[0], width,
+ height, stride);
+
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ u = info->yuv.order & YUV_YCbCr ? planes[1] : planes[1] + 1;
+ v = info->yuv.order & YUV_YCrCb ? planes[1] : planes[1] + 1;
+ return fill_smpte_yuv_planar(&info->yuv, planes[0], u, v,
+ width, height, stride);
+
+ case DRM_FORMAT_YVU420:
+ return fill_smpte_yuv_planar(&info->yuv, planes[0], planes[1],
+ planes[2], width, height, stride);
+
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_XRGB1555:
+ return fill_smpte_rgb16(&info->rgb, planes[0],
+ width, height, stride);
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_RGB888:
+ return fill_smpte_rgb24(&info->rgb, planes[0],
+ width, height, stride);
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_BGRX8888:
+ return fill_smpte_rgb32(&info->rgb, planes[0],
+ width, height, stride);
+ }
+}
+
+/* swap these for big endian.. */
+#define RED 2
+#define GREEN 1
+#define BLUE 0
+
+/*
+ * Overlay a test grid on an ARGB32 buffer using cairo: every 250 pixels,
+ * draw a crosshair, a circle, and the "x, y" coordinate text.  Compiles to
+ * a no-op when cairo support (HAVE_CAIRO) is disabled.
+ */
+static void
+make_pwetty(void *data, int width, int height, int stride)
+{
+#ifdef HAVE_CAIRO
+	cairo_surface_t *surface;
+	cairo_t *cr;
+	int x, y;
+
+	surface = cairo_image_surface_create_for_data(data,
+						      CAIRO_FORMAT_ARGB32,
+						      width, height,
+						      stride);
+	cr = cairo_create(surface);
+	/* The context holds its own reference; drop ours now so destroying
+	 * the context at the end releases the surface too. */
+	cairo_surface_destroy(surface);
+
+	cairo_set_line_cap(cr, CAIRO_LINE_CAP_SQUARE);
+	for (x = 0; x < width; x += 250)
+		for (y = 0; y < height; y += 250) {
+			char buf[64];
+
+			/* Crosshair plus circle, stroked twice (black under
+			 * white) so it is visible on any background. */
+			cairo_move_to(cr, x, y - 20);
+			cairo_line_to(cr, x, y + 20);
+			cairo_move_to(cr, x - 20, y);
+			cairo_line_to(cr, x + 20, y);
+			cairo_new_sub_path(cr);
+			cairo_arc(cr, x, y, 10, 0, M_PI * 2);
+			cairo_set_line_width(cr, 4);
+			cairo_set_source_rgb(cr, 0, 0, 0);
+			cairo_stroke_preserve(cr);
+			cairo_set_source_rgb(cr, 1, 1, 1);
+			cairo_set_line_width(cr, 2);
+			cairo_stroke(cr);
+
+			/* Label the marker with its pixel coordinates. */
+			snprintf(buf, sizeof buf, "%d, %d", x, y);
+			cairo_move_to(cr, x + 20, y + 20);
+			cairo_text_path(cr, buf);
+			cairo_set_source_rgb(cr, 0, 0, 0);
+			cairo_stroke_preserve(cr);
+			cairo_set_source_rgb(cr, 1, 1, 1);
+			cairo_fill(cr);
+		}
+
+	cairo_destroy(cr);
+#endif
+}
+
+/*
+ * Fill a planar YUV buffer with the colored-tiles test pattern.
+ *
+ * y_mem/u_mem/v_mem point at the luma and chroma planes; cs is the chroma
+ * pixel stride (2 for interleaved NV12-style planes, 1 for fully planar),
+ * and xsub/ysub are the horizontal/vertical chroma subsampling factors.
+ */
+static void
+fill_tiles_yuv_planar(const struct yuv_info *yuv,
+		      unsigned char *y_mem, unsigned char *u_mem,
+		      unsigned char *v_mem, unsigned int width,
+		      unsigned int height, unsigned int stride)
+{
+	unsigned int cs = yuv->chroma_stride;
+	unsigned int xsub = yuv->xsub;
+	unsigned int ysub = yuv->ysub;
+	unsigned int x;
+	unsigned int y;
+
+	for (y = 0; y < height; ++y) {
+		for (x = 0; x < width; ++x) {
+			/* 64x64-pixel tiles: each tile gets one RGB color
+			 * derived from its position, converted to YUV. */
+			div_t d = div(x+y, width);
+			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
+				       + 0x000a1120 * (d.rem >> 6);
+			struct color_yuv color =
+				MAKE_YUV_601((rgb32 >> 16) & 0xff,
+					     (rgb32 >> 8) & 0xff, rgb32 & 0xff);
+
+			y_mem[x] = color.y;
+			u_mem[x/xsub*cs] = color.u;
+			v_mem[x/xsub*cs] = color.v;
+		}
+
+		y_mem += stride;
+		/* Advance the chroma pointers only once per ysub luma rows;
+		 * the chroma row pitch is stride * cs / xsub bytes. */
+		if ((y + 1) % ysub == 0) {
+			u_mem += stride * cs / xsub;
+			v_mem += stride * cs / xsub;
+		}
+	}
+}
+
+/*
+ * Fill a packed (interleaved) 4:2:2 YUV buffer with the colored-tiles test
+ * pattern.  The yuv->order flags select the byte layout: YUYV, YVYU, UYVY
+ * or VYUY.  Pixels are written two at a time (one chroma pair per two luma
+ * samples), so width is expected to be even.
+ */
+static void
+fill_tiles_yuv_packed(const struct yuv_info *yuv, unsigned char *mem,
+		      unsigned int width, unsigned int height,
+		      unsigned int stride)
+{
+	/* Luma first (YUYV/YVYU) or chroma first (UYVY/VYUY). */
+	unsigned char *y_mem = (yuv->order & YUV_YC) ? mem : mem + 1;
+	unsigned char *c_mem = (yuv->order & YUV_CY) ? mem : mem + 1;
+	/* Offsets of U and V within the 4-byte pixel pair. */
+	unsigned int u = (yuv->order & YUV_YCrCb) ? 2 : 0;
+	unsigned int v = (yuv->order & YUV_YCbCr) ? 2 : 0;
+	unsigned int x;
+	unsigned int y;
+
+	for (y = 0; y < height; ++y) {
+		for (x = 0; x < width; x += 2) {
+			/* 64x64-pixel tiles colored by position. */
+			div_t d = div(x+y, width);
+			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
+				       + 0x000a1120 * (d.rem >> 6);
+			struct color_yuv color =
+				MAKE_YUV_601((rgb32 >> 16) & 0xff,
+					     (rgb32 >> 8) & 0xff, rgb32 & 0xff);
+
+			/* Both luma samples of the pair share one chroma. */
+			y_mem[2*x] = color.y;
+			c_mem[2*x+u] = color.u;
+			y_mem[2*x+2] = color.y;
+			c_mem[2*x+v] = color.v;
+		}
+
+		y_mem += stride;
+		c_mem += stride;
+	}
+}
+
+/* Fill a 16bpp RGB buffer with the colored-tiles test pattern. */
+static void
+fill_tiles_rgb16(const struct rgb_info *rgb, unsigned char *mem,
+		 unsigned int width, unsigned int height, unsigned int stride)
+{
+	unsigned int x, y;
+
+	for (y = 0; y < height; ++y) {
+		for (x = 0; x < width; ++x) {
+			/* 64x64-pixel tiles: one RGB color per tile, packed
+			 * into the target format by MAKE_RGBA. */
+			div_t d = div(x+y, width);
+			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
+				       + 0x000a1120 * (d.rem >> 6);
+			uint16_t color =
+				MAKE_RGBA(rgb, (rgb32 >> 16) & 0xff,
+					  (rgb32 >> 8) & 0xff, rgb32 & 0xff,
+					  255);
+
+			((uint16_t *)mem)[x] = color;
+		}
+		mem += stride;
+	}
+}
+
+/* Fill a 24bpp RGB buffer with the colored-tiles test pattern. */
+static void
+fill_tiles_rgb24(const struct rgb_info *rgb, unsigned char *mem,
+		 unsigned int width, unsigned int height, unsigned int stride)
+{
+	unsigned int x, y;
+
+	for (y = 0; y < height; ++y) {
+		for (x = 0; x < width; ++x) {
+			/* 64x64-pixel tiles: one RGB color per tile, packed
+			 * as three bytes per pixel by MAKE_RGB24. */
+			div_t d = div(x+y, width);
+			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
+				       + 0x000a1120 * (d.rem >> 6);
+			struct color_rgb24 color =
+				MAKE_RGB24(rgb, (rgb32 >> 16) & 0xff,
+					   (rgb32 >> 8) & 0xff, rgb32 & 0xff);
+
+			((struct color_rgb24 *)mem)[x] = color;
+		}
+		mem += stride;
+	}
+}
+
+/*
+ * Fill a 32bpp RGB buffer with the colored-tiles test pattern, then (when
+ * cairo is available) decorate it with the make_pwetty() coordinate grid.
+ */
+static void
+fill_tiles_rgb32(const struct rgb_info *rgb, unsigned char *mem,
+		 unsigned int width, unsigned int height, unsigned int stride)
+{
+	/* Keep the start of the buffer for the make_pwetty() pass below. */
+	unsigned char *mem_base = mem;
+	unsigned int x, y;
+
+	for (y = 0; y < height; ++y) {
+		for (x = 0; x < width; ++x) {
+			/* 64x64-pixel tiles colored by position. */
+			div_t d = div(x+y, width);
+			uint32_t rgb32 = 0x00130502 * (d.quot >> 6)
+				       + 0x000a1120 * (d.rem >> 6);
+			uint32_t color =
+				MAKE_RGBA(rgb, (rgb32 >> 16) & 0xff,
+					  (rgb32 >> 8) & 0xff, rgb32 & 0xff,
+					  255);
+
+			((uint32_t *)mem)[x] = color;
+		}
+		mem += stride;
+	}
+
+	/* NOTE(review): make_pwetty() draws in CAIRO_FORMAT_ARGB32; for the
+	 * non-ARGB8888 variants handled by this function the overlay colors
+	 * will be approximate — presumably acceptable for a test pattern. */
+	make_pwetty(mem_base, width, height, stride);
+}
+
+/*
+ * Dispatch the colored-tiles pattern to the filler matching the fourcc in
+ * info->format.  Unlisted formats are silently ignored (fill_pattern() has
+ * already validated the format against the format_info table).
+ */
+static void
+fill_tiles(const struct format_info *info, void *planes[3], unsigned int width,
+	   unsigned int height, unsigned int stride)
+{
+	unsigned char *u, *v;
+
+	switch (info->format) {
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+		return fill_tiles_yuv_packed(&info->yuv, planes[0],
+					     width, height, stride);
+
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		/* NV formats interleave Cb/Cr in plane 1; pick which byte of
+		 * each pair is U and which is V from the component order. */
+		u = info->yuv.order & YUV_YCbCr ? planes[1] : planes[1] + 1;
+		v = info->yuv.order & YUV_YCrCb ? planes[1] : planes[1] + 1;
+		return fill_tiles_yuv_planar(&info->yuv, planes[0], u, v,
+					     width, height, stride);
+
+	case DRM_FORMAT_YVU420:
+		return fill_tiles_yuv_planar(&info->yuv, planes[0], planes[1],
+					     planes[2], width, height, stride);
+
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_XRGB1555:
+		return fill_tiles_rgb16(&info->rgb, planes[0],
+					width, height, stride);
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_RGB888:
+		return fill_tiles_rgb24(&info->rgb, planes[0],
+					width, height, stride);
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_BGRX8888:
+		return fill_tiles_rgb32(&info->rgb, planes[0],
+					width, height, stride);
+	}
+}
+
+/*
+ * Fill the first plane with a flat grey (0x77 in every byte).  Only
+ * planes[0] is written, so the chroma planes of multi-planar formats are
+ * left with whatever the allocation contained.
+ */
+static void
+fill_plain(const struct format_info *info, void *planes[3], unsigned int width,
+	   unsigned int height, unsigned int stride)
+{
+	memset(planes[0], 0x77, stride * height);
+}
+
+/*
+ * fill_pattern - Fill a buffer with a test pattern
+ * @format: Pixel format
+ * @pattern: Test pattern
+ * @buffer: Buffer memory
+ * @width: Width in pixels
+ * @height: Height in pixels
+ * @stride: Line stride (pitch) in bytes
+ *
+ * Fill the buffer with the test pattern specified by the pattern parameter.
+ * Supported formats vary depending on the selected pattern.
+ */
+static void
+fill_pattern(unsigned int format, enum fill_pattern pattern, void *planes[3],
+	     unsigned int width, unsigned int height, unsigned int stride)
+{
+	const struct format_info *info = NULL;
+	unsigned int i;
+
+	/* Look the fourcc up in the static format table; an unknown format
+	 * is silently ignored and the buffer is left unfilled. */
+	for (i = 0; i < ARRAY_SIZE(format_info); ++i) {
+		if (format_info[i].format == format) {
+			info = &format_info[i];
+			break;
+		}
+	}
+
+	if (info == NULL)
+		return;
+
+	switch (pattern) {
+	case PATTERN_TILES:
+		return fill_tiles(info, planes, width, height, stride);
+
+	case PATTERN_SMPTE:
+		return fill_smpte(info, planes, width, height, stride);
+
+	case PATTERN_PLAIN:
+		return fill_plain(info, planes, width, height, stride);
+
+	default:
+		printf("Error: unsupported test pattern %u.\n", pattern);
+		break;
+	}
+}
+
+/* -----------------------------------------------------------------------------
+ * Buffers management
+ */
+
+/*
+ * Allocate a scanout buffer through libkms sized for a 32bpp X8R8G8B8
+ * surface (the worst case for the formats used by the callers) and report
+ * the driver-chosen pitch through *stride.  Returns the new bo, or NULL on
+ * failure (an error message is printed to stderr).
+ */
+static struct kms_bo *
+allocate_buffer(struct kms_driver *kms,
+		int width, int height, int *stride)
+{
+	struct kms_bo *bo;
+	unsigned bo_attribs[] = {
+		KMS_WIDTH,   0,
+		KMS_HEIGHT,  0,
+		KMS_BO_TYPE, KMS_BO_TYPE_SCANOUT_X8R8G8B8,
+		KMS_TERMINATE_PROP_LIST
+	};
+	int ret;
+
+	bo_attribs[1] = width;
+	bo_attribs[3] = height;
+
+	ret = kms_bo_create(kms, bo_attribs, &bo);
+	if (ret) {
+		fprintf(stderr, "failed to alloc buffer: %s\n",
+			strerror(-ret));
+		return NULL;
+	}
+
+	ret = kms_bo_get_prop(bo, KMS_PITCH, stride);
+	if (ret) {
+		/* Fixed typo: "retreive" -> "retrieve". */
+		fprintf(stderr, "failed to retrieve buffer stride: %s\n",
+			strerror(-ret));
+		kms_bo_destroy(&bo);
+		return NULL;
+	}
+
+	return bo;
+}
+
+/*
+ * create_test_buffer - allocate a KMS bo and fill it with a test pattern
+ * @kms: libkms driver handle
+ * @format: DRM fourcc of the desired pixel format
+ * @width, @height: buffer dimensions in pixels
+ * @handles, @pitches, @offsets: per-plane fb parameters, filled in for use
+ *	with drmModeAddFB2()
+ * @pattern: which test pattern to draw (see enum fill_pattern)
+ *
+ * The underlying allocation is always 32bpp-sized (see allocate_buffer()),
+ * which is the worst case, so the YUV formats simply use less of it.
+ * Returns the bo on success or NULL on failure.
+ */
+struct kms_bo *
+create_test_buffer(struct kms_driver *kms, unsigned int format,
+		   int width, int height, int handles[4],
+		   int pitches[4], int offsets[4], enum fill_pattern pattern)
+{
+	struct kms_bo *bo;
+	int ret;
+	void *planes[3];
+	void *virtual;
+
+	bo = allocate_buffer(kms, width, height, &pitches[0]);
+	if (!bo)
+		return NULL;
+
+	ret = kms_bo_map(bo, &virtual);
+	if (ret) {
+		fprintf(stderr, "failed to map buffer: %s\n",
+			strerror(-ret));
+		kms_bo_destroy(&bo);
+		return NULL;
+	}
+
+	/* just testing a limited # of formats to test single
+	 * and multi-planar path.. would be nice to add more..
+	 */
+	switch (format) {
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+		pitches[0] = width * 2;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+
+		planes[0] = virtual;
+		break;
+
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		/* Luma plane followed by an interleaved Cb/Cr plane. */
+		pitches[0] = width;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+		pitches[1] = width;
+		offsets[1] = width * height;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[1]);
+
+		planes[0] = virtual;
+		planes[1] = virtual + offsets[1];
+		break;
+
+	case DRM_FORMAT_YVU420:
+		/* Fully planar: Y, then quarter-size Cr and Cb planes. */
+		pitches[0] = width;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+		pitches[1] = width / 2;
+		offsets[1] = width * height;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[1]);
+		pitches[2] = width / 2;
+		offsets[2] = offsets[1] + (width * height) / 4;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[2]);
+
+		planes[0] = virtual;
+		planes[1] = virtual + offsets[1];
+		planes[2] = virtual + offsets[2];
+		break;
+
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_XRGB1555:
+		pitches[0] = width * 2;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+
+		planes[0] = virtual;
+		break;
+
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_RGB888:
+		pitches[0] = width * 3;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+
+		planes[0] = virtual;
+		break;
+
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_BGRX8888:
+		pitches[0] = width * 4;
+		offsets[0] = 0;
+		kms_bo_get_prop(bo, KMS_HANDLE, &handles[0]);
+
+		planes[0] = virtual;
+		break;
+
+	default:
+		/* Previously an unknown fourcc fell through with planes[]
+		 * and handles[] uninitialized; fail explicitly instead. */
+		fprintf(stderr, "unsupported format 0x%08x\n", format);
+		kms_bo_unmap(bo);
+		kms_bo_destroy(&bo);
+		return NULL;
+	}
+
+	fill_pattern(format, pattern, planes, width, height, pitches[0]);
+	kms_bo_unmap(bo);
+
+	return bo;
+}
--- /dev/null
+/*
+ * DRM based mode setting test program
+ * Copyright 2008 Tungsten Graphics
+ * Jakob Bornecrantz <jakob@tungstengraphics.com>
+ * Copyright 2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __BUFFERS_H__
+#define __BUFFERS_H__
+
+/* Opaque libkms types; only pointers are used here. */
+struct kms_bo;
+struct kms_driver;
+
+/* Test patterns understood by create_test_buffer(). */
+enum fill_pattern {
+	PATTERN_TILES = 0,
+	PATTERN_PLAIN = 1,
+	PATTERN_SMPTE = 2,
+};
+
+/* Allocate a bo and fill it with the given pattern; the handles/pitches/
+ * offsets arrays are filled in for drmModeAddFB2().  Returns NULL on error. */
+struct kms_bo *create_test_buffer(struct kms_driver *kms, unsigned int format,
+		int width, int height, int handles[4], int pitches[4],
+		int offsets[4], enum fill_pattern pattern);
+
+/* Translate a 4-character format name (e.g. "XR24") into its DRM fourcc;
+ * returns 0 for an unknown name. */
+unsigned int format_fourcc(const char *name);
+
+#endif
#include "drm_fourcc.h"
#include "libkms.h"
-#ifdef HAVE_CAIRO
-#include <math.h>
-#include <cairo.h>
-#endif
+#include "buffers.h"
drmModeRes *resources;
int fd, modes;
sep = ", "; \
} \
} \
+ return NULL; \
}
static const char *mode_type_names[] = {
return;
}
+/* -----------------------------------------------------------------------------
+ * Connectors and planes
+ */
+
/*
* Mode setting with the kernel interfaces is a bit of a chore.
* First you have to find the connector in question and make sure the
struct connector {
uint32_t id;
char mode_str[64];
+ char format_str[5];
+ unsigned int fourcc;
drmModeModeInfo *mode;
drmModeEncoder *encoder;
int crtc;
uint32_t w, h;
unsigned int fb_id;
char format_str[5]; /* need to leave room for terminating \0 */
+ unsigned int fourcc;
};
static void
}
-static struct kms_bo *
-allocate_buffer(struct kms_driver *kms,
- int width, int height, int *stride)
-{
- struct kms_bo *bo;
- unsigned bo_attribs[] = {
- KMS_WIDTH, 0,
- KMS_HEIGHT, 0,
- KMS_BO_TYPE, KMS_BO_TYPE_SCANOUT_X8R8G8B8,
- KMS_TERMINATE_PROP_LIST
- };
- int ret;
-
- bo_attribs[1] = width;
- bo_attribs[3] = height;
-
- ret = kms_bo_create(kms, bo_attribs, &bo);
- if (ret) {
- fprintf(stderr, "failed to alloc buffer: %s\n",
- strerror(-ret));
- return NULL;
- }
-
- ret = kms_bo_get_prop(bo, KMS_PITCH, stride);
- if (ret) {
- fprintf(stderr, "failed to retreive buffer stride: %s\n",
- strerror(-ret));
- kms_bo_destroy(&bo);
- return NULL;
- }
-
- return bo;
-}
-
-static void
-make_pwetty(void *data, int width, int height, int stride)
-{
-#ifdef HAVE_CAIRO
- cairo_surface_t *surface;
- cairo_t *cr;
- int x, y;
-
- surface = cairo_image_surface_create_for_data(data,
- CAIRO_FORMAT_ARGB32,
- width, height,
- stride);
- cr = cairo_create(surface);
- cairo_surface_destroy(surface);
-
- cairo_set_line_cap(cr, CAIRO_LINE_CAP_SQUARE);
- for (x = 0; x < width; x += 250)
- for (y = 0; y < height; y += 250) {
- char buf[64];
-
- cairo_move_to(cr, x, y - 20);
- cairo_line_to(cr, x, y + 20);
- cairo_move_to(cr, x - 20, y);
- cairo_line_to(cr, x + 20, y);
- cairo_new_sub_path(cr);
- cairo_arc(cr, x, y, 10, 0, M_PI * 2);
- cairo_set_line_width(cr, 4);
- cairo_set_source_rgb(cr, 0, 0, 0);
- cairo_stroke_preserve(cr);
- cairo_set_source_rgb(cr, 1, 1, 1);
- cairo_set_line_width(cr, 2);
- cairo_stroke(cr);
-
- snprintf(buf, sizeof buf, "%d, %d", x, y);
- cairo_move_to(cr, x + 20, y + 20);
- cairo_text_path(cr, buf);
- cairo_set_source_rgb(cr, 0, 0, 0);
- cairo_stroke_preserve(cr);
- cairo_set_source_rgb(cr, 1, 1, 1);
- cairo_fill(cr);
- }
-
- cairo_destroy(cr);
-#endif
-}
-
-static int
-create_test_buffer(struct kms_driver *kms,
- int width, int height, int *stride_out,
- struct kms_bo **bo_out)
-{
- struct kms_bo *bo;
- int ret, i, j, stride;
- void *virtual;
-
- bo = allocate_buffer(kms, width, height, &stride);
- if (!bo)
- return -1;
-
- ret = kms_bo_map(bo, &virtual);
- if (ret) {
- fprintf(stderr, "failed to map buffer: %s\n",
- strerror(-ret));
- kms_bo_destroy(&bo);
- return -1;
- }
-
- /* paint the buffer with colored tiles */
- for (j = 0; j < height; j++) {
- uint32_t *fb_ptr = (uint32_t*)((char*)virtual + j * stride);
- for (i = 0; i < width; i++) {
- div_t d = div(i, width);
- fb_ptr[i] =
- 0x00130502 * (d.quot >> 6) +
- 0x000a1120 * (d.rem >> 6);
- }
- }
-
- make_pwetty(virtual, width, height, stride);
-
- kms_bo_unmap(bo);
-
- *bo_out = bo;
- *stride_out = stride;
- return 0;
-}
-
-static int
-create_grey_buffer(struct kms_driver *kms,
- int width, int height, int *stride_out,
- struct kms_bo **bo_out)
-{
- struct kms_bo *bo;
- int size, ret, stride;
- void *virtual;
-
- bo = allocate_buffer(kms, width, height, &stride);
- if (!bo)
- return -1;
-
- ret = kms_bo_map(bo, &virtual);
- if (ret) {
- fprintf(stderr, "failed to map buffer: %s\n",
- strerror(-ret));
- kms_bo_destroy(&bo);
- return -1;
- }
-
- size = stride * height;
- memset(virtual, 0x77, size);
- kms_bo_unmap(bo);
-
- *bo_out = bo;
- *stride_out = stride;
-
- return 0;
-}
+/* -------------------------------------------------------------------------- */
void
page_flip_handler(int fd, unsigned int frame,
new_fb_id = c->fb_id[1];
else
new_fb_id = c->fb_id[0];
-
+
drmModePageFlip(fd, c->crtc, new_fb_id,
DRM_MODE_PAGE_FLIP_EVENT, c);
c->current_fb_id = new_fb_id;
}
}
-/* swap these for big endian.. */
-#define RED 2
-#define GREEN 1
-#define BLUE 0
-
-static void
-fill420(unsigned char *y, unsigned char *u, unsigned char *v,
- int cs /*chroma pixel stride */,
- int n, int width, int height, int stride)
-{
- int i, j;
-
- /* paint the buffer with colored tiles, in blocks of 2x2 */
- for (j = 0; j < height; j+=2) {
- unsigned char *y1p = y + j * stride;
- unsigned char *y2p = y1p + stride;
- unsigned char *up = u + (j/2) * stride * cs / 2;
- unsigned char *vp = v + (j/2) * stride * cs / 2;
-
- for (i = 0; i < width; i+=2) {
- div_t d = div(n+i+j, width);
- uint32_t rgb = 0x00130502 * (d.quot >> 6) + 0x000a1120 * (d.rem >> 6);
- unsigned char *rgbp = (unsigned char *)&rgb;
- unsigned char y = (0.299 * rgbp[RED]) + (0.587 * rgbp[GREEN]) + (0.114 * rgbp[BLUE]);
-
- *(y2p++) = *(y1p++) = y;
- *(y2p++) = *(y1p++) = y;
-
- *up = (rgbp[BLUE] - y) * 0.565 + 128;
- *vp = (rgbp[RED] - y) * 0.713 + 128;
- up += cs;
- vp += cs;
- }
- }
-}
-
-static void
-fill422(unsigned char *virtual, int n, int width, int height, int stride)
-{
- int i, j;
- /* paint the buffer with colored tiles */
- for (j = 0; j < height; j++) {
- uint8_t *ptr = (uint8_t*)((char*)virtual + j * stride);
- for (i = 0; i < width; i++) {
- div_t d = div(n+i+j, width);
- uint32_t rgb = 0x00130502 * (d.quot >> 6) + 0x000a1120 * (d.rem >> 6);
- unsigned char *rgbp = (unsigned char *)&rgb;
- unsigned char y = (0.299 * rgbp[RED]) + (0.587 * rgbp[GREEN]) + (0.114 * rgbp[BLUE]);
-
- *(ptr++) = y;
- *(ptr++) = (rgbp[BLUE] - y) * 0.565 + 128;
- *(ptr++) = y;
- *(ptr++) = (rgbp[RED] - y) * 0.713 + 128;
- }
- }
-}
-
-static void
-fill1555(unsigned char *virtual, int n, int width, int height, int stride)
-{
- int i, j;
- /* paint the buffer with colored tiles */
- for (j = 0; j < height; j++) {
- uint16_t *ptr = (uint16_t*)((char*)virtual + j * stride);
- for (i = 0; i < width; i++) {
- div_t d = div(n+i+j, width);
- uint32_t rgb = 0x00130502 * (d.quot >> 6) + 0x000a1120 * (d.rem >> 6);
- unsigned char *rgbp = (unsigned char *)&rgb;
-
- *(ptr++) = 0x8000 |
- (rgbp[RED] >> 3) << 10 |
- (rgbp[GREEN] >> 3) << 5 |
- (rgbp[BLUE] >> 3);
- }
- }
-}
-
static int
set_plane(struct kms_driver *kms, struct connector *c, struct plane *p)
{
uint32_t handles[4], pitches[4], offsets[4] = {0}; /* we only use [0] */
uint32_t plane_id = 0;
struct kms_bo *plane_bo;
- uint32_t plane_flags = 0, format;
+ uint32_t plane_flags = 0;
int ret, crtc_x, crtc_y, crtc_w, crtc_h;
unsigned int i;
return -1;
}
- if (!strcmp(p->format_str, "XR24")) {
- if (create_test_buffer(kms, p->w, p->h, &pitches[0], &plane_bo))
- return -1;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
- format = DRM_FORMAT_XRGB8888;
- } else {
- void *virtual;
-
- /* TODO: this always allocates a buffer for 32bpp RGB.. but for
- * YUV formats, we don't use all of it.. since 4bytes/pixel is
- * worst case, so live with it for now and just don't use all
- * the buffer:
- */
- plane_bo = allocate_buffer(kms, p->w, p->h, &pitches[0]);
- if (!plane_bo)
- return -1;
-
- ret = kms_bo_map(plane_bo, &virtual);
- if (ret) {
- fprintf(stderr, "failed to map buffer: %s\n",
- strerror(-ret));
- kms_bo_destroy(&plane_bo);
- return -1;
- }
-
- /* just testing a limited # of formats to test single
- * and multi-planar path.. would be nice to add more..
- */
- if (!strcmp(p->format_str, "YUYV")) {
- pitches[0] = p->w * 2;
- offsets[0] = 0;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
-
- fill422(virtual, 0, p->w, p->h, pitches[0]);
-
- format = DRM_FORMAT_YUYV;
- } else if (!strcmp(p->format_str, "NV12")) {
- pitches[0] = p->w;
- offsets[0] = 0;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
- pitches[1] = p->w;
- offsets[1] = p->w * p->h;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[1]);
-
- fill420(virtual, virtual+offsets[1], virtual+offsets[1]+1,
- 2, 0, p->w, p->h, pitches[0]);
-
- format = DRM_FORMAT_NV12;
- } else if (!strcmp(p->format_str, "YV12")) {
- pitches[0] = p->w;
- offsets[0] = 0;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
- pitches[1] = p->w / 2;
- offsets[1] = p->w * p->h;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[1]);
- pitches[2] = p->w / 2;
- offsets[2] = offsets[1] + (p->w * p->h) / 4;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[2]);
-
- fill420(virtual, virtual+offsets[1], virtual+offsets[2],
- 1, 0, p->w, p->h, pitches[0]);
-
- format = DRM_FORMAT_YVU420;
- } else if (!strcmp(p->format_str, "XR15")) {
- pitches[0] = p->w * 2;
- offsets[0] = 0;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
-
- fill1555(virtual, 0, p->w, p->h, pitches[0]);
-
- format = DRM_FORMAT_XRGB1555;
- } else if (!strcmp(p->format_str, "AR15")) {
- pitches[0] = p->w * 2;
- offsets[0] = 0;
- kms_bo_get_prop(plane_bo, KMS_HANDLE, &handles[0]);
-
- fill1555(virtual, 0, p->w, p->h, pitches[0]);
-
- format = DRM_FORMAT_ARGB1555;
- } else {
- fprintf(stderr, "Unknown format: %s\n", p->format_str);
- return -1;
- }
-
- kms_bo_unmap(plane_bo);
- }
+ plane_bo = create_test_buffer(kms, p->fourcc, p->w, p->h, handles,
+ pitches, offsets, PATTERN_TILES);
+ if (plane_bo == NULL)
+ return -1;
/* just use single plane format for now.. */
- if (drmModeAddFB2(fd, p->w, p->h, format,
+ if (drmModeAddFB2(fd, p->w, p->h, p->fourcc,
handles, pitches, offsets, &p->fb_id, plane_flags)) {
fprintf(stderr, "failed to add fb: %s\n", strerror(errno));
return -1;
struct kms_driver *kms;
struct kms_bo *bo, *other_bo;
unsigned int fb_id, other_fb_id;
- int i, j, ret, width, height, x, stride;
- unsigned handle;
+ int i, j, ret, width, height, x;
+ uint32_t handles[4], pitches[4], offsets[4] = {0}; /* we only use [0] */
drmEventContext evctx;
width = 0;
return;
}
- if (create_test_buffer(kms, width, height, &stride, &bo))
+ bo = create_test_buffer(kms, c->fourcc, width, height, handles,
+ pitches, offsets, PATTERN_SMPTE);
+ if (bo == NULL)
return;
- kms_bo_get_prop(bo, KMS_HANDLE, &handle);
- ret = drmModeAddFB(fd, width, height, 24, 32, stride, handle, &fb_id);
+ ret = drmModeAddFB2(fd, width, height, c->fourcc,
+ handles, pitches, offsets, &fb_id, 0);
if (ret) {
fprintf(stderr, "failed to add fb (%ux%u): %s\n",
width, height, strerror(errno));
if (c[i].mode == NULL)
continue;
- printf("setting mode %s on connector %d, crtc %d\n",
- c[i].mode_str, c[i].id, c[i].crtc);
+ printf("setting mode %s@%s on connector %d, crtc %d\n",
+ c[i].mode_str, c[i].format_str, c[i].id, c[i].crtc);
ret = drmModeSetCrtc(fd, c[i].crtc, fb_id, x, 0,
&c[i].id, 1, c[i].mode);
if (!page_flip)
return;
- if (create_grey_buffer(kms, width, height, &stride, &other_bo))
+ other_bo = create_test_buffer(kms, c->fourcc, width, height, handles,
+ pitches, offsets, PATTERN_PLAIN);
+ if (other_bo == NULL)
return;
- kms_bo_get_prop(other_bo, KMS_HANDLE, &handle);
- ret = drmModeAddFB(fd, width, height, 32, 32, stride, handle,
- &other_fb_id);
+ ret = drmModeAddFB2(fd, width, height, c->fourcc, handles, pitches, offsets,
+ &other_fb_id, 0);
if (ret) {
fprintf(stderr, "failed to add fb: %s\n", strerror(errno));
return;
extern int optind, opterr, optopt;
static char optstr[] = "ecpmfs:P:v";
+#define min(a, b) ((a) < (b) ? (a) : (b))
+
+/*
+ * Parse a -s connector argument of the form
+ *	<connector_id>[@<crtc_id>]:<mode>[@<format>]
+ * The crtc defaults to -1 (driver's choice) and the format to XR24.
+ * Returns 0 on success, -1 on a malformed string or unknown format.
+ */
+static int parse_connector(struct connector *c, const char *arg)
+{
+	unsigned int len;
+	const char *p;
+	char *endp;
+
+	c->crtc = -1;
+	strcpy(c->format_str, "XR24");
+
+	c->id = strtoul(arg, &endp, 10);
+	if (*endp == '@') {
+		arg = endp + 1;
+		c->crtc = strtoul(arg, &endp, 10);
+	}
+	if (*endp != ':')
+		return -1;
+
+	arg = endp + 1;
+
+	/* NOTE(review): strchrnul() is a GNU extension (returns the end of
+	 * the string when '@' is absent), so [arg, p) is always the mode. */
+	p = strchrnul(arg, '@');
+	len = min(sizeof c->mode_str - 1, p - arg);
+	strncpy(c->mode_str, arg, len);
+	c->mode_str[len] = '\0';
+
+	if (*p == '@') {
+		/* Copy at most 4 format characters and terminate. */
+		strncpy(c->format_str, p + 1, 4);
+		c->format_str[4] = '\0';
+	}
+
+	c->fourcc = format_fourcc(c->format_str);
+	if (c->fourcc == 0) {
+		fprintf(stderr, "unknown format %s\n", c->format_str);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Parse a -P plane argument of the form
+ *	<connector_id>:<w>x<h>[@<format>]
+ * The format defaults to XR24.  Returns 0 on success, -1 on a malformed
+ * string or unknown format.
+ */
+static int parse_plane(struct plane *p, const char *arg)
+{
+	strcpy(p->format_str, "XR24");
+
+	/* Pass the array itself, not its address: &p->format_str has type
+	 * char (*)[5], which is not a valid argument for %4s. */
+	if (sscanf(arg, "%d:%dx%d@%4s", &p->con_id, &p->w, &p->h, p->format_str) != 4 &&
+	    sscanf(arg, "%d:%dx%d", &p->con_id, &p->w, &p->h) != 3)
+		return -1;
+
+	p->fourcc = format_fourcc(p->format_str);
+	if (p->fourcc == 0) {
+		fprintf(stderr, "unknown format %s\n", p->format_str);
+		return -1;
+	}
+
+	return 0;
+}
+
void usage(char *name)
{
fprintf(stderr, "usage: %s [-ecpmf]\n", name);
fprintf(stderr, "\t-m\tlist modes\n");
fprintf(stderr, "\t-f\tlist framebuffers\n");
fprintf(stderr, "\t-v\ttest vsynced page flipping\n");
- fprintf(stderr, "\t-s <connector_id>:<mode>\tset a mode\n");
- fprintf(stderr, "\t-s <connector_id>@<crtc_id>:<mode>\tset a mode\n");
- fprintf(stderr, "\t-P <connector_id>:<w>x<h>\tset a plane\n");
- fprintf(stderr, "\t-P <connector_id>:<w>x<h>@<format>\tset a plane\n");
+ fprintf(stderr, "\t-s <connector_id>[@<crtc_id>]:<mode>[@<format>]\tset a mode\n");
+ fprintf(stderr, "\t-P <connector_id>:<w>x<h>[@<format>]\tset a plane\n");
fprintf(stderr, "\n\tDefault is to dump all info.\n");
exit(0);
}
test_vsync = 1;
break;
case 's':
- con_args[count].crtc = -1;
- if (sscanf(optarg, "%d:%64s",
- &con_args[count].id,
- con_args[count].mode_str) != 2 &&
- sscanf(optarg, "%d@%d:%64s",
- &con_args[count].id,
- &con_args[count].crtc,
- con_args[count].mode_str) != 3)
+ if (parse_connector(&con_args[count], optarg) < 0)
usage(argv[0]);
count++;
break;
case 'P':
- strcpy(plane_args[plane_count].format_str, "XR24");
- if (sscanf(optarg, "%d:%dx%d@%4s",
- &plane_args[plane_count].con_id,
- &plane_args[plane_count].w,
- &plane_args[plane_count].h,
- plane_args[plane_count].format_str) != 4 &&
- sscanf(optarg, "%d:%dx%d",
- &plane_args[plane_count].con_id,
- &plane_args[plane_count].w,
- &plane_args[plane_count].h) != 3)
+ if (parse_plane(&plane_args[plane_count], optarg) < 0)
usage(argv[0]);
plane_count++;
break;
return strdup(name);
}
+
+/*
+ * Export a GEM handle as a dma-buf file descriptor via
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD.  On success stores the new fd in *prime_fd
+ * and returns 0; otherwise returns the drmIoctl() error.
+ *
+ * NOTE(review): args is not zero-initialized; only handle and flags are set
+ * before the ioctl — consider memset()ing the struct to avoid passing stack
+ * garbage in the remaining fields.
+ */
+int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd)
+{
+	struct drm_prime_handle args;
+	int ret;
+
+	args.handle = handle;
+	args.flags = flags;
+	ret = drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
+	if (ret)
+		return ret;
+
+	/* The kernel fills in args.fd on success. */
+	*prime_fd = args.fd;
+	return 0;
+}
+
+/*
+ * Import a dma-buf file descriptor as a GEM handle via
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE.  On success stores the handle in *handle
+ * and returns 0; otherwise returns the drmIoctl() error.
+ */
+int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle)
+{
+	struct drm_prime_handle args;
+	int ret;
+
+	args.fd = prime_fd;
+	args.flags = 0;
+	ret = drmIoctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);
+	if (ret)
+		return ret;
+
+	/* The kernel fills in args.handle on success. */
+	*handle = args.handle;
+	return 0;
+}
+
extern char *drmGetDeviceNameFromFd(int fd);
+extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
+extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
+
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif