* Dave Airlie <airlied@linux.ie>
*/
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include "i915_drm.h"
#include "uthash.h"
-#ifdef HAVE_VALGRIND
+#if HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
unsigned int bo_reuse : 1;
unsigned int no_exec : 1;
unsigned int has_vebox : 1;
+ unsigned int has_exec_async : 1;
bool fenced_relocs;
struct {
uint32_t swizzle_mode;
unsigned long stride;
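+
+	/**
+	 * Extra EXEC_OBJECT_* flags (PINNED, SUPPORTS_48B_ADDRESS, ASYNC)
+	 * carried on the bo and OR'ed into the execobject flags at
+	 * execbuf time.
+	 */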
+ unsigned long kflags;
+
time_t free_time;
/** Array passed to the DRM containing relocation information. */
* Boolean of whether the GPU is definitely not accessing the buffer.
*
* This is only valid when reusable, since non-reusable
- * buffers are those that have been shared wth other
+ * buffers are those that have been shared with other
* processes, so we don't know their state.
*/
bool idle;
bool is_userptr;
/**
- * Boolean of whether this buffer can be placed in the full 48-bit
- * address range on gen8+.
- *
- * By default, buffers will be keep in a 32-bit range, unless this
- * flag is explicitly set.
- */
- bool use_48b_address_range;
-
- /**
- * Whether this buffer is softpinned at offset specified by the user
- */
- bool is_softpin;
-
- /**
-	 * Size in bytes of this buffer and its relocation descendents.
+	 * Size in bytes of this buffer and its relocation descendants.
*
* Used to avoid costly tree walking in
*/
int reloc_tree_fences;
- /** Flags that we may need to do the SW_FINSIH ioctl on unmap. */
+ /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
bool mapped_cpu_write;
};
if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
- bo_gem->is_softpin ? "*" : "",
+ bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
bo_gem->name);
continue;
}
"%d (%s)@0x%08x %08x + 0x%08x\n",
i,
bo_gem->gem_handle,
- bo_gem->is_softpin ? "*" : "",
+ bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
bo_gem->name,
upper_32_bits(bo_gem->relocs[j].offset),
lower_32_bits(bo_gem->relocs[j].offset),
"%d *(%s)@0x%08x %08x\n",
i,
bo_gem->gem_handle,
- bo_gem->is_softpin ? "*" : "",
+ bo_gem->kflags & EXEC_OBJECT_PINNED ? "*" : "",
bo_gem->name,
target_gem->gem_handle,
target_gem->name,
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
- int flags = 0;
+ unsigned long flags;
+ flags = 0;
if (need_fence)
flags |= EXEC_OBJECT_NEEDS_FENCE;
- if (bo_gem->use_48b_address_range)
- flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
- if (bo_gem->is_softpin)
- flags |= EXEC_OBJECT_PINNED;
if (bo_gem->validate_index != -1) {
bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
bufmgr_gem->exec2_objects[index].alignment = bo->align;
- bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
- bo->offset64 : 0;
- bufmgr_gem->exec_bos[index] = bo;
- bufmgr_gem->exec2_objects[index].flags = flags;
+ bufmgr_gem->exec2_objects[index].offset = bo->offset64;
+ bufmgr_gem->exec2_objects[index].flags = bo_gem->kflags | flags;
bufmgr_gem->exec2_objects[index].rsvd1 = 0;
bufmgr_gem->exec2_objects[index].rsvd2 = 0;
+ bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec_count++;
}
} else {
return false;
}
- return (ret == 0 && busy.busy);
}
static int
}
bo_gem->gem_handle = create.handle;
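+
+	/* gem_handle is the hash key, and the err_free path unlinks the
+	 * bo via drm_intel_gem_bo_free(), which removes it from
+	 * handle_table; so insert it as soon as the handle is known.
+	 */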
+ HASH_ADD(handle_hh, bufmgr_gem->handle_table,
+ gem_handle, sizeof(bo_gem->gem_handle),
+ bo_gem);
+
bo_gem->bo.handle = bo_gem->gem_handle;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->bo.align = alignment;
tiling_mode,
stride))
goto err_free;
-
- HASH_ADD(handle_hh, bufmgr_gem->handle_table,
- gem_handle, sizeof(bo_gem->gem_handle),
- bo_gem);
}
bo_gem->name = name;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = true;
- bo_gem->use_48b_address_range = false;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
pthread_mutex_unlock(&bufmgr_gem->lock);
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = false;
- bo_gem->use_48b_address_range = false;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
pthread_mutex_unlock(&bufmgr_gem->lock);
tiling_mode, stride, size, flags);
}
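+
+/*
+ * Query the tiling mode and bit-6 swizzling of a GEM handle. Kernels
+ * without GET_TILING support reject the ioctl with EOPNOTSUPP; treat
+ * that as untiled and report the zero-initialised I915_TILING_NONE
+ * rather than failing.
+ */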
+static int get_tiling_mode(drm_intel_bufmgr_gem *bufmgr_gem,
+ uint32_t gem_handle,
+ uint32_t *tiling_mode,
+ uint32_t *swizzle_mode)
+{
+ struct drm_i915_gem_get_tiling get_tiling = {
+ .handle = gem_handle,
+ };
+ int ret;
+
+ ret = drmIoctl(bufmgr_gem->fd,
+ DRM_IOCTL_I915_GEM_GET_TILING,
+ &get_tiling);
+ if (ret != 0 && errno != EOPNOTSUPP)
+ return ret;
+
+ *tiling_mode = get_tiling.tiling_mode;
+ *swizzle_mode = get_tiling.swizzle_mode;
+
+ return 0;
+}
+
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
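+ *
+ * A minimal sketch of the exchange (hypothetical names, error handling
+ * omitted): the exporter publishes a global name with
+ * drm_intel_bo_flink(), which the importer then wraps:
+ *
+ *	uint32_t name;
+ *	drm_intel_bo_flink(bo, &name);
+ *	... transfer name to the importing process ...
+ *	bo = drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);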
*/
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
drm_intel_bo_gem *bo_gem;
int ret;
struct drm_gem_open open_arg;
- struct drm_i915_gem_get_tiling get_tiling;
/* At the moment most applications only have a few named bo.
* For instance, in a DRI client only the render buffers passed
bo_gem->bo.handle = open_arg.handle;
bo_gem->global_name = handle;
bo_gem->reusable = false;
- bo_gem->use_48b_address_range = false;
HASH_ADD(handle_hh, bufmgr_gem->handle_table,
gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
HASH_ADD(name_hh, bufmgr_gem->name_table,
global_name, sizeof(bo_gem->global_name), bo_gem);
- memclear(get_tiling);
- get_tiling.handle = bo_gem->gem_handle;
- ret = drmIoctl(bufmgr_gem->fd,
- DRM_IOCTL_I915_GEM_GET_TILING,
- &get_tiling);
+ ret = get_tiling_mode(bufmgr_gem, bo_gem->gem_handle,
+ &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
if (ret != 0)
goto err_unref;
- bo_gem->tiling_mode = get_tiling.tiling_mode;
- bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
for (i = 0; i < bo_gem->softpin_target_count; i++)
drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
time);
+ bo_gem->kflags = 0;
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->softpin_target_count = 0;
return 0;
}
-int
+drm_public int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
* undefined).
*/
-int
+drm_public int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
-#ifdef HAVE_VALGRIND
+#if HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
#endif
int ret;
}
-	/* We need to unmap after every innovation as we cannot track
-	 * an open vma for every bo as that will exhaasut the system
+	/* We need to unmap after every invocation as we cannot track
+	 * an open vma for every bo as that will exhaust the system
* limits and cause later failures.
*/
if (--bo_gem->map_count == 0) {
return ret;
}
-int
+drm_public int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
- * Note that some kernels have broken the inifite wait for negative values
- * promise, upgrade to latest stable kernels if this is the case.
+ * Note that some kernels have broken the infinite wait for negative values
+ * promise; upgrade to the latest stable kernel if this is the case.
*/
-int
+drm_public int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
-void
+drm_public void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
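+/**
+ * Opt the buffer in or out of the full 48-bit address range on gen8+.
+ * By default buffers are kept in a 32-bit range unless this is enabled.
+ */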
drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- bo_gem->use_48b_address_range = enable;
+
+ if (enable)
+ bo_gem->kflags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+ else
+ bo_gem->kflags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
}
static int
return -ENOMEM;
}
- if (!target_bo_gem->is_softpin)
+ if (!(target_bo_gem->kflags & EXEC_OBJECT_PINNED))
return -EINVAL;
if (target_bo_gem == bo_gem)
return -EINVAL;
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
- if (target_bo_gem->is_softpin)
+ if (target_bo_gem->kflags & EXEC_OBJECT_PINNED)
return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
else
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain, true);
}
-int
+drm_public int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
*
* This also removes all softpinned targets being referenced by the BO.
*/
-void
+drm_public void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
/* If we're seeing softpinned object here it means that the kernel
* has relocated our object... Indicating a programming error
*/
- assert(!bo_gem->is_softpin);
+ assert(!(bo_gem->kflags & EXEC_OBJECT_PINNED));
DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
bo_gem->gem_handle, bo_gem->name,
upper_32_bits(bo->offset64),
}
}
-void
+drm_public void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
+ int in_fence, int *out_fence,
unsigned int flags)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
else
i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
execbuf.rsvd2 = 0;
+ if (in_fence != -1) {
+ execbuf.rsvd2 = in_fence;
+ execbuf.flags |= I915_EXEC_FENCE_IN;
+ }
+ if (out_fence != NULL) {
+ *out_fence = -1;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ }
if (bufmgr_gem->no_exec)
goto skip_execution;
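+	/* The _WR ioctl variant lets the kernel write back into the
+	 * execbuf struct, which is required to return the out-fence in
+	 * rsvd2 when I915_EXEC_FENCE_OUT is set.
+	 */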
ret = drmIoctl(bufmgr_gem->fd,
- DRM_IOCTL_I915_GEM_EXECBUFFER2,
+ DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
&execbuf);
if (ret != 0) {
ret = -errno;
}
drm_intel_update_buffer_offsets2(bufmgr_gem);
+ if (ret == 0 && out_fence != NULL)
+ *out_fence = execbuf.rsvd2 >> 32;
+
skip_execution:
if (bufmgr_gem->bufmgr.debug)
drm_intel_gem_dump_validation_list(bufmgr_gem);
int DR4)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
- I915_EXEC_RENDER);
+ -1, NULL, I915_EXEC_RENDER);
}
static int
unsigned int flags)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
- flags);
+ -1, NULL, flags);
}
-int
+drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
- return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
+ return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
+}
+
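+/**
+ * Execute the batch with explicit fence handling.
+ *
+ * If in_fence is not -1, it is a sync_file fd that the GPU will wait
+ * upon before executing the batch (I915_EXEC_FENCE_IN). If out_fence
+ * is non-NULL, it is set to a new sync_file fd signalling completion
+ * of the batch (I915_EXEC_FENCE_OUT), or to -1 on failure; the caller
+ * owns the returned fd and must close() it.
+ *
+ * A minimal sketch (hypothetical names, error handling omitted):
+ *
+ *	int fence = -1;
+ *	drm_intel_gem_bo_fence_exec(batch, ctx, used, -1, &fence, 0);
+ *	... hand fence to another consumer ...
+ *	close(fence);
+ */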
+drm_public int
+drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
+ drm_intel_context *ctx,
+ int used,
+ int in_fence,
+ int *out_fence,
+ unsigned int flags)
+{
+ return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
}
static int
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- bo_gem->is_softpin = true;
bo->offset64 = offset;
bo->offset = offset;
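+
+	/* EXEC_OBJECT_PINNED tells execbuf to place the bo at the address
+	 * supplied in offset64 instead of relocating it.
+	 */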
+ bo_gem->kflags |= EXEC_OBJECT_PINNED;
+
return 0;
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
int ret;
uint32_t handle;
drm_intel_bo_gem *bo_gem;
- struct drm_i915_gem_get_tiling get_tiling;
pthread_mutex_lock(&bufmgr_gem->lock);
ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = false;
- bo_gem->use_48b_address_range = false;
- memclear(get_tiling);
- get_tiling.handle = bo_gem->gem_handle;
- if (drmIoctl(bufmgr_gem->fd,
- DRM_IOCTL_I915_GEM_GET_TILING,
- &get_tiling))
+ ret = get_tiling_mode(bufmgr_gem, handle,
+ &bo_gem->tiling_mode, &bo_gem->swizzle_mode);
+ if (ret)
goto err;
- bo_gem->tiling_mode = get_tiling.tiling_mode;
- bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
return NULL;
}
-int
+drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
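+
+	/* DRM_RDWR requests an O_RDWR dma-buf fd, so that the importer
+	 * can mmap() it for writing as well as reading.
+	 */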
if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
- DRM_CLOEXEC, prime_fd) != 0)
+ DRM_CLOEXEC | DRM_RDWR, prime_fd) != 0)
return -errno;
bo_gem->reusable = false;
pthread_mutex_lock(&bufmgr_gem->lock);
if (!bo_gem->global_name) {
+ bo_gem->global_name = flink.name;
+ bo_gem->reusable = false;
+
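+		/* global_name is the hash key, so it must be set before
+		 * the bo is added to name_table.
+		 */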
HASH_ADD(name_hh, bufmgr_gem->name_table,
global_name, sizeof(bo_gem->global_name),
bo_gem);
- bo_gem->global_name = flink.name;
- bo_gem->reusable = false;
}
pthread_mutex_unlock(&bufmgr_gem->lock);
}
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
}
/**
+ * Disables implicit synchronisation before executing the bo
+ *
+ * This will cause rendering corruption unless you correctly manage explicit
+ * fences for all rendering involving this buffer - including use by others.
+ * Disabling the implicit serialisation is only required if that serialisation
+ * is too coarse (for example, you have split the buffer into many
+ * non-overlapping regions and are sharing the whole buffer between concurrent
+ * independent command streams).
+ *
+ * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
+ * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync(),
+ * or subsequent execbufs involving the bo will generate EINVAL.
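+ *
+ * A minimal sketch (hypothetical, with bufmgr and bo created elsewhere):
+ *
+ *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr))
+ *		drm_intel_gem_bo_disable_implicit_sync(bo);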
+ */
+drm_public void
+drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->kflags |= EXEC_OBJECT_ASYNC;
+}
+
+/**
+ * Enables implicit synchronisation before executing the bo
+ *
+ * This is the default behaviour of the kernel, to wait upon prior writes
+ * completing on the object before rendering with it, or to wait for prior
+ * reads to complete before writing into the object.
+ * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
+ * the kernel never to insert a stall before using the object. Then this
+ * function can be used to restore the implicit sync before subsequent
+ * rendering.
+ */
+drm_public void
+drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
+}
+
+/**
+ * Query whether the kernel supports disabling of its implicit synchronisation
+ * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
+ */
+drm_public int
+drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+
+ return bufmgr_gem->has_exec_async;
+}
+
+/**
* Enable use of fenced reloc type.
*
* New code should enable this to avoid unnecessary fence register
* allocation. If this option is not enabled, all relocs will have fence
* register allocated.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
}
}
-void
+drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
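+
+/*
+ * Translate an INTEL_DEVID_OVERRIDE value into a PCI ID: either one of
+ * the codenames below (e.g. INTEL_DEVID_OVERRIDE=skl), or a numeric ID
+ * parsed as-is (e.g. INTEL_DEVID_OVERRIDE=0x1912).
+ */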
+static int
+parse_devid_override(const char *devid_override)
+{
+ static const struct {
+ const char *name;
+ int pci_id;
+ } name_map[] = {
+ { "brw", PCI_CHIP_I965_GM },
+ { "g4x", PCI_CHIP_GM45_GM },
+ { "ilk", PCI_CHIP_ILD_G },
+ { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
+ { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
+ { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
+ { "byt", PCI_CHIP_VALLEYVIEW_3 },
+ { "bdw", 0x1620 | BDW_ULX },
+ { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
+ { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(name_map); i++) {
+ if (!strcmp(name_map[i].name, devid_override))
+ return name_map[i].pci_id;
+ }
+
+ return strtod(devid_override, NULL);
+}
+
/**
* Get the PCI ID for the device. This can be overridden by setting the
* INTEL_DEVID_OVERRIDE environment variable to the desired ID.
devid_override = getenv("INTEL_DEVID_OVERRIDE");
if (devid_override) {
bufmgr_gem->no_exec = true;
- return strtod(devid_override, NULL);
+ return parse_devid_override(devid_override);
}
}
return devid;
}
-int
+drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
* This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
* for it to have any effect.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
"See the intel_aubdump man page for more details.\n");
}
-drm_intel_context *
+drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
return context;
}
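+/**
+ * Return the kernel id of the hardware context, for use with kernel
+ * interfaces that take a context id directly (for example the i915
+ * perf interface). Returns -EINVAL if ctx is NULL.
+ */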
-void
+drm_public int
+drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
+{
+ if (ctx == NULL)
+ return -EINVAL;
+
+ *ctx_id = ctx->ctx_id;
+
+ return 0;
+}
+
+drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
free(ctx);
}
-int
+drm_public int
drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
return ret;
}
-int
+drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
return ret;
}
-int
+drm_public int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
drm_i915_getparam_t gp;
return 0;
}
-int
+drm_public int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
drm_i915_getparam_t gp;
return 0;
}
-int
+drm_public int
drm_intel_get_pooled_eu(int fd)
{
drm_i915_getparam_t gp;
return ret;
}
-int
+drm_public int
drm_intel_get_min_eu_in_pool(int fd)
{
drm_i915_getparam_t gp;
* default state (no annotations), call this function with a \c count
* of zero.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
{
}
}
-void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return bo_gem->gtt_virtual;
}
-void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return bo_gem->mem_virtual;
}
-void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
*
* \param fd File descriptor of the opened DRM device.
*/
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
bufmgr_gem->gen = 7;
else if (IS_GEN8(bufmgr_gem->pci_device))
bufmgr_gem->gen = 8;
- else if (IS_GEN9(bufmgr_gem->pci_device))
- bufmgr_gem->gen = 9;
- else {
+ else if (!intel_get_genx(bufmgr_gem->pci_device, &bufmgr_gem->gen)) {
free(bufmgr_gem);
bufmgr_gem = NULL;
goto exit;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
+ gp.param = I915_PARAM_HAS_EXEC_ASYNC;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ bufmgr_gem->has_exec_async = ret == 0;
+
bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;