* Convenience functions for buffer management methods.
*/
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
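/* Editor's note -- a hedged usage sketch, not part of the patch: the
 * convenience wrappers above just dispatch through the bufmgr vtable.
 * "bufmgr" is assumed to come from drm_intel_bufmgr_gem_init(). */
drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "example", 4096, 4096);
if (bo != NULL)
	drm_intel_bo_unreference(bo);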
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
const char *name, void *addr,
uint32_t tiling_mode, uint32_t stride,
unsigned long size, unsigned long flags)
{
	if (bufmgr->bo_alloc_userptr)
		return bufmgr->bo_alloc_userptr(bufmgr, name, addr,
						tiling_mode, stride,
						size, flags);
	return NULL;
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
	return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
				      tiling_mode, pitch, flags);
}
-void
+drm_public void
drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
-void
+drm_public void
drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
	return;
bo->bufmgr->bo_unreference(bo);
}
-int
+drm_public int
drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
-int
+drm_public int
drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
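/* Editor's note -- hedged sketch of the usual CPU upload path through
 * these wrappers: map with write enabled, copy, unmap.  "bo" and "data"
 * are assumed to exist; memcpy() needs <string.h>. */
if (drm_intel_bo_map(bo, 1) == 0) {
	memcpy(bo->virtual, data, bo->size);
	drm_intel_bo_unmap(bo);
}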
-int
+drm_public int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
-int
+drm_public int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
return 0;
}
-void
+drm_public void
drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
-void
+drm_public void
drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
-int
+drm_public int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
-int
+drm_public int
drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
}
}
-void
+drm_public void
drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
-int
+drm_public int
drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
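/* Editor's note -- hedged sketch: callers typically guard batch
 * submission with this check and flush when the working set would not
 * fit in the aperture.  "bos" and "n" are assumed. */
if (drm_intel_bufmgr_check_aperture_space(bos, n) != 0) {
	/* working set too large: submit the current batch, then retry */
}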
-int
+drm_public int
drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
	return bo->bufmgr->bo_flink(bo, name);
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
	return bo->bufmgr->bo_emit_reloc(bo, offset,
					 target_bo, target_offset,
					 read_domains, write_domain);
}
/* For fence registers, not GL fences */
-int
+drm_public int
drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
	return bo->bufmgr->bo_emit_reloc_fence(bo, offset,
					       target_bo, target_offset,
					       read_domains, write_domain);
}
-int
+drm_public int
drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
	return bo->bufmgr->bo_pin(bo, alignment);
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
	return bo->bufmgr->bo_unpin(bo);
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
	return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
*tiling_mode = I915_TILING_NONE;
return 0;
}
-int
+drm_public int
drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
	return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
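/* Editor's note -- hedged sketch: tiling is negotiated, not imposed.
 * The kernel may refuse or demote the requested mode, so *tiling_mode
 * is re-read after the call.  "pitch" is assumed to be a valid stride;
 * I915_TILING_X comes from <drm/i915_drm.h>. */
uint32_t tiling = I915_TILING_X;
if (drm_intel_bo_set_tiling(bo, &tiling, pitch) == 0 &&
    tiling == I915_TILING_NONE) {
	/* request was demoted: fall back to a linear layout */
}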
-int
+drm_public int
drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
{
if (bo->bufmgr->bo_set_softpin_offset)
	return bo->bufmgr->bo_set_softpin_offset(bo, offset);
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
	return bo->bufmgr->bo_disable_reuse(bo);
return 0;
}
-int
+drm_public int
drm_intel_bo_is_reusable(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
	return bo->bufmgr->bo_is_reusable(bo);
return 0;
}
-int
+drm_public int
drm_intel_bo_busy(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_busy)
	return bo->bufmgr->bo_busy(bo);
return 0;
}
-int
+drm_public int
drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
	return bo->bufmgr->bo_madvise(bo, madv);
return -1;
}
-int
+drm_public int
drm_intel_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
{
if (bo->bufmgr->bo_use_48b_address_range) {
	bo->bufmgr->bo_use_48b_address_range(bo, enable);
	return 0;
}
return -ENODEV;
}
-int
+drm_public int
drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
-int
+drm_public int
drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
if (bufmgr->get_pipe_from_crtc_id)
	return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
return -1;
}
#endif
-int
+drm_public int
drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total)
{
* -- just evict everything
* -- and wait for idle
*/
-void
+drm_public void
drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
4096);
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
* Used by the X Server on LeaveVT, when the card memory is no longer our
* own.
*/
-void
+drm_public void
drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
pthread_mutex_unlock(&bufmgr_fake->lock);
}
-void
+drm_public void
drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int
*last_dispatch)
bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
}
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
drm_intel_bufmgr_fake_init(int fd, unsigned long low_offset,
void *low_virtual, unsigned long size,
volatile unsigned int *last_dispatch)
* This can be used when one application needs to pass a buffer object
* to another.
*/
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
return 0;
}
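/* Editor's note -- hedged sketch of the flink hand-off the comment above
 * describes: export a global name in one process, import it in another.
 * How "name" travels between the processes (some IPC) is assumed. */
uint32_t name;
if (drm_intel_bo_flink(bo, &name) == 0) {
	drm_intel_bo *shared =
		drm_intel_bo_gem_create_from_name(bufmgr, "imported", name);
	/* both handles now reference the same backing pages */
}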
-int
+drm_public int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
* undefined).
*/
-int
+drm_public int
drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
return ret;
}
-int
+drm_public int
drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
* Note that some kernels have broken the infinite wait for negative values
* promise; upgrade to the latest stable kernel if this is the case.
*/
-int
+drm_public int
drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
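/* Editor's note -- hedged sketch of a bounded wait, per the comment
 * above.  That the kernel reports expiry as -ETIME is an assumption
 * about its return convention. */
if (drm_intel_gem_bo_wait(bo, 1000000000) == -ETIME) {
	/* still busy after 1s; a negative timeout_ns would block forever */
}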
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
-void
+drm_public void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
read_domains, write_domain, true);
}
-int
+drm_public int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
*
* This also removes all softpinned targets being referenced by the BO.
*/
-void
+drm_public void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
}
}
-void
+drm_public void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
-1, NULL, flags);
}
-int
+drm_public int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
}
-int
+drm_public int
drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
drm_intel_context *ctx,
int used,
return 0;
}
-drm_intel_bo *
+drm_public drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
return NULL;
}
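/* Editor's note -- hedged sketch of the prime (dma-buf) counterpart to
 * flink: export an fd and re-import it, e.g. after passing it to another
 * process via SCM_RIGHTS.  Error handling is elided; close() needs
 * <unistd.h>. */
int prime_fd;
drm_intel_bo_gem_export_to_prime(bo, &prime_fd);
drm_intel_bo *imported =
	drm_intel_bo_gem_create_from_prime(bufmgr, prime_fd, bo->size);
close(prime_fd);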
-int
+drm_public int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
* which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync,
* or subsequent execbufs involving the bo will generate EINVAL.
*/
-void
+drm_public void
drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* function can be used to restore the implicit sync before subsequent
* rendering.
*/
-void
+drm_public void
drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
* Query whether the kernel supports disabling of its implicit synchronisation
* before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
*/
-int
+drm_public int
drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
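/* Editor's note -- hedged sketch tying the three implicit-sync entry
 * points together: probe for kernel support once, opt the bo out before
 * explicit fencing, opt it back in afterwards. */
if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr)) {
	drm_intel_gem_bo_disable_implicit_sync(bo);
	/* ... render with explicit fences ... */
	drm_intel_gem_bo_enable_implicit_sync(bo);
}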
* allocation. If this option is not enabled, all relocs will have a fence
* register allocated.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
}
}
-void
+drm_public void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
return devid;
}
-int
+drm_public int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
* This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
* for it to have any effect.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename)
{
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
-void
+drm_public void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
"See the intel_aubdump man page for more details.\n");
}
-drm_intel_context *
+drm_public drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
return context;
}
-int
+drm_public int
drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
{
if (ctx == NULL)
	return -EINVAL;
*ctx_id = ctx->ctx_id;
return 0;
}
-void
+drm_public void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
free(ctx);
}
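/* Editor's note -- hedged sketch: hardware contexts keep GPU state
 * isolated between clients.  "batch_bo" and "used" (bytes of batch
 * actually written) are assumed. */
drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
if (ctx != NULL) {
	drm_intel_gem_bo_context_exec(batch_bo, ctx, used, 0);
	drm_intel_gem_context_destroy(ctx);
}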
-int
+drm_public int
drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
return ret;
}
-int
+drm_public int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
return ret;
}
-int
+drm_public int
drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
{
drm_i915_getparam_t gp;
return 0;
}
-int
+drm_public int
drm_intel_get_eu_total(int fd, unsigned int *eu_total)
{
drm_i915_getparam_t gp;
return 0;
}
-int
+drm_public int
drm_intel_get_pooled_eu(int fd)
{
drm_i915_getparam_t gp;
return ret;
}
-int
+drm_public int
drm_intel_get_min_eu_in_pool(int fd)
{
drm_i915_getparam_t gp;
* default state (no annotations), call this function with a \c count
* of zero.
*/
-void
-drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
+drm_public void drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
{
}
}
-void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return bo_gem->gtt_virtual;
}
-void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
return bo_gem->mem_virtual;
}
-void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
+drm_public void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
*
* \param fd File descriptor of the opened DRM device.
*/
-drm_intel_bufmgr *
+drm_public drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
return 1;
}
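/* Editor's note -- hedged bring-up sketch for the initializer documented
 * above.  The render-node path is an assumption; a batch_size of 4096 is
 * typical.  open() needs <fcntl.h>. */
int fd = open("/dev/dri/renderD128", O_RDWR);
drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
if (bufmgr != NULL)
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);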
-struct drm_intel_decode *
+drm_public struct drm_intel_decode *
drm_intel_decode_context_alloc(uint32_t devid)
{
struct drm_intel_decode *ctx;
return ctx;
}
-void
+drm_public void
drm_intel_decode_context_free(struct drm_intel_decode *ctx)
{
free(ctx);
}
-void
+drm_public void
drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end)
{
ctx->dump_past_end = !!dump_past_end;
}
-void
+drm_public void
drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset, int count)
{
ctx->base_data = data;
ctx->base_hw_offset = hw_offset;
ctx->base_count = count;
}
-void
+drm_public void
drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail)
{
ctx->head = head;
ctx->tail = tail;
}
-void
+drm_public void
drm_intel_decode_set_output_file(struct drm_intel_decode *ctx,
FILE *output)
{
	ctx->out = output;
}
* \param count number of DWORDs to decode in the batch buffer
* \param hw_offset hardware address for the buffer
*/
-void
+drm_public void
drm_intel_decode(struct drm_intel_decode *ctx)
{
int ret;
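/* Editor's note -- hedged sketch of a decode session using the setters
 * above: allocate a context for the device id, point it at a batch,
 * decode to stderr.  "devid", "batch" and "count" are assumed. */
struct drm_intel_decode *dctx = drm_intel_decode_context_alloc(devid);
if (dctx != NULL) {
	drm_intel_decode_set_batch_pointer(dctx, batch, 0, count);
	drm_intel_decode_set_output_file(dctx, stderr);
	drm_intel_decode(dctx);
	drm_intel_decode_context_free(dctx);
}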
#if HAVE_VISIBILITY
# define drm_private __attribute__((visibility("hidden")))
+# define drm_public __attribute__((visibility("default")))
#else
# define drm_private
+# define drm_public
#endif
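/* Editor's note -- hedged illustration of what the macro pair buys: when
 * the library is built with visibility support, only drm_public symbols
 * survive into the dynamic symbol table, so internal helpers can no
 * longer be linked against from outside.  Both names below are
 * hypothetical, for illustration only. */
drm_public int drm_example_entry(void);  /* exported to library users */
drm_private int example_helper(void);    /* hidden at dynamic link time */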