intel_winsys_import_userptr() was added.
intel_winsys_read_reset_stats() was renamed to intel_winsys_get_reset_stats().
intel_bo_reference()/intel_bo_unreference() were replaced by
intel_bo_ref()/intel_bo_unref().
intel_winsys_alloc_buffer() was renamed to intel_winsys_alloc_bo(). The
old intel_winsys_alloc_bo() was removed and intel_bo_set_tiling() was
added.
writer->ptr = NULL;
}
- if (writer->bo) {
- intel_bo_unreference(writer->bo);
- writer->bo = NULL;
- }
+ intel_bo_unref(writer->bo);
+ writer->bo = NULL;
writer->used = 0;
[INTEL_CMD_WRITER_INSTRUCTION] = "instruction",
};
- return intel_winsys_alloc_buffer(winsys, writer_names[which], size, true);
+ return intel_winsys_alloc_bo(winsys, writer_names[which], size, true);
}
/**
bo = alloc_writer_bo(cmd->dev->winsys, which, writer->size);
if (bo) {
- if (writer->bo)
- intel_bo_unreference(writer->bo);
+ intel_bo_unref(writer->bo);
writer->bo = bo;
} else if (writer->bo) {
/* reuse the old bo */
/* map and copy the data over */
new_ptr = intel_bo_map(new_bo, true);
if (!new_ptr) {
- intel_bo_unreference(new_bo);
+ intel_bo_unref(new_bo);
cmd_writer_discard(cmd, which);
cmd_fail(cmd, XGL_ERROR_UNKNOWN);
return;
memcpy(new_ptr, writer->ptr, writer->used);
intel_bo_unmap(writer->bo);
- intel_bo_unreference(writer->bo);
+ intel_bo_unref(writer->bo);
writer->size = new_size;
writer->bo = new_bo;
dev->winsys = gpu->winsys;
- dev->cmd_scratch_bo = intel_winsys_alloc_buffer(dev->winsys,
+ dev->cmd_scratch_bo = intel_winsys_alloc_bo(dev->winsys,
"command buffer scratch", 4096, false);
if (!dev->cmd_scratch_bo) {
intel_dev_destroy(dev);
dev_destroy_meta_shaders(dev);
- if (dev->cmd_scratch_bo)
- intel_bo_unreference(dev->cmd_scratch_bo);
+ intel_bo_unref(dev->cmd_scratch_bo);
intel_base_destroy(&dev->base);
void intel_fence_destroy(struct intel_fence *fence)
{
- if (fence->seqno_bo)
- intel_bo_unreference(fence->seqno_bo);
+ intel_bo_unref(fence->seqno_bo);
intel_base_destroy(&fence->obj.base);
}
fence->x11 = NULL;
#endif
- if (fence->seqno_bo)
- intel_bo_unreference(fence->seqno_bo);
-
- fence->seqno_bo = seqno_bo;
- intel_bo_reference(fence->seqno_bo);
+ intel_bo_unref(fence->seqno_bo);
+ fence->seqno_bo = intel_bo_ref(seqno_bo);
}
void intel_fence_set_x11(struct intel_fence *fence,
fence->x11_serial = serial;
#endif
- if (fence->seqno_bo)
- intel_bo_unreference(fence->seqno_bo);
-
- fence->seqno_bo = seqno_bo;
- intel_bo_reference(fence->seqno_bo);
+ intel_bo_unref(fence->seqno_bo);
+ fence->seqno_bo = intel_bo_ref(seqno_bo);
}
XGL_RESULT intel_fence_wait(struct intel_fence *fence, int64_t timeout_ns)
uint32_t reg, uint64_t *val);
/**
+ * Return the numbers of submissions lost due to GPU reset.
+ *
+ * \param active_lost Number of lost active/guilty submissions
+ * \param pending_lost Number of lost pending/innocent submissions
+ */
+int
+intel_winsys_get_reset_stats(struct intel_winsys *winsys,
+ uint32_t *active_lost,
+ uint32_t *pending_lost);
+/**
* Allocate a buffer object.
*
* \param name Informative description of the bo.
- * \param tiling Tiling mode.
- * \param pitch Pitch of the bo.
- * \param height Height of the bo.
+ * \param size Size of the bo.
* \param cpu_init Will be initialized by CPU.
*/
struct intel_bo *
intel_winsys_alloc_bo(struct intel_winsys *winsys,
const char *name,
- enum intel_tiling_mode tiling,
- unsigned long pitch,
- unsigned long height,
+ unsigned long size,
bool cpu_init);
/**
- * Allocate a linear buffer object.
+ * Create a bo from a user memory pointer. Both \p userptr and \p size must
+ * be page aligned.
*/
-static inline struct intel_bo *
-intel_winsys_alloc_buffer(struct intel_winsys *winsys,
- const char *name,
- unsigned long size,
- bool cpu_init)
-{
- return intel_winsys_alloc_bo(winsys, name,
- INTEL_TILING_NONE, size, 1, cpu_init);
-}
+struct intel_bo *
+intel_winsys_import_userptr(struct intel_winsys *winsys,
+ const char *name,
+ void *userptr,
+ unsigned long size,
+ unsigned long flags);
/**
* Create a bo from a winsys handle.
unsigned long *pitch);
/**
- * Export \p bo as a winsys handle for inter-process sharing.
+ * Export \p bo as a winsys handle for inter-process sharing. \p tiling and
+ * \p pitch must match those set by \p intel_bo_set_tiling().
*/
int
intel_winsys_export_handle(struct intel_winsys *winsys,
intel_winsys_decode_bo(struct intel_winsys *winsys,
struct intel_bo *bo, int used);
-int
-intel_winsys_read_reset_stats(struct intel_winsys *winsys,
- uint32_t *active, uint32_t *pending);
-
/**
- * Increase the reference count of \p bo.
+ * Increase the reference count of \p bo. No-op when \p bo is NULL.
*/
-void
-intel_bo_reference(struct intel_bo *bo);
+struct intel_bo *
+intel_bo_ref(struct intel_bo *bo);
/**
* Decrease the reference count of \p bo. When the reference count reaches
- * zero, \p bo is destroyed.
+ * zero, \p bo is destroyed. No-op when \p bo is NULL.
*/
void
-intel_bo_unreference(struct intel_bo *bo);
+intel_bo_unref(struct intel_bo *bo);
+
+/**
+ * Set the tiling of \p bo. The info is used by GTT mapping and bo export.
+ */
+int
+intel_bo_set_tiling(struct intel_bo *bo,
+ enum intel_tiling_mode tiling,
+ unsigned long pitch);
/**
* Map \p bo for CPU access. Recursive mapping is allowed.
* sequential writes, but reads would be very slow. Callers always have a
* linear view of the bo.
*
- * map_gtt_async() is similar to map_gtt(), except that it does not block.
+ * map_async() and map_gtt_async() work similar to map() and map_gtt()
+ * respectively, except that they do not block.
*/
void *
intel_bo_map(struct intel_bo *bo, bool write_enable);
return drm_intel_reg_read(winsys->bufmgr, reg, val);
}
+int
+intel_winsys_get_reset_stats(struct intel_winsys *winsys,
+ uint32_t *active_lost,
+ uint32_t *pending_lost)
+{
+ uint32_t reset_count;
+
+ return drm_intel_get_reset_stats(winsys->ctx,
+ &reset_count, active_lost, pending_lost);
+}
+
struct intel_bo *
intel_winsys_alloc_bo(struct intel_winsys *winsys,
const char *name,
- enum intel_tiling_mode tiling,
- unsigned long pitch,
- unsigned long height,
+ unsigned long size,
bool cpu_init)
{
const unsigned int alignment = 4096; /* always page-aligned */
- unsigned long size;
drm_intel_bo *bo;
- switch (tiling) {
- case INTEL_TILING_X:
- if (pitch % 512)
- return NULL;
- break;
- case INTEL_TILING_Y:
- if (pitch % 128)
- return NULL;
- break;
- default:
- break;
- }
-
- if (pitch > ULONG_MAX / height)
- return NULL;
-
- size = pitch * height;
-
if (cpu_init) {
bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
- }
- else {
+ } else {
bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
name, size, alignment);
}
- if (bo && tiling != INTEL_TILING_NONE) {
- uint32_t real_tiling = tiling;
- int err;
+ return (struct intel_bo *) bo;
+}
- err = drm_intel_bo_set_tiling(bo, &real_tiling, pitch);
- if (err || real_tiling != tiling) {
- assert(!"tiling mismatch");
- drm_intel_bo_unreference(bo);
- return NULL;
- }
- }
+struct intel_bo *
+intel_winsys_import_userptr(struct intel_winsys *winsys,
+ const char *name,
+ void *userptr,
+ unsigned long size,
+ unsigned long flags)
+{
+ drm_intel_bo *bo;
+
+ bo = drm_intel_bo_alloc_userptr(winsys->bufmgr, name, userptr,
+ INTEL_TILING_NONE, 0, size, flags);
return (struct intel_bo *) bo;
}
intel_bo_unmap(bo);
}
-int
-intel_winsys_read_reset_stats(struct intel_winsys *winsys,
- uint32_t *active, uint32_t *pending)
+struct intel_bo *
+intel_bo_ref(struct intel_bo *bo)
{
- return drm_intel_get_reset_stats(winsys->ctx, NULL, active, pending);
+ if (bo)
+ drm_intel_bo_reference(gem_bo(bo));
+
+ return bo;
}
void
-intel_bo_reference(struct intel_bo *bo)
+intel_bo_unref(struct intel_bo *bo)
{
- drm_intel_bo_reference(gem_bo(bo));
+ if (bo)
+ drm_intel_bo_unreference(gem_bo(bo));
}
-void
-intel_bo_unreference(struct intel_bo *bo)
+int
+intel_bo_set_tiling(struct intel_bo *bo,
+ enum intel_tiling_mode tiling,
+ unsigned long pitch)
{
- drm_intel_bo_unreference(gem_bo(bo));
+ uint32_t real_tiling = tiling;
+ int err;
+
+ switch (tiling) {
+ case INTEL_TILING_X:
+ if (pitch % 512)
+ return -1;
+ break;
+ case INTEL_TILING_Y:
+ if (pitch % 128)
+ return -1;
+ break;
+ default:
+ break;
+ }
+
+ err = drm_intel_bo_set_tiling(gem_bo(bo), &real_tiling, pitch);
+ if (err || real_tiling != tiling) {
+ assert(!"tiling mismatch");
+ return -1;
+ }
+
+ return 0;
}
void *
if (!mem)
return XGL_ERROR_OUT_OF_MEMORY;
- mem->bo = intel_winsys_alloc_buffer(dev->winsys,
+ mem->bo = intel_winsys_alloc_bo(dev->winsys,
"xgl-gpu-memory", info->allocationSize, 0);
if (!mem->bo) {
intel_mem_free(mem);
void intel_mem_free(struct intel_mem *mem)
{
- if (mem->bo)
- intel_bo_unreference(mem->bo);
+ intel_bo_unref(mem->bo);
intel_base_destroy(&mem->base);
}
struct intel_bo *bo;
void *ptr;
- bo = intel_winsys_alloc_buffer(queue->dev->winsys,
+ bo = intel_winsys_alloc_bo(queue->dev->winsys,
"queue bo", size, true);
if (!bo)
return NULL;
ptr = intel_bo_map(bo, true);
if (!ptr) {
- intel_bo_unreference(bo);
+ intel_bo_unref(bo);
return NULL;
}
ret = queue_submit_bo(queue, bo, sizeof(ctx_init_cmd));
if (ret != XGL_SUCCESS) {
- intel_bo_unreference(bo);
+ intel_bo_unref(bo);
return ret;
}
return ret;
if (intel_debug & INTEL_DEBUG_HANG) {
- intel_winsys_read_reset_stats(queue->dev->winsys,
+ intel_winsys_get_reset_stats(queue->dev->winsys,
&active[0], &pending[0]);
}
if (intel_debug & INTEL_DEBUG_HANG) {
intel_bo_wait(bo, -1);
- intel_winsys_read_reset_stats(queue->dev->winsys,
+ intel_winsys_get_reset_stats(queue->dev->winsys,
&active[1], &pending[1]);
if (active[0] != active[1] || pending[0] != pending[1]) {
void intel_queue_destroy(struct intel_queue *queue)
{
- if (queue->seqno_bo)
- intel_bo_unreference(queue->seqno_bo);
-
- if (queue->atomic_bo)
- intel_bo_unreference(queue->atomic_bo);
- if (queue->select_graphics_bo)
- intel_bo_unreference(queue->select_graphics_bo);
- if (queue->select_compute_bo)
- intel_bo_unreference(queue->select_compute_bo);
+ intel_bo_unref(queue->seqno_bo);
+ intel_bo_unref(queue->atomic_bo);
+ intel_bo_unref(queue->select_graphics_bo);
+ intel_bo_unref(queue->select_compute_bo);
+
intel_base_destroy(&queue->base);
}
last_cmd = intel_cmd(pCmdBuffers[i - 1]);
if (ret == XGL_SUCCESS) {
- if (queue->seqno_bo)
- intel_bo_unreference(queue->seqno_bo);
- queue->seqno_bo = intel_cmd_get_batch(last_cmd, NULL);
- intel_bo_reference(queue->seqno_bo);
+ intel_bo_unref(queue->seqno_bo);
+ queue->seqno_bo = intel_bo_ref(intel_cmd_get_batch(last_cmd, NULL));
if (fence_ != XGL_NULL_HANDLE) {
struct intel_fence *fence = intel_fence(fence_);