pthread_mutex_destroy(&cache->mutex);
}
-#define ANV_BO_CACHE_SUPPORTED_FLAGS \
- (EXEC_OBJECT_WRITE | \
- EXEC_OBJECT_ASYNC | \
- EXEC_OBJECT_SUPPORTS_48B_ADDRESS | \
- EXEC_OBJECT_PINNED | \
- EXEC_OBJECT_CAPTURE)
-
-static uint32_t
-anv_bo_alloc_flags_to_bo_flags(struct anv_device *device,
- enum anv_bo_alloc_flags alloc_flags)
-{
- struct anv_physical_device *pdevice = device->physical;
-
- uint64_t bo_flags = EXEC_OBJECT_PINNED;
-
- if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
- bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
-
- if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
- INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
- pdevice->has_exec_capture)
- bo_flags |= EXEC_OBJECT_CAPTURE;
-
- if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
- assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
- bo_flags |= EXEC_OBJECT_WRITE;
- }
-
- if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
- bo_flags |= EXEC_OBJECT_ASYNC;
-
- return bo_flags;
-}
-
static void
anv_bo_unmap_close(struct anv_device *device, struct anv_bo *bo)
{
assert(!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_CCS));
const uint32_t bo_flags =
- anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
- assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
/* The kernel is going to give us whole pages anyway. And we
* also need 4KB alignment for 1MB AUX buffer that follows
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
- anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
- assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
uint32_t gem_handle = device->kmd_backend->gem_create_userptr(device, host_ptr, size);
if (!gem_handle)
struct anv_bo_cache *cache = &device->bo_cache;
const uint32_t bo_flags =
- anv_bo_alloc_flags_to_bo_flags(device, alloc_flags);
- assert(bo_flags == (bo_flags & ANV_BO_CACHE_SUPPORTED_FLAGS));
+ device->kmd_backend->bo_alloc_flags_to_bo_flags(device, alloc_flags);
pthread_mutex_lock(&cache->mutex);
return VK_ERROR_UNKNOWN;
}
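+
+/* Stub: there is no kernel-mode driver to execute utrace submissions on. */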
+static VkResult
+stub_queue_exec_trace(struct anv_queue *queue, struct anv_utrace_submit *submit)
+{
+ return VK_ERROR_UNKNOWN;
+}
+
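+/* Stub: no KMD behind this backend, so there are no kernel BO flags to set. */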
+static uint32_t
+stub_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ return 0;
+}
+
void *
anv_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
uint64_t size, VkMemoryPropertyFlags property_flags)
.vm_unbind_bo = stub_vm_bind_bo,
.execute_simple_batch = stub_execute_simple_batch,
.queue_exec_locked = stub_queue_exec_locked,
+ .queue_exec_trace = stub_queue_exec_trace,
+ .bo_alloc_flags_to_bo_flags = stub_bo_alloc_flags_to_bo_flags,
};
return &stub_backend;
}
uint32_t perf_query_pass);
VkResult (*queue_exec_trace)(struct anv_queue *queue,
struct anv_utrace_submit *submit);
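+   /* Translate driver-level anv_bo_alloc_flags into KMD-specific BO flags. */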
+ uint32_t (*bo_alloc_flags_to_bo_flags)(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags);
};
const struct anv_kmd_backend *anv_kmd_backend_get(enum intel_kmd_type type);
return userptr.handle;
}
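+
+/* Translate driver allocation flags into i915 EXEC_OBJECT_* execbuf flags. */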
+static uint32_t
+i915_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ struct anv_physical_device *pdevice = device->physical;
+
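+   /* anv softpins every BO, so each exec object carries its own GPU address. */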
+ uint64_t bo_flags = EXEC_OBJECT_PINNED;
+
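+   /* Unless restricted to a 32-bit address, allow the full 48-bit VA space. */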
+ if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
+ bo_flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
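+   /* Include the BO in kernel error-state captures when requested and the
+    * kernel supports it.
+    */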
+ if (((alloc_flags & ANV_BO_ALLOC_CAPTURE) ||
+ INTEL_DEBUG(DEBUG_CAPTURE_ALL)) &&
+ pdevice->has_exec_capture)
+ bo_flags |= EXEC_OBJECT_CAPTURE;
+
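+   /* Implicit write tracking is only meaningful with implicit sync enabled. */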
+ if (alloc_flags & ANV_BO_ALLOC_IMPLICIT_WRITE) {
+ assert(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC);
+ bo_flags |= EXEC_OBJECT_WRITE;
+ }
+
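+   /* Opt out of the kernel's implicit synchronization for BOs that do not
+    * rely on it.
+    */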
+ if (!(alloc_flags & ANV_BO_ALLOC_IMPLICIT_SYNC) && pdevice->has_exec_async)
+ bo_flags |= EXEC_OBJECT_ASYNC;
+
+ return bo_flags;
+}
+
const struct anv_kmd_backend *
anv_i915_kmd_backend_get(void)
{
.execute_simple_batch = i915_execute_simple_batch,
.queue_exec_locked = i915_queue_exec_locked,
.queue_exec_trace = i915_queue_exec_trace,
+ .bo_alloc_flags_to_bo_flags = i915_bo_alloc_flags_to_bo_flags,
};
return &i915_backend;
}
return device->workaround_bo->gem_handle;
}
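+
+/* Xe has no i915-style EXEC_OBJECT_* flags, so there is nothing to
+ * translate here.
+ */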
+static uint32_t
+xe_bo_alloc_flags_to_bo_flags(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ return 0;
+}
+
const struct anv_kmd_backend *
anv_xe_kmd_backend_get(void)
{
.execute_simple_batch = xe_execute_simple_batch,
.queue_exec_locked = xe_queue_exec_locked,
.queue_exec_trace = xe_queue_exec_utrace_locked,
+ .bo_alloc_flags_to_bo_flags = xe_bo_alloc_flags_to_bo_flags,
};
return &xe_backend;
}