From: José Roberto de Souza <jose.souza@intel.com>
Date: Thu, 9 Feb 2023 16:57:11 +0000 (-0800)
Subject: anv: Add gem VM bind and unbind to backend
X-Git-Tag: upstream/23.3.3~11789
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=37fa2fa30e8d0aae8549ab203b94084956fef6f3;p=platform%2Fupstream%2Fmesa.git

anv: Add gem VM bind and unbind to backend

Not using it yet, that will be done in the next patch.

Xe only supports submission using VM.
For i915 the backend functions are just a noop.

Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Part-of:
---

diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index a875c31..42b8d4d 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -3169,7 +3169,7 @@ anv_device_setup_context_or_vm(struct anv_device *device,
                                const VkDeviceCreateInfo *pCreateInfo,
                                const uint32_t num_queues)
 {
-   switch (anv_kmd_type_get(device)) {
+   switch (device->info->kmd_type) {
    case INTEL_KMD_TYPE_I915:
       return anv_i915_device_setup_context(device, pCreateInfo, num_queues);
    case INTEL_KMD_TYPE_XE:
@@ -3183,7 +3183,7 @@ anv_device_setup_context_or_vm(struct anv_device *device,
 static bool
 anv_device_destroy_context_or_vm(struct anv_device *device)
 {
-   switch (anv_kmd_type_get(device)) {
+   switch (device->info->kmd_type) {
    case INTEL_KMD_TYPE_I915:
       return intel_gem_destroy_context(device->fd, device->context_id);
    case INTEL_KMD_TYPE_XE:
diff --git a/src/intel/vulkan/anv_gem_stubs.c b/src/intel/vulkan/anv_gem_stubs.c
index 1508d5c..e9e5814 100644
--- a/src/intel/vulkan/anv_gem_stubs.c
+++ b/src/intel/vulkan/anv_gem_stubs.c
@@ -150,12 +150,26 @@ anv_gem_fd_to_handle(struct anv_device *device, int fd)
    unreachable("Unused");
 }
 
+static int
+stub_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
+{
+   return 0;
+}
+
+static int
+stub_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
+{
+   return 0;
+}
+
 const struct anv_kmd_backend *anv_stub_kmd_backend_get(void)
 {
    static const struct anv_kmd_backend stub_backend = {
       .gem_create = stub_gem_create,
       .gem_close = stub_gem_close,
       .gem_mmap = stub_gem_mmap,
+      .gem_vm_bind = stub_gem_vm_bind,
+      .gem_vm_unbind = stub_gem_vm_unbind,
       .execute_simple_batch = stub_execute_simple_batch,
       .queue_exec_locked = stub_queue_exec_locked,
    };
diff --git a/src/intel/vulkan/anv_kmd_backend.c b/src/intel/vulkan/anv_kmd_backend.c
index f72d87d..8ce882b 100644
--- a/src/intel/vulkan/anv_kmd_backend.c
+++ b/src/intel/vulkan/anv_kmd_backend.c
@@ -40,9 +40,3 @@ anv_kmd_backend_get(enum intel_kmd_type type)
       return NULL;
    }
 }
-
-inline enum intel_kmd_type
-anv_kmd_type_get(struct anv_device *device)
-{
-   return device->info->kmd_type;
-}
diff --git a/src/intel/vulkan/anv_kmd_backend.h b/src/intel/vulkan/anv_kmd_backend.h
index 97479f1..b3961cf 100644
--- a/src/intel/vulkan/anv_kmd_backend.h
+++ b/src/intel/vulkan/anv_kmd_backend.h
@@ -52,6 +52,8 @@ struct anv_kmd_backend {
    void *(*gem_mmap)(struct anv_device *device, struct anv_bo *bo,
                      uint64_t offset, uint64_t size,
                      VkMemoryPropertyFlags property_flags);
+   int (*gem_vm_bind)(struct anv_device *device, struct anv_bo *bo);
+   int (*gem_vm_unbind)(struct anv_device *device, struct anv_bo *bo);
    VkResult (*execute_simple_batch)(struct anv_queue *queue,
                                     struct anv_bo *batch_bo,
                                     uint32_t batch_bo_size);
@@ -68,9 +70,6 @@ struct anv_kmd_backend {
 
 const struct anv_kmd_backend *anv_kmd_backend_get(enum intel_kmd_type type);
 
-enum intel_kmd_type
-anv_kmd_type_get(struct anv_device *device);
-
 /* Internal functions, should only be called by anv_kmd_backend_get() */
 const struct anv_kmd_backend *anv_i915_kmd_backend_get(void);
 const struct anv_kmd_backend *anv_xe_kmd_backend_get(void);
diff --git a/src/intel/vulkan/i915/anv_kmd_backend.c b/src/intel/vulkan/i915/anv_kmd_backend.c
index 3b77a98..1b21953 100644
--- a/src/intel/vulkan/i915/anv_kmd_backend.c
+++ b/src/intel/vulkan/i915/anv_kmd_backend.c
@@ -149,6 +149,18 @@ i915_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
    return i915_gem_mmap_legacy(device, bo, offset, size, flags);
 }
 
+static int
+i915_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
+{
+   return 0;
+}
+
+static int
+i915_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
+{
+   return 0;
+}
+
 const struct anv_kmd_backend *
 anv_i915_kmd_backend_get(void)
 {
@@ -156,6 +168,8 @@ anv_i915_kmd_backend_get(void)
       .gem_create = i915_gem_create,
       .gem_close = i915_gem_close,
       .gem_mmap = i915_gem_mmap,
+      .gem_vm_bind = i915_gem_vm_bind,
+      .gem_vm_unbind = i915_gem_vm_unbind,
       .execute_simple_batch = i915_execute_simple_batch,
       .queue_exec_locked = i915_queue_exec_locked
    };
diff --git a/src/intel/vulkan/xe/anv_kmd_backend.c b/src/intel/vulkan/xe/anv_kmd_backend.c
index 8342d1c..c530ff4 100644
--- a/src/intel/vulkan/xe/anv_kmd_backend.c
+++ b/src/intel/vulkan/xe/anv_kmd_backend.c
@@ -22,6 +22,7 @@
  */
 
 #include <xe_drm.h>
+#include <xf86drm.h>
 
 #include "anv_private.h"
 
@@ -69,6 +70,59 @@ xe_gem_mmap(struct anv_device *device, struct anv_bo *bo, uint64_t offset,
                device->fd, args.offset);
 }
 
+static inline int
+xe_gem_vm_bind_op(struct anv_device *device, struct anv_bo *bo, uint32_t op)
+{
+   uint32_t syncobj_handle;
+   int ret = drmSyncobjCreate(device->fd, 0, &syncobj_handle);
+
+   if (ret)
+      return ret;
+
+   struct drm_xe_sync sync = {
+      .flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
+      .handle = syncobj_handle,
+   };
+   struct drm_xe_vm_bind args = {
+      .vm_id = device->vm_id,
+      .num_binds = 1,
+      .bind.obj = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle,
+      .bind.obj_offset = 0,
+      .bind.range = bo->size + bo->_ccs_size,
+      .bind.addr = intel_48b_address(bo->offset),
+      .bind.op = op,
+      .num_syncs = 1,
+      .syncs = (uintptr_t)&sync,
+   };
+   ret = intel_ioctl(device->fd, DRM_IOCTL_XE_VM_BIND, &args);
+   if (ret)
+      goto bind_error;
+
+   struct drm_syncobj_wait wait = {
+      .handles = (uintptr_t)&syncobj_handle,
+      .timeout_nsec = INT64_MAX,
+      .count_handles = 1,
+      .flags = 0,
+      .first_signaled = 0,
+      .pad = 0,
+   };
+   intel_ioctl(device->fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+
+bind_error:
+   drmSyncobjDestroy(device->fd, syncobj_handle);
+   return ret;
+}
+
+static int xe_gem_vm_bind(struct anv_device *device, struct anv_bo *bo)
+{
+   return xe_gem_vm_bind_op(device, bo, XE_VM_BIND_OP_MAP);
+}
+
+static int xe_gem_vm_unbind(struct anv_device *device, struct anv_bo *bo)
+{
+   return xe_gem_vm_bind_op(device, bo, XE_VM_BIND_OP_UNMAP);
+}
+
 const struct anv_kmd_backend *
 anv_xe_kmd_backend_get(void)
 {
@@ -76,6 +130,8 @@ anv_xe_kmd_backend_get(void)
       .gem_create = xe_gem_create,
       .gem_close = xe_gem_close,
       .gem_mmap = xe_gem_mmap,
+      .gem_vm_bind = xe_gem_vm_bind,
+      .gem_vm_unbind = xe_gem_vm_unbind,
    };
    return &xe_backend;
 }