}
}
+/* Update device->max_context_priority for the active kernel-mode driver.
+ * i915 already determines this while fetching device parameters, so only
+ * the Xe backend needs a dedicated probe step here.
+ */
+static void
+anv_physical_device_max_priority_update(struct anv_physical_device *device)
+{
+   if (device->info.kmd_type == INTEL_KMD_TYPE_XE) {
+      anv_xe_physical_device_max_priority_update(device);
+   } else if (device->info.kmd_type != INTEL_KMD_TYPE_I915) {
+      unreachable("Missing");
+   }
+}
+
static VkResult
anv_physical_device_try_create(struct vk_instance *vk_instance,
struct _drmDevice *drm_device,
device->info.has_compute_engine = intel_engines_count(device->engine_info,
INTEL_ENGINE_CLASS_COMPUTE);
anv_physical_device_init_queue_families(device);
+ anv_physical_device_max_priority_update(device);
anv_physical_device_init_perf(device, fd);
anv_xe_physical_device_get_parameters(struct anv_physical_device *device)
{
device->has_exec_timeline = true;
- /* TODO: fetch max_context_priority */
+ /* max_context_priority will be updated in
+ * anv_xe_physical_device_max_priority_update()
+ */
device->max_context_priority = VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR;
return VK_SUCCESS;
}
+/* Translate a Vulkan global queue priority into the matching Xe KMD
+ * priority value.
+ *
+ * TODO: include gpu_scheduler.h and spsc_queue.h and replace hard-coded
+ * values
+ */
+uint64_t
+anv_vk_priority_to_xe(VkQueueGlobalPriorityKHR vk_priority)
+{
+   if (vk_priority == VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR)
+      return 0;
+   if (vk_priority == VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR)
+      return 1;
+   if (vk_priority == VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR)
+      return 2;
+
+   unreachable("Invalid priority");
+   /* Keeps non-debug builds free of missing-return warnings. */
+   return 0;
+}
+
+/* Probe the highest exec priority the KMD will grant this client by
+ * creating a throw-away VM + engine and raising the engine priority until
+ * the set-property ioctl is rejected.  device->max_context_priority was
+ * pre-set to LOW in anv_xe_physical_device_get_parameters(), so failing
+ * early simply leaves that conservative default in place.
+ */
+void
+anv_xe_physical_device_max_priority_update(struct anv_physical_device *device)
+{
+   if (!device->engine_info->num_engines)
+      return;
+
+   struct drm_xe_vm_create create_vm = {};
+   if (intel_ioctl(device->local_fd, DRM_IOCTL_XE_VM_CREATE, &create_vm))
+      return;
+
+   /* Ordered low -> high so the loop records the last priority accepted. */
+   const VkQueueGlobalPriorityKHR priorities[] = {
+      VK_QUEUE_GLOBAL_PRIORITY_LOW_KHR,
+      VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR,
+      VK_QUEUE_GLOBAL_PRIORITY_HIGH_KHR,
+   };
+   /* Zero-initialize so pad/reserved fields don't carry stack garbage into
+    * the destroy ioctl.
+    */
+   struct drm_xe_engine_destroy destroy_engine = {};
+   struct drm_xe_vm_destroy destroy_vm = {
+      .vm_id = create_vm.vm_id,
+   };
+   struct drm_xe_engine_create create_engine = {
+      .instances = (uintptr_t)device->engine_info->engines,
+      .width = 1,
+      .num_placements = 1,
+      .vm_id = create_vm.vm_id,
+   };
+   if (intel_ioctl(device->local_fd, DRM_IOCTL_XE_ENGINE_CREATE,
+                   &create_engine))
+      goto destroy_vm;
+
+   for (unsigned i = 0; i < ARRAY_SIZE(priorities); i++) {
+      struct drm_xe_engine_set_property engine_property = {
+         .engine_id = create_engine.engine_id,
+         .property = XE_ENGINE_SET_PROPERTY_PRIORITY,
+         .value = anv_vk_priority_to_xe(priorities[i]),
+      };
+      if (intel_ioctl(device->local_fd, DRM_IOCTL_XE_ENGINE_SET_PROPERTY,
+                      &engine_property))
+         break;
+      device->max_context_priority = priorities[i];
+   }
+
+   destroy_engine.engine_id = create_engine.engine_id;
+   intel_ioctl(device->local_fd, DRM_IOCTL_XE_ENGINE_DESTROY, &destroy_engine);
+destroy_vm:
+   intel_ioctl(device->local_fd, DRM_IOCTL_XE_VM_DESTROY, &destroy_vm);
+}
+
VkResult
anv_xe_device_check_status(struct vk_device *vk_device)
{
VkResult
anv_xe_physical_device_get_parameters(struct anv_physical_device *device);
+void
+anv_xe_physical_device_max_priority_update(struct anv_physical_device *device);
+uint64_t anv_vk_priority_to_xe(VkQueueGlobalPriorityKHR vk_priority);
#include "common/xe/intel_engine.h"
#include "common/intel_gem.h"
+#include "xe/anv_device.h"
+
#include "drm-uapi/xe_drm.h"
VkResult
}
assert(device->vm_id != 0);
- /* TODO: drm_xe_engine_set_property XE_ENGINE_PROPERTY_PRIORITY */
struct drm_xe_engine_create create = {
/* Allows KMD to pick one of those engines for the submission queue */
.instances = (uintptr_t)instances,
return vk_errorf(device, VK_ERROR_UNKNOWN, "Unable to create engine");
queue->engine_id = create.engine_id;
+
+ const VkDeviceQueueGlobalPriorityCreateInfoKHR *queue_priority =
+ vk_find_struct_const(pCreateInfo->pNext,
+ DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_KHR);
+ const VkQueueGlobalPriorityKHR priority = queue_priority ?
+ queue_priority->globalPriority :
+ VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR;
+
+ /* As per spec, the driver implementation may deny requests to acquire
+ * a priority above the default priority (MEDIUM) if the caller does not
+ * have sufficient privileges. In this scenario VK_ERROR_NOT_PERMITTED_KHR
+ * is returned.
+ */
+ if (physical->max_context_priority >= VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR) {
+ if (priority > physical->max_context_priority)
+ goto priority_error;
+
+ struct drm_xe_engine_set_property engine_property = {
+ .engine_id = create.engine_id,
+ .property = XE_ENGINE_SET_PROPERTY_PRIORITY,
+ .value = anv_vk_priority_to_xe(priority),
+ };
+ ret = intel_ioctl(device->fd, DRM_IOCTL_XE_ENGINE_SET_PROPERTY,
+ &engine_property);
+ if (ret && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_KHR)
+ goto priority_error;
+ }
+
return VK_SUCCESS;
+
+priority_error:
+ anv_xe_destroy_engine(device, queue);
+ return vk_error(device, VK_ERROR_NOT_PERMITTED_KHR);
}
void