struct loader_phys_dev_per_icd {
uint32_t count;
VkPhysicalDevice *phys_devs;
+ struct loader_icd *this_icd;
};
enum loader_debug {
debug_report_add_instance_extensions(inst, inst_exts);
}
-struct loader_physical_device *
-loader_get_physical_device(const VkPhysicalDevice physdev) {
- uint32_t i;
- for (struct loader_instance *inst = loader.instances; inst;
- inst = inst->next) {
- for (i = 0; i < inst->total_gpu_count; i++) {
- // TODO this aliases physDevices within instances, need for this
- // function to go away
- if (inst->phys_devs[i].disp ==
- loader_get_instance_dispatch(physdev)) {
- return &inst->phys_devs[i];
- }
- }
- }
- return NULL;
-}
-
struct loader_icd *loader_get_icd_and_device(const VkDevice device,
struct loader_device **found_dev) {
*found_dev = NULL;
return err;
}
-VkResult loader_create_device_chain(VkPhysicalDevice physicalDevice,
+VkResult loader_create_device_chain(const struct loader_physical_device *pd,
const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
- struct loader_instance *inst,
+ const struct loader_instance *inst,
struct loader_icd *icd,
struct loader_device *dev) {
uint32_t activated_layers = 0;
PFN_vkCreateDevice fpCreateDevice =
(PFN_vkCreateDevice)nextGIPA((VkInstance)inst, "vkCreateDevice");
if (fpCreateDevice) {
- res = fpCreateDevice(physicalDevice, &loader_create_info, pAllocator,
+ res = fpCreateDevice(pd->phys_dev, &loader_create_info, pAllocator,
&dev->device);
} else {
// Couldn't find CreateDevice function!
VkResult loader_validate_device_extensions(
struct loader_physical_device *phys_dev,
const struct loader_layer_list *activated_device_layers,
+ const struct loader_extension_list *icd_exts,
const VkDeviceCreateInfo *pCreateInfo) {
VkExtensionProperties *extension_prop;
struct loader_layer_properties *layer_prop;
VkStringErrorFlags result = vk_string_validate(
MaxLoaderStringLength, pCreateInfo->ppEnabledExtensionNames[i]);
if (result != VK_STRING_ERROR_NONE) {
- loader_log(phys_dev->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- 0, "Loader: Device ppEnabledExtensionNames contains "
- "string that is too long or is badly formed");
+ loader_log(phys_dev->this_icd->this_instance,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
+ "Loader: Device ppEnabledExtensionNames contains "
+ "string that is too long or is badly formed");
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
- extension_prop = get_extension_property(
- extension_name, &phys_dev->device_extension_cache);
+ extension_prop = get_extension_property(extension_name, icd_exts);
if (extension_prop) {
continue;
return VK_SUCCESS;
}
-VkResult
-loader_init_physical_device_info(struct loader_instance *ptr_instance) {
- struct loader_icd *icd;
- uint32_t i, j, idx, count = 0;
- VkResult res;
- struct loader_phys_dev_per_icd *phys_devs;
-
- ptr_instance->total_gpu_count = 0;
- phys_devs = (struct loader_phys_dev_per_icd *)loader_stack_alloc(
- sizeof(struct loader_phys_dev_per_icd) * ptr_instance->total_icd_count);
- if (!phys_devs)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- icd = ptr_instance->icds;
- for (i = 0; i < ptr_instance->total_icd_count; i++) {
- assert(icd);
- res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count,
- NULL);
- if (res != VK_SUCCESS)
- return res;
- count += phys_devs[i].count;
- icd = icd->next;
- }
-
- ptr_instance->phys_devs =
- (struct loader_physical_device *)loader_heap_alloc(
- ptr_instance, count * sizeof(struct loader_physical_device),
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- if (!ptr_instance->phys_devs)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
-
- icd = ptr_instance->icds;
-
- struct loader_physical_device *inst_phys_devs = ptr_instance->phys_devs;
- idx = 0;
- for (i = 0; i < ptr_instance->total_icd_count; i++) {
- assert(icd);
-
- phys_devs[i].phys_devs = (VkPhysicalDevice *)loader_stack_alloc(
- phys_devs[i].count * sizeof(VkPhysicalDevice));
- if (!phys_devs[i].phys_devs) {
- loader_heap_free(ptr_instance, ptr_instance->phys_devs);
- ptr_instance->phys_devs = NULL;
- return VK_ERROR_OUT_OF_HOST_MEMORY;
- }
- res = icd->EnumeratePhysicalDevices(
- icd->instance, &(phys_devs[i].count), phys_devs[i].phys_devs);
- if ((res == VK_SUCCESS)) {
- ptr_instance->total_gpu_count += phys_devs[i].count;
- for (j = 0; j < phys_devs[i].count; j++) {
-
- // initialize the loader's physicalDevice object
- loader_set_dispatch((void *)&inst_phys_devs[idx],
- ptr_instance->disp);
- inst_phys_devs[idx].this_instance = ptr_instance;
- inst_phys_devs[idx].this_icd = icd;
- inst_phys_devs[idx].phys_dev = phys_devs[i].phys_devs[j];
- memset(&inst_phys_devs[idx].device_extension_cache, 0,
- sizeof(struct loader_extension_list));
-
- idx++;
- }
- } else {
- loader_heap_free(ptr_instance, ptr_instance->phys_devs);
- ptr_instance->phys_devs = NULL;
- return res;
- }
-
- icd = icd->next;
- }
-
- return VK_SUCCESS;
-}
-
/**
* Terminator functions for the Instance chain
 * All named terminator_<Vulkan API name>
loader_scanned_icd_clear(ptr_instance, &ptr_instance->icd_libs);
loader_destroy_generic_list(
ptr_instance, (struct loader_generic_list *)&ptr_instance->ext_list);
- for (uint32_t i = 0; i < ptr_instance->total_gpu_count; i++)
- loader_destroy_generic_list(
- ptr_instance,
- (struct loader_generic_list *)&ptr_instance->phys_devs[i]
- .device_extension_cache);
- loader_heap_free(ptr_instance, ptr_instance->phys_devs);
+ if (ptr_instance->phys_devs_term)
+ loader_heap_free(ptr_instance, ptr_instance->phys_devs_term);
loader_free_dev_ext_table(ptr_instance);
}
const VkAllocationCallbacks *pAllocator,
VkDevice *pDevice) {
struct loader_physical_device *phys_dev;
- phys_dev = loader_get_physical_device(physicalDevice);
+ phys_dev = (struct loader_physical_device *)physicalDevice;
VkLayerDeviceCreateInfo *chain_info =
(VkLayerDeviceCreateInfo *)pCreateInfo->pNext;
localCreateInfo.ppEnabledExtensionNames =
(const char *const *)filtered_extension_names;
+ /* Get the physical device (ICD) extensions */
+ struct loader_extension_list icd_exts;
+ VkResult res;
+ if (!loader_init_generic_list(phys_dev->this_icd->this_instance,
+ (struct loader_generic_list *)&icd_exts,
+ sizeof(VkExtensionProperties))) {
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+    res = loader_add_device_extensions(
+        phys_dev->this_icd->this_instance, phys_dev->this_icd,
+        phys_dev->phys_dev, phys_dev->this_icd->this_icd_lib->lib_name,
+        &icd_exts);
+    if (res != VK_SUCCESS) {
+        /* icd_exts was allocated by loader_init_generic_list above; free it
+         * before bailing out so this error path does not leak the list. */
+        loader_destroy_generic_list(
+            phys_dev->this_icd->this_instance,
+            (struct loader_generic_list *)&icd_exts);
+        return res;
+    }
+
for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
const char *extension_name = pCreateInfo->ppEnabledExtensionNames[i];
- VkExtensionProperties *prop = get_extension_property(
- extension_name, &phys_dev->device_extension_cache);
+ VkExtensionProperties *prop =
+ get_extension_property(extension_name, &icd_exts);
if (prop) {
filtered_extension_names[localCreateInfo.enabledExtensionCount] =
(char *)extension_name;
// this_icd->CreateDevice?
// VkResult res = fpCreateDevice(phys_dev->phys_dev, &localCreateInfo,
// pAllocator, &localDevice);
- VkResult res = phys_dev->this_icd->CreateDevice(
- phys_dev->phys_dev, &localCreateInfo, pAllocator, &localDevice);
+ res = phys_dev->this_icd->CreateDevice(phys_dev->phys_dev, &localCreateInfo,
+ pAllocator, &localDevice);
if (res != VK_SUCCESS) {
return res;
uint32_t *pPhysicalDeviceCount,
VkPhysicalDevice *pPhysicalDevices) {
uint32_t i;
- uint32_t copy_count = 0;
- struct loader_instance *ptr_instance = (struct loader_instance *)instance;
+ struct loader_instance *inst = (struct loader_instance *)instance;
VkResult res = VK_SUCCESS;
- if (ptr_instance->total_gpu_count == 0) {
- res = loader_init_physical_device_info(ptr_instance);
+ struct loader_icd *icd;
+ struct loader_phys_dev_per_icd *phys_devs;
+
+ inst->total_gpu_count = 0;
+ phys_devs = (struct loader_phys_dev_per_icd *)loader_stack_alloc(
+ sizeof(struct loader_phys_dev_per_icd) * inst->total_icd_count);
+ if (!phys_devs)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ icd = inst->icds;
+ for (i = 0; i < inst->total_icd_count; i++) {
+ assert(icd);
+ res = icd->EnumeratePhysicalDevices(icd->instance, &phys_devs[i].count,
+ NULL);
+ if (res != VK_SUCCESS)
+ return res;
+ icd = icd->next;
+ }
+
+ icd = inst->icds;
+ for (i = 0; i < inst->total_icd_count; i++) {
+ assert(icd);
+ phys_devs[i].phys_devs = (VkPhysicalDevice *)loader_stack_alloc(
+ phys_devs[i].count * sizeof(VkPhysicalDevice));
+ if (!phys_devs[i].phys_devs) {
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+ res = icd->EnumeratePhysicalDevices(
+ icd->instance, &(phys_devs[i].count), phys_devs[i].phys_devs);
+ if ((res == VK_SUCCESS)) {
+ inst->total_gpu_count += phys_devs[i].count;
+ } else {
+ return res;
+ }
+ phys_devs[i].this_icd = icd;
+ icd = icd->next;
}
- *pPhysicalDeviceCount = ptr_instance->total_gpu_count;
+ *pPhysicalDeviceCount = inst->total_gpu_count;
if (!pPhysicalDevices) {
return res;
}
- copy_count = (ptr_instance->total_gpu_count < *pPhysicalDeviceCount)
- ? ptr_instance->total_gpu_count
+ /* Initialize the output pPhysicalDevices with wrapped loader terminator
+ * physicalDevice objects; save this list of wrapped objects in instance
+ * struct for later cleanup and use by trampoline code */
+ uint32_t j, idx = 0;
+ uint32_t copy_count = 0;
+
+ copy_count = (inst->total_gpu_count < *pPhysicalDeviceCount)
+ ? inst->total_gpu_count
: *pPhysicalDeviceCount;
- for (i = 0; i < copy_count; i++) {
- pPhysicalDevices[i] = (VkPhysicalDevice)&ptr_instance->phys_devs[i];
+
+ // phys_devs_term is used to pass the "this_icd" info to trampoline code
+ if (inst->phys_devs_term)
+ loader_heap_free(inst, inst->phys_devs_term);
+ inst->phys_devs_term = loader_heap_alloc(
+ inst, sizeof(struct loader_physical_device) * copy_count,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!inst->phys_devs_term)
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+
+ for (i = 0; idx < copy_count && i < inst->total_icd_count; i++) {
+ for (j = 0; j < phys_devs[i].count && idx < copy_count; j++) {
+ loader_set_dispatch((void *)&inst->phys_devs_term[idx], inst->disp);
+ inst->phys_devs_term[idx].this_icd = phys_devs[i].this_icd;
+ inst->phys_devs_term[idx].phys_dev = phys_devs[i].phys_devs[j];
+ pPhysicalDevices[idx] =
+ (VkPhysicalDevice)&inst->phys_devs_term[idx];
+ idx++;
+ }
}
*pPhysicalDeviceCount = copy_count;
- if (copy_count < ptr_instance->total_gpu_count) {
+ if (copy_count < inst->total_gpu_count) {
+ inst->total_gpu_count = copy_count;
return VK_INCOMPLETE;
}
-
return res;
}
/* Any layer or trampoline wrapping should be removed at this point in time
* can just cast to the expected type for VkPhysicalDevice. */
- phys_dev = (struct loader_physical_device *) physicalDevice;
+ phys_dev = (struct loader_physical_device *)physicalDevice;
/* this case is during the call down the instance chain with pLayerName
* == NULL*/
VkResult res;
/* get device extensions */
- res = icd->EnumerateDeviceExtensionProperties(
- phys_dev->phys_dev, NULL, &icd_ext_count, pProperties);
+ res = icd->EnumerateDeviceExtensionProperties(phys_dev->phys_dev, NULL,
+ &icd_ext_count, pProperties);
if (res != VK_SUCCESS)
return res;
- loader_init_layer_list(phys_dev->this_instance, &implicit_layer_list);
+ loader_init_layer_list(icd->this_instance, &implicit_layer_list);
loader_add_layer_implicit(
- phys_dev->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
- &implicit_layer_list,
- &phys_dev->this_instance->instance_layer_list);
+ icd->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
+ &implicit_layer_list, &icd->this_instance->instance_layer_list);
/* we need to determine which implicit layers are active,
* and then add their extensions. This can't be cached as
* it depends on results of environment variables (which can change).
*/
if (pProperties != NULL) {
+ struct loader_extension_list icd_exts;
/* initialize dev_extension list within the physicalDevice object */
- res = loader_init_device_extensions(
- phys_dev->this_instance, phys_dev, icd_ext_count, pProperties,
- &phys_dev->device_extension_cache);
+ res = loader_init_device_extensions(icd->this_instance, phys_dev,
+ icd_ext_count, pProperties,
+ &icd_exts);
if (res != VK_SUCCESS)
return res;
* change).
*/
struct loader_extension_list all_exts = {0};
- loader_add_to_ext_list(phys_dev->this_instance, &all_exts,
- phys_dev->device_extension_cache.count,
- phys_dev->device_extension_cache.list);
+ loader_add_to_ext_list(icd->this_instance, &all_exts, icd_exts.count,
+ icd_exts.list);
- loader_init_layer_list(phys_dev->this_instance,
- &implicit_layer_list);
+ loader_init_layer_list(icd->this_instance, &implicit_layer_list);
loader_add_layer_implicit(
- phys_dev->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
- &implicit_layer_list,
- &phys_dev->this_instance->instance_layer_list);
+ icd->this_instance, VK_LAYER_TYPE_INSTANCE_IMPLICIT,
+ &implicit_layer_list, &icd->this_instance->instance_layer_list);
for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
for (uint32_t j = 0;
- j < implicit_layer_list.list[i].device_extension_list.count;
- j++) {
- loader_add_to_ext_list(phys_dev->this_instance, &all_exts,
- 1,
- &implicit_layer_list.list[i]
- .device_extension_list.list[j]
- .props);
+ j < implicit_layer_list.list[i].device_extension_list.count;
+ j++) {
+ loader_add_to_ext_list(icd->this_instance, &all_exts, 1,
+ &implicit_layer_list.list[i]
+ .device_extension_list.list[j]
+ .props);
}
}
uint32_t capacity = *pPropertyCount;
} else {
*pPropertyCount = all_exts.count;
}
-        loader_destroy_generic_list(
-            phys_dev->this_instance,
-            (struct loader_generic_list *) &all_exts);
+        loader_destroy_generic_list(icd->this_instance,
+                                    (struct loader_generic_list *)&all_exts);
+        /* icd_exts was populated by loader_init_device_extensions above;
+         * destroy it here as well so it does not leak each time this
+         * branch (pProperties != NULL) is taken. */
+        loader_destroy_generic_list(icd->this_instance,
+                                    (struct loader_generic_list *)&icd_exts);
} else {
/* just return the count; need to add in the count of implicit layer
* extensions
for (uint32_t i = 0; i < implicit_layer_list.count; i++) {
*pPropertyCount +=
- implicit_layer_list.list[i].device_extension_list.count;
+ implicit_layer_list.list[i].device_extension_list.count;
}
res = VK_SUCCESS;
}
loader_destroy_generic_list(
- phys_dev->this_instance,
- (struct loader_generic_list *) &implicit_layer_list);
+ icd->this_instance, (struct loader_generic_list *)&implicit_layer_list);
return res;
}
struct loader_instance {
VkLayerInstanceDispatchTable *disp; // must be first entry in structure
- uint32_t total_gpu_count;
- struct loader_physical_device *phys_devs;
+ uint32_t total_gpu_count; // count of the next two arrays
+ struct loader_physical_device *phys_devs_term;
+ struct loader_physical_device *phys_devs; // tramp wrapped physDev obj list
uint32_t total_icd_count;
struct loader_icd *icds;
struct loader_instance *next;
#endif
};
-/* per enumerated PhysicalDevice structure */
+/* VkPhysicalDevice requires special treatment by the loader. Firstly,
+ * terminator code must be able to get the struct loader_icd to call into the
+ * proper driver (multiple ICD/gpu case). This can be accomplished by wrapping
+ * the created VkPhysicalDevice in loader terminator_EnumeratePhysicalDevices().
+ * Secondly, the loader must be able to find the instance and icd in trampoline
+ * code.
+ * Thirdly, the loader must be able to handle a VkPhysicalDevice that has been
+ * wrapped by a layer. This implies that the loader trampoline code must also
+ * wrap the VkPhysicalDevice object in trampoline code. Thus, the loader has to
+ * wrap the created VkPhysicalDevice object twice. In trampoline code it can't
+ * rely on the terminator object wrapping since a layer may also wrap. Since
+ * trampoline code wraps the VkPhysicalDevice this means all loader trampoline
+ * code that passes a VkPhysicalDevice should unwrap it. */
+
+/* per enumerated PhysicalDevice structure, used to wrap in trampoline code and
+ also same structure used to wrap in terminator code */
struct loader_physical_device {
VkLayerInstanceDispatchTable *disp; // must be first entry in structure
- struct loader_instance *this_instance;
struct loader_icd *this_icd;
- VkPhysicalDevice phys_dev; // object from ICD
- /*
- * Fill in the cache of available device extensions from
- * this physical device. This cache can be used during CreateDevice
- */
- struct loader_extension_list device_extension_cache;
+ VkPhysicalDevice phys_dev; // object from ICD/layers/loader terminator
};
struct loader_struct {
return (struct loader_instance *)instance;
}
+/* Unwrap a loader-trampoline VkPhysicalDevice handle: the handle is really a
+ * pointer to a struct loader_physical_device, so return the inner phys_dev
+ * stored there (the object that came from the ICD/layers/loader terminator). */
+static inline VkPhysicalDevice
+loader_unwrap_physical_device(VkPhysicalDevice physicalDevice) {
+    struct loader_physical_device *phys_dev =
+        (struct loader_physical_device *)physicalDevice;
+    return phys_dev->phys_dev;
+}
+
static inline void loader_set_dispatch(void *obj, const void *data) {
*((const void **)obj) = data;
}
void loader_get_icd_loader_instance_extensions(
const struct loader_instance *inst, struct loader_icd_libs *icd_libs,
struct loader_extension_list *inst_exts);
-struct loader_physical_device *
-loader_get_physical_device(const VkPhysicalDevice physdev);
struct loader_icd *loader_get_icd_and_device(const VkDevice device,
struct loader_device **found_dev);
void loader_init_dispatch_dev_ext(struct loader_instance *inst,
const VkDeviceCreateInfo *pCreateInfo,
const struct loader_layer_list *device_layers);
-VkResult loader_create_device_chain(VkPhysicalDevice physicalDevice,
+VkResult loader_create_device_chain(const struct loader_physical_device *pd,
const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
- struct loader_instance *inst,
+ const struct loader_instance *inst,
struct loader_icd *icd,
struct loader_device *dev);
VkResult loader_validate_device_extensions(
struct loader_physical_device *phys_dev,
const struct loader_layer_list *activated_device_layers,
+ const struct loader_extension_list *icd_exts,
const VkDeviceCreateInfo *pCreateInfo);
/* instance layer chain termination entrypoint definitions */
disp->DestroyInstance(instance, pAllocator);
loader_deactivate_instance_layers(ptr_instance);
+ if (ptr_instance->phys_devs)
+ loader_heap_free(ptr_instance, ptr_instance->phys_devs);
loader_heap_free(ptr_instance, ptr_instance->disp);
loader_heap_free(ptr_instance, ptr_instance);
loader_platform_thread_unlock_mutex(&loader_lock);
VkPhysicalDevice *pPhysicalDevices) {
const VkLayerInstanceDispatchTable *disp;
VkResult res;
+ uint32_t count, i;
+ struct loader_instance *inst;
disp = loader_get_instance_dispatch(instance);
loader_platform_thread_lock_mutex(&loader_lock);
res = disp->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount,
pPhysicalDevices);
+
+ if (res != VK_SUCCESS && res != VK_INCOMPLETE) {
+ loader_platform_thread_unlock_mutex(&loader_lock);
+ return res;
+ }
+
+ if (!pPhysicalDevices) {
+ loader_platform_thread_unlock_mutex(&loader_lock);
+ return res;
+ }
+
+ // wrap the PhysDev object for loader usage, return wrapped objects
+ inst = loader_get_instance(instance);
+ if (!inst) {
+ loader_platform_thread_unlock_mutex(&loader_lock);
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+ if (inst->phys_devs)
+ loader_heap_free(inst, inst->phys_devs);
+ count = inst->total_gpu_count;
+ inst->phys_devs = (struct loader_physical_device *)loader_heap_alloc(
+ inst, count * sizeof(struct loader_physical_device),
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ if (!inst->phys_devs) {
+ loader_platform_thread_unlock_mutex(&loader_lock);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ for (i = 0; i < count; i++) {
+
+ // initialize the loader's physicalDevice object
+ loader_set_dispatch((void *)&inst->phys_devs[i], inst->disp);
+ inst->phys_devs[i].this_icd = inst->phys_devs_term[i].this_icd;
+ inst->phys_devs[i].phys_dev = pPhysicalDevices[i];
+
+ // copy wrapped object into Application provided array
+ pPhysicalDevices[i] = (VkPhysicalDevice)&inst->phys_devs[i];
+ }
loader_platform_thread_unlock_mutex(&loader_lock);
return res;
}
LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkGetPhysicalDeviceFeatures(VkPhysicalDevice gpu,
+vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceFeatures *pFeatures) {
const VkLayerInstanceDispatchTable *disp;
-
- disp = loader_get_instance_dispatch(gpu);
- disp->GetPhysicalDeviceFeatures(gpu, pFeatures);
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
+ disp = loader_get_instance_dispatch(physicalDevice);
+ disp->GetPhysicalDeviceFeatures(unwrapped_phys_dev, pFeatures);
}
LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice gpu, VkFormat format,
+vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice,
+ VkFormat format,
VkFormatProperties *pFormatInfo) {
const VkLayerInstanceDispatchTable *disp;
-
- disp = loader_get_instance_dispatch(gpu);
- disp->GetPhysicalDeviceFormatProperties(gpu, format, pFormatInfo);
+ VkPhysicalDevice unwrapped_pd =
+ loader_unwrap_physical_device(physicalDevice);
+ disp = loader_get_instance_dispatch(physicalDevice);
+ disp->GetPhysicalDeviceFormatProperties(unwrapped_pd, format, pFormatInfo);
}
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags,
VkImageFormatProperties *pImageFormatProperties) {
const VkLayerInstanceDispatchTable *disp;
-
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
disp = loader_get_instance_dispatch(physicalDevice);
return disp->GetPhysicalDeviceImageFormatProperties(
- physicalDevice, format, type, tiling, usage, flags,
+ unwrapped_phys_dev, format, type, tiling, usage, flags,
pImageFormatProperties);
}
LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL
-vkGetPhysicalDeviceProperties(VkPhysicalDevice gpu,
+vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties *pProperties) {
const VkLayerInstanceDispatchTable *disp;
-
- disp = loader_get_instance_dispatch(gpu);
- disp->GetPhysicalDeviceProperties(gpu, pProperties);
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
+ disp = loader_get_instance_dispatch(physicalDevice);
+ disp->GetPhysicalDeviceProperties(unwrapped_phys_dev, pProperties);
}
LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL
vkGetPhysicalDeviceQueueFamilyProperties(
- VkPhysicalDevice gpu, uint32_t *pQueueFamilyPropertyCount,
+ VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
VkQueueFamilyProperties *pQueueProperties) {
const VkLayerInstanceDispatchTable *disp;
-
- disp = loader_get_instance_dispatch(gpu);
- disp->GetPhysicalDeviceQueueFamilyProperties(gpu, pQueueFamilyPropertyCount,
- pQueueProperties);
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
+ disp = loader_get_instance_dispatch(physicalDevice);
+ disp->GetPhysicalDeviceQueueFamilyProperties(
+ unwrapped_phys_dev, pQueueFamilyPropertyCount, pQueueProperties);
}
LOADER_EXPORT VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties(
- VkPhysicalDevice gpu, VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties *pMemoryProperties) {
const VkLayerInstanceDispatchTable *disp;
-
- disp = loader_get_instance_dispatch(gpu);
- disp->GetPhysicalDeviceMemoryProperties(gpu, pMemoryProperties);
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
+ disp = loader_get_instance_dispatch(physicalDevice);
+ disp->GetPhysicalDeviceMemoryProperties(unwrapped_phys_dev,
+ pMemoryProperties);
}
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
loader_platform_thread_lock_mutex(&loader_lock);
- // TODO this only works for one physical device per instance
- // once CreateDevice layer bootstrapping is done via DeviceCreateInfo
- // hopefully don't need this anymore in trampoline code
- phys_dev = loader_get_physical_device(physicalDevice);
+ phys_dev = (struct loader_physical_device *)physicalDevice;
icd = phys_dev->this_icd;
if (!icd) {
loader_platform_thread_unlock_mutex(&loader_lock);
return VK_ERROR_INITIALIZATION_FAILED;
}
- inst = phys_dev->this_instance;
+ inst = (struct loader_instance *)phys_dev->this_icd->this_instance;
if (!icd->CreateDevice) {
loader_platform_thread_unlock_mutex(&loader_lock);
}
}
- /* Get the physical device extensions if they haven't been retrieved yet */
- if (phys_dev->device_extension_cache.capacity == 0) {
- if (!loader_init_generic_list(
- inst,
- (struct loader_generic_list *)&phys_dev->device_extension_cache,
- sizeof(VkExtensionProperties))) {
- loader_platform_thread_unlock_mutex(&loader_lock);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
- }
+ /* Get the physical device (ICD) extensions */
+ struct loader_extension_list icd_exts;
+ if (!loader_init_generic_list(inst, (struct loader_generic_list *)&icd_exts,
+ sizeof(VkExtensionProperties))) {
+ loader_platform_thread_unlock_mutex(&loader_lock);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
- res = loader_add_device_extensions(
- inst, icd, phys_dev->phys_dev,
- phys_dev->this_icd->this_icd_lib->lib_name,
- &phys_dev->device_extension_cache);
- if (res != VK_SUCCESS) {
- loader_platform_thread_unlock_mutex(&loader_lock);
- return res;
- }
+    res = loader_add_device_extensions(
+        inst, icd, phys_dev->phys_dev,
+        phys_dev->this_icd->this_icd_lib->lib_name, &icd_exts);
+    if (res != VK_SUCCESS) {
+        /* Free the just-created icd_exts list before returning; without this
+         * the error path leaks the list allocated above. */
+        loader_destroy_generic_list(inst,
+                                    (struct loader_generic_list *)&icd_exts);
+        loader_platform_thread_unlock_mutex(&loader_lock);
+        return res;
    }
/* convert any meta layers to the actual layers makes a copy of layer name*/
/* make sure requested extensions to be enabled are supported */
res = loader_validate_device_extensions(phys_dev, &activated_layer_list,
- pCreateInfo);
+ &icd_exts, pCreateInfo);
if (res != VK_SUCCESS) {
loader_unexpand_dev_layer_names(inst, saved_layer_count,
saved_layer_names, saved_layer_ptr,
return res;
}
- res = loader_create_device_chain(physicalDevice, pCreateInfo, pAllocator,
- inst, icd, dev);
+ res = loader_create_device_chain(phys_dev, pCreateInfo, pAllocator, inst,
+ icd, dev);
if (res != VK_SUCCESS) {
loader_unexpand_dev_layer_names(inst, saved_layer_count,
saved_layer_names, saved_layer_ptr,
uint32_t *pPropertyCount,
VkExtensionProperties *pProperties) {
VkResult res = VK_SUCCESS;
+ struct loader_physical_device *phys_dev;
+ phys_dev = (struct loader_physical_device *)physicalDevice;
loader_platform_thread_lock_mutex(&loader_lock);
disp = loader_get_instance_dispatch(physicalDevice);
res = disp->EnumerateDeviceExtensionProperties(
- physicalDevice, NULL, pPropertyCount, pProperties);
+ phys_dev->phys_dev, NULL, pPropertyCount, pProperties);
} else {
- struct loader_physical_device *phys_dev;
+
uint32_t count;
uint32_t copy_size;
- // TODO fix this aliases physical devices
- phys_dev = loader_get_physical_device(physicalDevice);
+ const struct loader_instance *inst = phys_dev->this_icd->this_instance;
if (vk_string_validate(MaxLoaderStringLength, pLayerName) ==
- VK_STRING_ERROR_NONE) {
+ VK_STRING_ERROR_NONE) {
struct loader_device_extension_list *dev_ext_list = NULL;
- for (uint32_t i = 0;
- i < phys_dev->this_instance->device_layer_list.count; i++) {
+ for (uint32_t i = 0; i < inst->device_layer_list.count; i++) {
struct loader_layer_properties *props =
- &phys_dev->this_instance->device_layer_list.list[i];
+ &inst->device_layer_list.list[i];
if (strcmp(props->info.layerName, pLayerName) == 0) {
dev_ext_list = &props->device_extension_list;
}
copy_size = *pPropertyCount < count ? *pPropertyCount : count;
for (uint32_t i = 0; i < copy_size; i++) {
memcpy(&pProperties[i], &dev_ext_list->list[i].props,
- sizeof (VkExtensionProperties));
+ sizeof(VkExtensionProperties));
}
*pPropertyCount = copy_size;
return VK_INCOMPLETE;
}
} else {
- loader_log(phys_dev->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT,
- 0, "vkEnumerateDeviceExtensionProperties: pLayerName "
- "is too long or is badly formed");
- loader_platform_thread_unlock_mutex(&loader_lock);
+ loader_log(inst, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
+ "vkEnumerateDeviceExtensionProperties: pLayerName "
+ "is too long or is badly formed");
+ loader_platform_thread_unlock_mutex(&loader_lock);
return VK_ERROR_EXTENSION_NOT_PRESENT;
}
}
/* Don't dispatch this call down the instance chain, want all device layers
enumerated and instance chain may not contain all device layers */
- // TODO fix this, aliases physical devices
- phys_dev = loader_get_physical_device(physicalDevice);
- uint32_t count = phys_dev->this_instance->device_layer_list.count;
+ phys_dev = (struct loader_physical_device *)physicalDevice;
+ const struct loader_instance *inst = phys_dev->this_icd->this_instance;
+ uint32_t count = inst->device_layer_list.count;
if (pProperties == NULL) {
*pPropertyCount = count;
copy_size = (*pPropertyCount < count) ? *pPropertyCount : count;
for (uint32_t i = 0; i < copy_size; i++) {
- memcpy(&pProperties[i],
- &(phys_dev->this_instance->device_layer_list.list[i].info),
+ memcpy(&pProperties[i], &(inst->device_layer_list.list[i].info),
sizeof(VkLayerProperties));
}
*pPropertyCount = copy_size;
VkImageTiling tiling, uint32_t *pPropertyCount,
VkSparseImageFormatProperties *pProperties) {
const VkLayerInstanceDispatchTable *disp;
-
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
disp = loader_get_instance_dispatch(physicalDevice);
disp->GetPhysicalDeviceSparseImageFormatProperties(
- physicalDevice, format, type, samples, usage, tiling, pPropertyCount,
- pProperties);
+ unwrapped_phys_dev, format, type, samples, usage, tiling,
+ pPropertyCount, pProperties);
}
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
VkSurfaceKHR surface,
VkBool32 *pSupported) {
const VkLayerInstanceDispatchTable *disp;
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
disp = loader_get_instance_dispatch(physicalDevice);
VkResult res = disp->GetPhysicalDeviceSurfaceSupportKHR(
- physicalDevice, queueFamilyIndex, surface, pSupported);
+ unwrapped_phys_dev, queueFamilyIndex, surface, pSupported);
return res;
}
vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
+
const VkLayerInstanceDispatchTable *disp;
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
disp = loader_get_instance_dispatch(physicalDevice);
VkResult res = disp->GetPhysicalDeviceSurfaceCapabilitiesKHR(
- physicalDevice, surface, pSurfaceCapabilities);
+ unwrapped_phys_dev, surface, pSurfaceCapabilities);
return res;
}
VkSurfaceKHR surface,
uint32_t *pSurfaceFormatCount,
VkSurfaceFormatKHR *pSurfaceFormats) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkResult res = disp->GetPhysicalDeviceSurfaceFormatsKHR(
- physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+ unwrapped_phys_dev, surface, pSurfaceFormatCount, pSurfaceFormats);
return res;
}
VkSurfaceKHR surface,
uint32_t *pPresentModeCount,
VkPresentModeKHR *pPresentModes) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkResult res = disp->GetPhysicalDeviceSurfacePresentModesKHR(
- physicalDevice, surface, pPresentModeCount, pPresentModes);
+ unwrapped_phys_dev, surface, pPresentModeCount, pPresentModes);
return res;
}
LOADER_EXPORT VKAPI_ATTR VkBool32 VKAPI_CALL
vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkBool32 res = disp->GetPhysicalDeviceWin32PresentationSupportKHR(
- physicalDevice, queueFamilyIndex);
+ unwrapped_phys_dev, queueFamilyIndex);
return res;
}
vkGetPhysicalDeviceMirPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
MirConnection *connection) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkBool32 res = disp->GetPhysicalDeviceMirPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, connection);
+ unwrapped_phys_dev, queueFamilyIndex, connection);
return res;
}
vkGetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
struct wl_display *display) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkBool32 res = disp->GetPhysicalDeviceWaylandPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, display);
+ unwrapped_phys_dev, queueFamilyIndex, display);
return res;
}
uint32_t queueFamilyIndex,
xcb_connection_t *connection,
xcb_visualid_t visual_id) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkBool32 res = disp->GetPhysicalDeviceXcbPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, connection, visual_id);
+ unwrapped_phys_dev, queueFamilyIndex, connection, visual_id);
return res;
}
vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
Display *dpy, VisualID visualID) {
+ VkPhysicalDevice unwrapped_phys_dev =
+ loader_unwrap_physical_device(physicalDevice);
const VkLayerInstanceDispatchTable *disp;
disp = loader_get_instance_dispatch(physicalDevice);
VkBool32 res = disp->GetPhysicalDeviceXlibPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, dpy, visualID);
+ unwrapped_phys_dev, queueFamilyIndex, dpy, visualID);
return res;
}