Replace naked uses of malloc & free with loader_alloc/loader_calloc & loader_free, and collapse the loader_instance_heap_alloc + memset pattern into loader_instance_heap_calloc.
struct loader_layer_list *layer_list) {
if (layer_list->capacity == 0) {
layer_list->list =
- loader_instance_heap_alloc(inst, sizeof(struct loader_layer_properties) * 64, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ loader_instance_heap_calloc(inst, sizeof(struct loader_layer_properties) * 64, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (layer_list->list == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
"loader_get_next_layer_property_slot: Out of memory can not add any layer properties to list");
return NULL;
}
- memset(layer_list->list, 0, sizeof(struct loader_layer_properties) * 64);
layer_list->capacity = sizeof(struct loader_layer_properties) * 64;
}
size_t capacity = 32 * element_size;
list_info->count = 0;
list_info->capacity = 0;
- list_info->list = loader_instance_heap_alloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ list_info->list = loader_instance_heap_calloc(inst, capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (list_info->list == NULL) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0, "loader_init_generic_list: Failed to allocate space for generic list");
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
- memset(list_info->list, 0, capacity);
list_info->capacity = capacity;
return VK_SUCCESS;
}
// Manage lists of VkLayerProperties
static bool loader_init_layer_list(const struct loader_instance *inst, struct loader_layer_list *list) {
list->capacity = 32 * sizeof(struct loader_layer_properties);
- list->list = loader_instance_heap_alloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ list->list = loader_instance_heap_calloc(inst, list->capacity, VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (list->list == NULL) {
return false;
}
- memset(list->list, 0, list->capacity);
list->count = 0;
return true;
}
static struct loader_icd_term *loader_icd_create(const struct loader_instance *inst) {
struct loader_icd_term *icd_term;
- icd_term = loader_instance_heap_alloc(inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ icd_term = loader_instance_heap_calloc(inst, sizeof(struct loader_icd_term), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!icd_term) {
return NULL;
}
- memset(icd_term, 0, sizeof(struct loader_icd_term));
-
return icd_term;
}
// Something is different, so do the full path of checking every device and creating a new array to use.
// This can happen if a device was added, or removed, or we hadn't previously queried all the data and we
// have more to store.
- new_phys_devs = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_tramp *) * new_count,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == new_phys_devs) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
"setup_loader_tramp_phys_devs: Failed to allocate new physical device array of size %d", new_count);
res = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
- memset(new_phys_devs, 0, sizeof(struct loader_physical_device_tramp *) * new_count);
if (new_count > phys_dev_count) {
found_count = phys_dev_count;
}
// Create an allocation large enough to hold both the windows sorting enumeration and non-windows physical device enumeration
- new_phys_devs = loader_instance_heap_alloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_count,
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
+ new_phys_devs = loader_instance_heap_calloc(inst, sizeof(struct loader_physical_device_term *) * new_phys_devs_count,
+ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == new_phys_devs) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
"setup_loader_term_phys_devs: Failed to allocate new physical device array of size %d", new_phys_devs_count);
res = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
- memset(new_phys_devs, 0, sizeof(struct loader_physical_device_term *) * new_phys_devs_count);
// Current index into the new_phys_devs array - increment whenever we've written in.
uint32_t idx = 0;
if (NULL != pPhysicalDeviceGroupProperties) {
// Create an array for the new physical device groups, which will be stored
// in the instance for the Terminator code.
- new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_alloc(
+ new_phys_dev_groups = (VkPhysicalDeviceGroupProperties **)loader_instance_heap_calloc(
inst, total_count * sizeof(VkPhysicalDeviceGroupProperties *), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (NULL == new_phys_dev_groups) {
loader_log(inst, VULKAN_LOADER_ERROR_BIT, 0,
res = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
- memset(new_phys_dev_groups, 0, total_count * sizeof(VkPhysicalDeviceGroupProperties *));
// Create a temporary array (on the stack) to keep track of the
// returned VkPhysicalDevice values.
VkResult res = VK_SUCCESS;
bool app_is_vulkan_1_1 = loader_check_version_meets_required(LOADER_VERSION_1_1_0, inst->app_api_version);
- struct LinuxSortedDeviceInfo *sorted_device_info =
- loader_instance_heap_alloc(inst, phys_dev_count * sizeof(struct LinuxSortedDeviceInfo), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ struct LinuxSortedDeviceInfo *sorted_device_info = loader_instance_heap_calloc(
+ inst, phys_dev_count * sizeof(struct LinuxSortedDeviceInfo), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (NULL == sorted_device_info) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
- memset(sorted_device_info, 0, phys_dev_count * sizeof(struct LinuxSortedDeviceInfo));
loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, "linux_read_sorted_physical_devices:");
loader_log(inst, VULKAN_LOADER_INFO_BIT | VULKAN_LOADER_DRIVER_BIT, 0, " Original order:");
goto out;
}
sorted_alloc = 16;
- *sorted_devices =
- loader_instance_heap_alloc(inst, sorted_alloc * sizeof(struct loader_phys_dev_per_icd), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
+ *sorted_devices = loader_instance_heap_calloc(inst, sorted_alloc * sizeof(struct loader_phys_dev_per_icd),
+ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (*sorted_devices == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
- memset(*sorted_devices, 0, sorted_alloc * sizeof(struct loader_phys_dev_per_icd));
-
for (uint32_t i = 0;; ++i) {
IDXGIAdapter1 *adapter;
hres = dxgi_factory->lpVtbl->EnumAdapterByGpuPreference(dxgi_factory, i, DXGI_GPU_PREFERENCE_UNSPECIFIED,
return VK_SUCCESS;
}
-#endif // _WIN32
\ No newline at end of file
+#endif // _WIN32
loader_scan_for_implicit_layers(NULL, &layers);
// We'll need to save the dl handles so we can close them later
- loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count);
+ loader_platform_dl_handle *libs =
+ loader_calloc(NULL, sizeof(loader_platform_dl_handle) * layers.count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (libs == NULL && layers.count > 0) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
- memset(libs, 0, sizeof(loader_platform_dl_handle) * layers.count);
size_t lib_count = 0;
// Prepend layers onto the chain if they implement this entry point
continue;
}
- VkEnumerateInstanceExtensionPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceExtensionPropertiesChain));
+ VkEnumerateInstanceExtensionPropertiesChain *chain_link =
+ loader_alloc(NULL, sizeof(VkEnumerateInstanceExtensionPropertiesChain), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (chain_link == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
while (chain_head != &chain_tail) {
VkEnumerateInstanceExtensionPropertiesChain *holder = chain_head;
chain_head = (VkEnumerateInstanceExtensionPropertiesChain *)chain_head->pNextLink;
- free(holder);
+ loader_free(NULL, holder);
}
// Close the dl handles
for (size_t i = 0; i < lib_count; ++i) {
loader_platform_close_library(libs[i]);
}
- free(libs);
+ loader_free(NULL, libs);
return res;
}
loader_scan_for_implicit_layers(NULL, &layers);
// We'll need to save the dl handles so we can close them later
- loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count);
- memset(libs, 0, sizeof(loader_platform_dl_handle) * layers.count);
+ loader_platform_dl_handle *libs =
+ loader_calloc(NULL, sizeof(loader_platform_dl_handle) * layers.count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (libs == NULL && layers.count > 0) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
continue;
}
- VkEnumerateInstanceLayerPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceLayerPropertiesChain));
+ VkEnumerateInstanceLayerPropertiesChain *chain_link =
+ loader_alloc(NULL, sizeof(VkEnumerateInstanceLayerPropertiesChain), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (chain_link == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
while (chain_head != &chain_tail) {
VkEnumerateInstanceLayerPropertiesChain *holder = chain_head;
chain_head = (VkEnumerateInstanceLayerPropertiesChain *)chain_head->pNextLink;
- free(holder);
+ loader_free(NULL, holder);
}
// Close the dl handles
for (size_t i = 0; i < lib_count; ++i) {
loader_platform_close_library(libs[i]);
}
- free(libs);
+ loader_free(NULL, libs);
return res;
}
loader_scan_for_implicit_layers(NULL, &layers);
// We'll need to save the dl handles so we can close them later
- loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count);
+ loader_platform_dl_handle *libs =
+ loader_calloc(NULL, sizeof(loader_platform_dl_handle) * layers.count, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (libs == NULL && layers.count > 0) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
- memset(libs, 0, sizeof(loader_platform_dl_handle) * layers.count);
size_t lib_count = 0;
// Prepend layers onto the chain if they implement this entry point
continue;
}
- VkEnumerateInstanceVersionChain *chain_link = malloc(sizeof(VkEnumerateInstanceVersionChain));
+ VkEnumerateInstanceVersionChain *chain_link =
+ loader_alloc(NULL, sizeof(VkEnumerateInstanceVersionChain), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (chain_link == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
while (chain_head != &chain_tail) {
VkEnumerateInstanceVersionChain *holder = chain_head;
chain_head = (VkEnumerateInstanceVersionChain *)chain_head->pNextLink;
- free(holder);
+ loader_free(NULL, holder);
}
// Close the dl handles
for (size_t i = 0; i < lib_count; ++i) {
loader_platform_close_library(libs[i]);
}
- free(libs);
+ loader_free(NULL, libs);
return res;
}
pIcdSurface->non_platform_offset = (uint32_t)((uint8_t *)(&pIcdSurface->base_size) - (uint8_t *)pIcdSurface);
pIcdSurface->entire_size = sizeof(VkIcdSurface);
- pIcdSurface->real_icd_surfaces = loader_instance_heap_alloc(instance, sizeof(VkSurfaceKHR) * instance->total_icd_count,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ pIcdSurface->real_icd_surfaces = loader_instance_heap_calloc(instance, sizeof(VkSurfaceKHR) * instance->total_icd_count,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pIcdSurface->real_icd_surfaces == NULL) {
loader_instance_heap_free(instance, pIcdSurface);
pIcdSurface = NULL;
- } else {
- memset(pIcdSurface->real_icd_surfaces, 0, sizeof(VkSurfaceKHR) * instance->total_icd_count);
}
}
return pIcdSurface;