return VK_SUCCESS;
}
-bool loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
- const struct loader_envvar_disable_layers_filter *disable_filter,
- const struct loader_layer_properties *prop, struct loader_layer_list *target_list,
- struct loader_layer_list *expanded_target_list, const struct loader_layer_list *source_list);
-
// Manage lists of VkLayerProperties
static bool loader_init_layer_list(const struct loader_instance *inst, struct loader_layer_list *list) {
list->capacity = 32 * sizeof(struct loader_layer_properties);
// If not a meta-layer, simply add it.
if (0 == (layer_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
- loader_add_layer_properties_to_list(inst, output_list, 1, layer_prop);
- loader_add_layer_properties_to_list(inst, expanded_output_list, 1, layer_prop);
+ err = loader_add_layer_properties_to_list(inst, output_list, 1, layer_prop);
+ if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
+ err = loader_add_layer_properties_to_list(inst, expanded_output_list, 1, layer_prop);
+ if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
} else {
- loader_add_meta_layer(inst, enable_filter, disable_filter, layer_prop, output_list, expanded_output_list, source_list);
+ err = loader_add_meta_layer(inst, enable_filter, disable_filter, layer_prop, output_list, expanded_output_list,
+ source_list, NULL);
+ if (err == VK_ERROR_OUT_OF_HOST_MEMORY) return err;
}
}
// Check the individual implicit layer for the enable/disable environment variable settings. Only add it after
// every check has passed indicating it should be used.
-static void loader_add_implicit_layer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
-                                      const struct loader_envvar_filter *enable_filter,
-                                      const struct loader_envvar_disable_layers_filter *disable_filter,
-                                      struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
-                                      const struct loader_layer_list *source_list) {
+// Now returns a VkResult so allocation failures propagate to callers:
+// VK_SUCCESS normally, or VK_ERROR_OUT_OF_HOST_MEMORY when appending to a layer list fails.
+static VkResult loader_add_implicit_layer(const struct loader_instance *inst, const struct loader_layer_properties *prop,
+                                          const struct loader_envvar_filter *enable_filter,
+                                          const struct loader_envvar_disable_layers_filter *disable_filter,
+                                          struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
+                                          const struct loader_layer_list *source_list) {
+    VkResult result = VK_SUCCESS;
    if (loader_implicit_layer_is_enabled(inst, enable_filter, disable_filter, prop)) {
        if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
-            loader_add_layer_properties_to_list(inst, target_list, 1, prop);
+            result = loader_add_layer_properties_to_list(inst, target_list, 1, prop);
+            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
            if (NULL != expanded_target_list) {
-                loader_add_layer_properties_to_list(inst, expanded_target_list, 1, prop);
+                result = loader_add_layer_properties_to_list(inst, expanded_target_list, 1, prop);
+                // No early return needed: result falls through to the final `return result;` below.
            }
        } else {
-            loader_add_meta_layer(inst, enable_filter, disable_filter, prop, target_list, expanded_target_list, source_list);
+            // NULL out-param: this caller does not need to know whether every component layer
+            // of the meta-layer was found; it only propagates the VkResult.
+            result = loader_add_meta_layer(inst, enable_filter, disable_filter, prop, target_list, expanded_target_list,
+                                           source_list, NULL);
        }
    }
+    return result;
}
// Add the component layers of a meta-layer to the active list of layers
-bool loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
-                           const struct loader_envvar_disable_layers_filter *disable_filter,
-                           const struct loader_layer_properties *prop, struct loader_layer_list *target_list,
-                           struct loader_layer_list *expanded_target_list, const struct loader_layer_list *source_list) {
-    bool found = true;
+// Returns VK_SUCCESS or VK_ERROR_OUT_OF_HOST_MEMORY. The old bool "found" return is replaced by the
+// optional out-parameter out_found_all_component_layers (may be NULL), used when this function recurses
+// into component layers that are themselves meta-layers.
+VkResult loader_add_meta_layer(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
+                               const struct loader_envvar_disable_layers_filter *disable_filter,
+                               const struct loader_layer_properties *prop, struct loader_layer_list *target_list,
+                               struct loader_layer_list *expanded_target_list, const struct loader_layer_list *source_list,
+                               bool *out_found_all_component_layers) {
+    VkResult result = VK_SUCCESS;
+    bool found_all_component_layers = true;
    // We need to add all the individual component layers
    loader_api_version meta_layer_api_version = loader_make_version(prop->info.specVersion);
        // If the component layer is itself an implicit layer, we need to do the implicit layer enable
        // checks
        if (0 == (search_prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
-            loader_add_implicit_layer(inst, search_prop, enable_filter, disable_filter, target_list, expanded_target_list,
-                                      source_list);
+            result = loader_add_implicit_layer(inst, search_prop, enable_filter, disable_filter, target_list,
+                                               expanded_target_list, source_list);
+            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
        } else {
            if (0 != (search_prop->type_flags & VK_LAYER_TYPE_FLAG_META_LAYER)) {
-                found = loader_add_meta_layer(inst, enable_filter, disable_filter, search_prop, target_list,
-                                              expanded_target_list, source_list);
+                // Recurse into a nested meta-layer; track whether all of ITS components were found.
+                bool found_layers_in_component_meta_layer = true;
+                result = loader_add_meta_layer(inst, enable_filter, disable_filter, search_prop, target_list,
+                                               expanded_target_list, source_list, &found_layers_in_component_meta_layer);
+                if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
+                if (!found_layers_in_component_meta_layer) found_all_component_layers = false;
            } else {
-                loader_add_layer_properties_to_list(inst, target_list, 1, search_prop);
+                result = loader_add_layer_properties_to_list(inst, target_list, 1, search_prop);
+                if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
                if (NULL != expanded_target_list) {
-                    loader_add_layer_properties_to_list(inst, expanded_target_list, 1, search_prop);
+                    result = loader_add_layer_properties_to_list(inst, expanded_target_list, 1, search_prop);
+                    if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
                }
            }
        }
        loader_log(inst, VULKAN_LOADER_WARN_BIT | VULKAN_LOADER_LAYER_BIT, 0,
               "Failed to find layer name \"%s\" component layer \"%s\" to activate (Policy #LLP_LAYER_7)",
               prop->component_layer_names[comp_layer], prop->component_layer_names[comp_layer]);
-        found = false;
+        found_all_component_layers = false;
    }
}
    // Add this layer to the overall target list (not the expanded one)
-    if (found) {
-        loader_add_layer_properties_to_list(inst, target_list, 1, prop);
+    if (found_all_component_layers) {
+        result = loader_add_layer_properties_to_list(inst, target_list, 1, prop);
+        if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
+        // Write the result to out_found_all_component_layers in case this function is being recursed
+        // NOTE(review): the out-param is only written when found_all_component_layers is true; the
+        // recursive caller above initializes its local to true, so a false value is never observed
+        // through this path — confirm that is intended rather than writing unconditionally.
+        if (out_found_all_component_layers) *out_found_all_component_layers = found_all_component_layers;
    }
-    return found;
+    return result;
}
static VkExtensionProperties *get_extension_property(const char *name, const struct loader_extension_list *list) {
}
file_vers = cJSON_PrintUnformatted(item);
if (NULL == file_vers) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto out;
}
loader_log(inst, VULKAN_LOADER_INFO_BIT, 0, "Found manifest file %s (file version %s)", filename, file_vers);
res = VK_ERROR_INCOMPATIBLE_DRIVER;
goto out;
}
+ } else {
+ res = VK_ERROR_OUT_OF_HOST_MEMORY;
+ goto out;
}
}
out:
// Go through the search_list and find any layers which match type. If layer
// type match is found in then add it to ext_list.
-static void loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
-                                       const struct loader_envvar_disable_layers_filter *disable_filter,
-                                       struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
-                                       const struct loader_layer_list *source_list) {
+// Now returns a VkResult: VK_SUCCESS, or VK_ERROR_OUT_OF_HOST_MEMORY from any per-layer add,
+// which aborts the scan immediately.
+static VkResult loader_add_implicit_layers(const struct loader_instance *inst, const struct loader_envvar_filter *enable_filter,
+                                           const struct loader_envvar_disable_layers_filter *disable_filter,
+                                           struct loader_layer_list *target_list, struct loader_layer_list *expanded_target_list,
+                                           const struct loader_layer_list *source_list) {
    for (uint32_t src_layer = 0; src_layer < source_list->count; src_layer++) {
        const struct loader_layer_properties *prop = &source_list->list[src_layer];
        if (0 == (prop->type_flags & VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER)) {
-            loader_add_implicit_layer(inst, prop, enable_filter, disable_filter, target_list, expanded_target_list, source_list);
+            VkResult result = loader_add_implicit_layer(inst, prop, enable_filter, disable_filter, target_list,
+                                                        expanded_target_list, source_list);
+            if (result == VK_ERROR_OUT_OF_HOST_MEMORY) return result;
        }
    }
+    return VK_SUCCESS;
}
VkResult loader_enable_instance_layers(struct loader_instance *inst, const VkInstanceCreateInfo *pCreateInfo,
}
// Add any implicit layers first
- loader_add_implicit_layers(inst, &layers_enable_filter, &layers_disable_filter, &inst->app_activated_layer_list,
- &inst->expanded_activated_layer_list, instance_layers);
+ res = loader_add_implicit_layers(inst, &layers_enable_filter, &layers_disable_filter, &inst->app_activated_layer_list,
+ &inst->expanded_activated_layer_list, instance_layers);
+ if (res != VK_SUCCESS) {
+ goto out;
+ }
// Add any layers specified via environment variable next
res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, "VK_INSTANCE_LAYERS", &layers_enable_filter,
// Build the lists of active layers (including metalayers) and expanded layers (with metalayers resolved to their
// components)
- loader_add_implicit_layers(inst, &layers_enable_filter, &layers_disable_filter, &active_layers, &expanded_layers,
- instance_layers);
+ res = loader_add_implicit_layers(inst, &layers_enable_filter, &layers_disable_filter, &active_layers, &expanded_layers,
+ instance_layers);
+ if (res != VK_SUCCESS) {
+ goto out;
+ }
res = loader_add_environment_layers(inst, VK_LAYER_TYPE_FLAG_EXPLICIT_LAYER, ENABLED_LAYERS_ENV, &layers_enable_filter,
&layers_disable_filter, &active_layers, &expanded_layers, instance_layers);
if (res != VK_SUCCESS) {
if (VK_SUCCESS != res) {
if (NULL != new_phys_devs) {
// We've encountered an error, so we should free the new buffers.
- for (uint32_t i = 0; i < new_phys_devs_count; i++) {
+ for (uint32_t i = 0; i < new_phys_devs_capacity; i++) {
+ // May not have allocated this far, skip it if we hadn't.
+ if (new_phys_devs[i] == NULL) continue;
+
// If an OOM occurred inside the copying of the new physical devices into the existing array
// will leave some of the old physical devices in the array which may have been copied into
// the new array, leading to them being freed twice. To avoid this we just make sure to not
VkAllocationCallbacks callbacks{};
// Implementation internals
struct AllocationDetails {
+ std::unique_ptr<char[]> allocation;
size_t requested_size_bytes;
size_t actual_size_bytes;
+ size_t alignment;
VkSystemAllocationScope alloc_scope;
};
const static size_t UNKNOWN_ALLOCATION = std::numeric_limits<size_t>::max();
size_t allocation_count = 0;
size_t call_count = 0;
- std::vector<std::unique_ptr<char[]>> allocations;
- std::vector<void*> allocations_aligned;
- std::vector<AllocationDetails> allocation_details;
- void add_element(std::unique_ptr<char[]>&& alloc, void* aligned_alloc, AllocationDetails detail) {
- allocations.push_back(std::move(alloc));
- allocations_aligned.push_back(aligned_alloc);
- allocation_details.push_back(detail);
- }
- void erase_index(size_t index) {
- allocations.erase(std::next(allocations.begin(), index));
- allocations_aligned.erase(std::next(allocations_aligned.begin(), index));
- allocation_details.erase(std::next(allocation_details.begin(), index));
- }
- size_t find_element(void* ptr) {
- auto it = std::find(allocations_aligned.begin(), allocations_aligned.end(), ptr);
- if (it == allocations_aligned.end()) return UNKNOWN_ALLOCATION;
- return it - allocations_aligned.begin();
- }
+ std::unordered_map<void*, AllocationDetails> allocations;
void* allocate(size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
if ((settings.should_fail_on_allocation && allocation_count == settings.fail_after_allocations) ||
return nullptr;
}
call_count++;
- AllocationDetails detail{size, size + (alignment - 1), alloc_scope};
- auto alloc = std::unique_ptr<char[]>(new char[detail.actual_size_bytes]);
- if (!alloc) return nullptr;
- uint64_t addr = (uint64_t)alloc.get();
+ allocation_count++;
+ AllocationDetails detail{nullptr, size, size + (alignment - 1), alignment, alloc_scope};
+ detail.allocation = std::unique_ptr<char[]>(new char[detail.actual_size_bytes]);
+ if (!detail.allocation) {
+ abort();
+ };
+ uint64_t addr = (uint64_t)detail.allocation.get();
addr += (alignment - 1);
addr &= ~(alignment - 1);
void* aligned_alloc = (void*)addr;
- add_element(std::move(alloc), aligned_alloc, detail);
- allocation_count++;
- return allocations_aligned.back();
+ allocations.insert(std::make_pair(aligned_alloc, std::move(detail)));
+ return aligned_alloc;
}
void* reallocate(void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope alloc_scope) {
if (pOriginal == nullptr) {
return allocate(size, alignment, alloc_scope);
}
- size_t index = find_element(pOriginal);
- if (index == UNKNOWN_ALLOCATION) return nullptr;
- size_t original_size = allocation_details[index].requested_size_bytes;
+ auto elem = allocations.find(pOriginal);
+ if (elem == allocations.end()) return nullptr;
+ size_t original_size = elem->second.requested_size_bytes;
// We only care about the case where realloc is used to increase the size
if (size >= original_size && settings.should_fail_after_set_number_of_calls && call_count == settings.fail_after_calls)
return nullptr;
call_count++;
if (size == 0) {
- erase_index(index);
+ allocations.erase(elem);
allocation_count--;
return nullptr;
} else if (size < original_size) {
allocation_count--; // allocate() increments this, we we don't want that
call_count--; // allocate() also increments this, we don't want that
memcpy(new_alloc, pOriginal, original_size);
- erase_index(index);
+ allocations.erase(elem);
return new_alloc;
}
}
    void free(void* pMemory) {
        if (pMemory == nullptr) return;
-        size_t index = find_element(pMemory);
-        if (index == UNKNOWN_ALLOCATION) return;
-        erase_index(index);
+        // Look up by the aligned pointer handed out by allocate(); silently ignore
+        // pointers this tracker never issued.
+        auto elem = allocations.find(pMemory);
+        if (elem == allocations.end()) return;
+        // Erasing the map entry destroys the AllocationDetails, whose unique_ptr<char[]>
+        // releases the underlying storage.
+        allocations.erase(elem);
        assert(allocation_count != 0 && "Cant free when there are no valid allocations");
        allocation_count--;
    }
public:
MemoryTracker(MemoryTrackerSettings settings) noexcept : settings(settings) {
- allocations.reserve(512);
- allocations_aligned.reserve(512);
- allocation_details.reserve(512);
+ allocations.reserve(3000);
callbacks.pUserData = this;
callbacks.pfnAllocation = public_allocation;
bool empty() noexcept { return allocation_count == 0; }
- void update_settings(MemoryTrackerSettings new_settings) noexcept { settings = new_settings; }
- size_t current_allocation_count() const noexcept { return allocation_count; }
- size_t current_call_count() const noexcept { return call_count; }
// Static callbacks
static VKAPI_ATTR void* VKAPI_CALL public_allocation(void* pUserData, size_t size, size_t alignment,
VkSystemAllocationScope allocationScope) noexcept {
uint32_t num_physical_devices = 4;
uint32_t num_implicit_layers = 3;
for (uint32_t i = 0; i < num_physical_devices; i++) {
- env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2));
+ env.add_icd(TestICDDetails(TEST_ICD_PATH_VERSION_2)
+ .icd_manifest.set_is_portability_driver(false)
+ .set_library_arch(sizeof(void*) == 8 ? "64" : "32"));
auto& driver = env.get_test_icd(i);
+ driver.set_icd_api_version(VK_API_VERSION_1_1);
+ driver.add_instance_extension("VK_KHR_get_physical_device_properties2");
driver.physical_devices.emplace_back("physical_device_0");
driver.physical_devices[0].add_queue_family_properties({{VK_QUEUE_GRAPHICS_BIT, 1, 0, {1, 1, 1}}, false});
driver.physical_devices[0].add_extensions({"VK_EXT_one", "VK_EXT_two", "VK_EXT_three", "VK_EXT_four", "VK_EXT_five"});
}
+
+ env.add_icd(TestICDDetails(CURRENT_PLATFORM_DUMMY_BINARY_WRONG_TYPE).set_is_fake(true));
+
const char* layer_name = "VK_LAYER_ImplicitAllocFail";
env.add_implicit_layer(ManifestLayer{}.add_layer(ManifestLayer::LayerDescription{}
.set_name(layer_name)
size_t fail_index = 0;
VkResult result = VK_ERROR_OUT_OF_HOST_MEMORY;
while (result == VK_ERROR_OUT_OF_HOST_MEMORY && fail_index <= 10000) {
- MemoryTracker tracker(MemoryTrackerSettings{false, 0, true, fail_index});
+ MemoryTracker tracker{MemoryTrackerSettings{false, 0, true, fail_index}};
fail_index++; // applies to the next loop
VkInstance instance;