Document instance_private_data and device_private_data.
Add utility containers (util::unordered_set, util::unordered_map)
so that the storage containers we use allocate their memory through
the Vulkan allocation callbacks. Additionally, ensure that these
allocations do not throw when the host runs out of memory but
instead return the appropriate Vulkan error code.
Also keep a copy of the allocators so they can be used by other
layer functionality, making it easier to follow the Vulkan
specification's rules around memory allocation.
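
As an illustrative sketch of the pattern used throughout this patch
(names as in the layer sources), a device allocator is now derived
from the instance allocator, preferring the callbacks passed to
vkCreateDevice() when they are non-null:

    util::allocator device_allocator{ inst_data.get_allocator(),
                                      VK_SYSTEM_ALLOCATION_SCOPE_DEVICE,
                                      pAllocator };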
Add a util::optional utility that makes it easier to handle
errors in the containers described above.
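
The containers report allocation failure through util::optional
instead of throwing; for example (taken from the swapchain tracking
added in this patch):

    auto result = swapchains.try_insert(swapchain);
    return result.has_value() ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;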
Fix some small issues with the vkCreateInstance/vkCreateDevice
handling and ensure that the next layer's vkDestroyDevice is
called from the layer's vkDestroyDevice handler.
Change-Id: Ic9d8ece405c82e743a2c016cc3dabf984cf77fc1
Signed-off-by: Normunds Rieksts <normunds.rieksts@arm.com>
#include "util/extension_list.hpp"
#include "util/custom_allocator.hpp"
#include "wsi/wsi_factory.hpp"
+#include "util/log.hpp"
#define VK_LAYER_API_VERSION VK_MAKE_VERSION(1, 0, VK_HEADER_VERSION)
return chain_info;
}
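+/**
+ * @brief Fetch the address of the named instance-level function, logging a warning when it cannot be found.
+ */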
+template <typename T>
+static T get_instance_proc_addr(PFN_vkGetInstanceProcAddr fp_get_instance_proc_addr, const char *name,
+ VkInstance instance = VK_NULL_HANDLE)
+{
+ T func = reinterpret_cast<T>(fp_get_instance_proc_addr(instance, name));
+ if (func == nullptr)
+ {
+ WSI_LOG_WARNING("Failed to get address of %s", name);
+ }
+
+ return func;
+}
+
/* This is where the layer is initialised and the instance dispatch table is constructed. */
VKAPI_ATTR VkResult create_instance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
VkInstance *pInstance)
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = layerCreateInfo->u.pLayerInfo->pfnNextGetInstanceProcAddr;
- PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(nullptr, "vkCreateInstance");
+ auto fpCreateInstance = get_instance_proc_addr<PFN_vkCreateInstance>(fpGetInstanceProcAddr, "vkCreateInstance");
if (nullptr == fpCreateInstance)
{
return VK_ERROR_INITIALIZATION_FAILED;
return result;
}
- instance_dispatch_table table;
+ instance_dispatch_table table{};
result = table.populate(*pInstance, fpGetInstanceProcAddr);
if (result != VK_SUCCESS)
{
+ if (table.DestroyInstance != nullptr)
+ {
+ table.DestroyInstance(*pInstance, pAllocator);
+ }
return result;
}
/* Find all the platforms that the layer can handle based on pCreateInfo->ppEnabledExtensionNames. */
auto layer_platforms_to_enable = wsi::find_enabled_layer_platforms(pCreateInfo);
- std::unique_ptr<instance_private_data> inst_data{
- new instance_private_data{table, loader_callback, layer_platforms_to_enable}};
- instance_private_data::set(*pInstance, std::move(inst_data));
- return VK_SUCCESS;
+
+ /* Following the spec: use the callbacks provided to vkCreateInstance() if not nullptr,
+ * otherwise use the default callbacks.
+ */
+ util::allocator instance_allocator{ VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, pAllocator };
+ result = instance_private_data::associate(*pInstance, table, loader_callback, layer_platforms_to_enable,
+ instance_allocator);
+ if (result != VK_SUCCESS)
+ {
+ if (table.DestroyInstance != nullptr)
+ {
+ table.DestroyInstance(*pInstance, pAllocator);
+ }
+ }
+
+ return result;
}
VKAPI_ATTR VkResult create_device(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
/* Retrieve the vkGetDeviceProcAddr and the vkCreateDevice function pointers for the next layer in the chain. */
PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = layerCreateInfo->u.pLayerInfo->pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = layerCreateInfo->u.pLayerInfo->pfnNextGetDeviceProcAddr;
- PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(VK_NULL_HANDLE, "vkCreateDevice");
+
+ auto fpCreateDevice = get_instance_proc_addr<PFN_vkCreateDevice>(fpGetInstanceProcAddr, "vkCreateDevice");
if (nullptr == fpCreateDevice)
{
return VK_ERROR_INITIALIZATION_FAILED;
layerCreateInfo->u.pLayerInfo = layerCreateInfo->u.pLayerInfo->pNext;
/* Copy the extension to a util::extension_list. */
- util::allocator allocator{pAllocator, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND};
+ auto &inst_data = instance_private_data::get(physicalDevice);
+
+ util::allocator allocator{inst_data.get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, pAllocator};
util::extension_list enabled_extensions{allocator};
VkResult result;
result = enabled_extensions.add(pCreateInfo->ppEnabledExtensionNames, pCreateInfo->enabledExtensionCount);
}
/* Add the extensions required by the platforms that are being enabled in the layer. */
- auto &inst_data = instance_private_data::get(physicalDevice);
const util::wsi_platform_set& enabled_platforms = inst_data.get_enabled_platforms();
result = wsi::add_extensions_required_by_layer(physicalDevice, enabled_platforms, enabled_extensions);
if (result != VK_SUCCESS)
return result;
}
- device_dispatch_table table;
+ device_dispatch_table table{};
result = table.populate(*pDevice, fpGetDeviceProcAddr);
if (result != VK_SUCCESS)
{
+ if (table.DestroyDevice != nullptr)
+ {
+ table.DestroyDevice(*pDevice, pAllocator);
+ }
+
return result;
}
- std::unique_ptr<device_private_data> device{new device_private_data{inst_data, physicalDevice, *pDevice,
- table, loader_callback}};
- device_private_data::set(*pDevice, std::move(device));
- return VK_SUCCESS;
+ /* Following the spec: use the callbacks provided to vkCreateDevice() if not nullptr, otherwise use the callbacks
+ * provided to the instance (if no allocation callbacks were provided to the instance, the default ones are used).
+ */
+ util::allocator device_allocator{ inst_data.get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_DEVICE, pAllocator };
+ result =
+ device_private_data::associate(*pDevice, inst_data, physicalDevice, table, loader_callback, device_allocator);
+
+ if (result != VK_SUCCESS)
+ {
+ if (table.DestroyDevice != nullptr)
+ {
+ table.DestroyDevice(*pDevice, pAllocator);
+ }
+ }
+
+ return result;
}
} /* namespace layer */
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL wsi_layer_vkDestroyInstance(VkInstance instance,
const VkAllocationCallbacks *pAllocator)
{
- assert(instance);
- layer::instance_private_data::get(instance).disp.DestroyInstance(instance, pAllocator);
- layer::instance_private_data::destroy(instance);
+ if (instance == VK_NULL_HANDLE)
+ {
+ return;
+ }
+
+ auto fn_destroy_instance = layer::instance_private_data::get(instance).disp.DestroyInstance;
+
+ /* Call disassociate() before calling vkDestroyInstance, as another thread may create a new instance
+ * right after our vkDestroyInstance() call and, if we are unlucky, it could get the same address.
+ */
+ layer::instance_private_data::disassociate(instance);
+
+ assert(fn_destroy_instance);
+ fn_destroy_instance(instance, pAllocator);
}
VK_LAYER_EXPORT VKAPI_ATTR void VKAPI_CALL wsi_layer_vkDestroyDevice(VkDevice device,
const VkAllocationCallbacks *pAllocator)
{
- layer::device_private_data::destroy(device);
+ if (device == VK_NULL_HANDLE)
+ {
+ return;
+ }
+
+ auto fn_destroy_device = layer::device_private_data::get(device).disp.DestroyDevice;
+
+ /* Call disassociate() before calling vkDestroyDevice, as another thread may create a new device
+ * right after our vkDestroyDevice() call and, if we are unlucky, it could get the same address.
+ */
+ layer::device_private_data::disassociate(device);
+
+ assert(fn_destroy_device);
+ fn_destroy_device(device, pAllocator);
}
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL wsi_layer_vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo,
*/
#include "private_data.hpp"
-
#include "wsi/wsi_factory.hpp"
-
-#include <unordered_map>
+#include "util/unordered_map.hpp"
+#include "util/log.hpp"
namespace layer
{
static std::mutex g_data_lock;
-static std::unordered_map<void *, std::unique_ptr<instance_private_data>> g_instance_data;
-static std::unordered_map<void *, std::unique_ptr<device_private_data>> g_device_data;
+
+/* The maps below use plain pointers to store the instance/device private data objects.
+ * This means that these objects are leaked if the application terminates without calling vkDestroyInstance
+ * or vkDestroyDevice. This is acceptable, as it is the application's responsibility to call these functions.
+ */
+static util::unordered_map<void *, instance_private_data *> g_instance_data{ util::allocator::get_generic() };
+static util::unordered_map<void *, device_private_data *> g_device_data{ util::allocator::get_generic() };
template <typename object_type, typename get_proc_type>
static PFN_vkVoidFunction get_proc_helper(object_type obj, get_proc_type get_proc,
return ok ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
-instance_private_data::instance_private_data(const instance_dispatch_table& table,
+instance_private_data::instance_private_data(const instance_dispatch_table &table,
PFN_vkSetInstanceLoaderData set_loader_data,
- util::wsi_platform_set enabled_layer_platforms)
+ util::wsi_platform_set enabled_layer_platforms,
+ const util::allocator &alloc)
: disp(table)
, SetInstanceLoaderData(set_loader_data)
, enabled_layer_platforms(enabled_layer_platforms)
+ , allocator(alloc)
{
}
+/**
+ * @brief Obtain the loader's dispatch table for the given dispatchable object.
+ * @note Dispatchable objects are structures that have a VkLayerDispatchTable as their first member.
+ */
template <typename dispatchable_type>
static inline void *get_key(dispatchable_type dispatchable_object)
{
return *reinterpret_cast<void **>(dispatchable_object);
}
-void instance_private_data::set(VkInstance inst, std::unique_ptr<instance_private_data> inst_data)
+VkResult instance_private_data::associate(VkInstance instance, const instance_dispatch_table &table,
+ PFN_vkSetInstanceLoaderData set_loader_data,
+ util::wsi_platform_set enabled_layer_platforms, const util::allocator &allocator)
{
+ auto *instance_data =
+ allocator.create<instance_private_data>(1, table, set_loader_data, enabled_layer_platforms, allocator);
+
+ if (instance_data == nullptr)
+ {
+ WSI_LOG_ERROR("Instance private data for instance(%p) could not be allocated. Out of memory.",
+ reinterpret_cast<void *>(instance));
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ const auto key = get_key(instance);
scoped_mutex lock(g_data_lock);
- g_instance_data[get_key(inst)] = std::move(inst_data);
+
+ auto it = g_instance_data.find(key);
+ if (it != g_instance_data.end())
+ {
+ WSI_LOG_WARNING("Hash collision when adding new instance (%p)", reinterpret_cast<void *>(instance));
+
+ destroy(it->second);
+ g_instance_data.erase(it);
+ }
+
+ auto result = g_instance_data.try_insert(std::make_pair(key, instance_data));
+ if (result.has_value())
+ {
+ return VK_SUCCESS;
+ }
+ else
+ {
+ WSI_LOG_WARNING("Failed to insert instance_private_data for instance (%p) as host is out of memory",
+ reinterpret_cast<void *>(instance));
+
+ destroy(instance_data);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+}
+
+void instance_private_data::disassociate(VkInstance instance)
+{
+ assert(instance != VK_NULL_HANDLE);
+ instance_private_data *instance_data = nullptr;
+ {
+ scoped_mutex lock(g_data_lock);
+ auto it = g_instance_data.find(get_key(instance));
+ if (it == g_instance_data.end())
+ {
+ WSI_LOG_WARNING("Failed to find private data for instance (%p)", reinterpret_cast<void *>(instance));
+ return;
+ }
+
+ instance_data = it->second;
+ g_instance_data.erase(it);
+ }
+
+ destroy(instance_data);
}
template <typename dispatchable_type>
static instance_private_data &get_instance_private_data(dispatchable_type dispatchable_object)
{
scoped_mutex lock(g_data_lock);
- return *g_instance_data[get_key(dispatchable_object)];
+ return *g_instance_data.at(get_key(dispatchable_object));
}
instance_private_data &instance_private_data::get(VkInstance instance)
return enabled_layer_platforms.contains(get_platform_of_surface(surface));
}
+void instance_private_data::destroy(instance_private_data *instance_data)
+{
+ assert(instance_data);
+
+ auto alloc = instance_data->get_allocator();
+ alloc.destroy<instance_private_data>(1, instance_data);
+}
+
bool instance_private_data::do_icds_support_surface(VkPhysicalDevice, VkSurfaceKHR)
{
/* For now assume ICDs do not support VK_KHR_surface. This means that the layer will handle all the surfaces it can
return ret;
}
-void instance_private_data::destroy(VkInstance inst)
+device_private_data::device_private_data(instance_private_data &inst_data, VkPhysicalDevice phys_dev, VkDevice dev,
+ const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data,
+ const util::allocator &alloc)
+ : disp{ table }
+ , instance_data{ inst_data }
+ , SetDeviceLoaderData{ set_loader_data }
+ , physical_device{ phys_dev }
+ , device{ dev }
+ , allocator{ alloc }
+ , swapchains{ allocator }
{
- scoped_mutex lock(g_data_lock);
- g_instance_data.erase(get_key(inst));
}
-device_private_data::device_private_data(instance_private_data &inst_data, VkPhysicalDevice phys_dev, VkDevice dev,
- const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data)
- : disp{table}
- , instance_data{inst_data}
- , SetDeviceLoaderData{set_loader_data}
- , physical_device{phys_dev}
- , device{dev}
+VkResult device_private_data::associate(VkDevice dev, instance_private_data &inst_data, VkPhysicalDevice phys_dev,
+ const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data,
+ const util::allocator &allocator)
{
+ auto *device_data =
+ allocator.create<device_private_data>(1, inst_data, phys_dev, dev, table, set_loader_data, allocator);
+
+ if (device_data == nullptr)
+ {
+ WSI_LOG_ERROR("Device private data for device(%p) could not be allocated. Out of memory.",
+ reinterpret_cast<void *>(dev));
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
+
+ const auto key = get_key(dev);
+ scoped_mutex lock(g_data_lock);
+
+ auto it = g_device_data.find(key);
+ if (it != g_device_data.end())
+ {
+ WSI_LOG_WARNING("Hash collision when adding new device (%p)", reinterpret_cast<void *>(dev));
+ destroy(it->second);
+ g_device_data.erase(it);
+ }
+
+ auto result = g_device_data.try_insert(std::make_pair(key, device_data));
+ if (result.has_value())
+ {
+ return VK_SUCCESS;
+ }
+ else
+ {
+ WSI_LOG_WARNING("Failed to insert device_private_data for device (%p) as host is out of memory",
+ reinterpret_cast<void *>(dev));
+
+ destroy(device_data);
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
+ }
}
-void device_private_data::set(VkDevice dev, std::unique_ptr<device_private_data> dev_data)
+void device_private_data::disassociate(VkDevice dev)
{
- scoped_mutex lock(g_data_lock);
- g_device_data[get_key(dev)] = std::move(dev_data);
+ assert(dev != VK_NULL_HANDLE);
+ device_private_data *device_data = nullptr;
+ {
+ scoped_mutex lock(g_data_lock);
+ auto it = g_device_data.find(get_key(dev));
+ if (it == g_device_data.end())
+ {
+ WSI_LOG_WARNING("Failed to find private data for device (%p)", reinterpret_cast<void *>(dev));
+ return;
+ }
+
+ device_data = it->second;
+ g_device_data.erase(it);
+ }
+
+ destroy(device_data);
}
template <typename dispatchable_type>
static device_private_data &get_device_private_data(dispatchable_type dispatchable_object)
{
scoped_mutex lock(g_data_lock);
- return *g_device_data[get_key(dispatchable_object)];
+
+ return *g_device_data.at(get_key(dispatchable_object));
}
device_private_data &device_private_data::get(VkDevice device)
return get_device_private_data(queue);
}
-void device_private_data::add_layer_swapchain(VkSwapchainKHR swapchain)
+VkResult device_private_data::add_layer_swapchain(VkSwapchainKHR swapchain)
{
scoped_mutex lock(swapchains_lock);
- swapchains.insert(swapchain);
+ auto result = swapchains.try_insert(swapchain);
+ return result.has_value() ? VK_SUCCESS : VK_ERROR_OUT_OF_HOST_MEMORY;
}
bool device_private_data::layer_owns_all_swapchains(const VkSwapchainKHR *swapchain, uint32_t swapchain_count) const
return disp.CreateSwapchainKHR != nullptr;
}
-void device_private_data::destroy(VkDevice dev)
+void device_private_data::destroy(device_private_data *device_data)
{
- scoped_mutex lock(g_data_lock);
- g_device_data.erase(get_key(dev));
+ assert(device_data);
+
+ auto alloc = device_data->get_allocator();
+ alloc.destroy<device_private_data>(1, device_data);
}
+
} /* namespace layer */
#pragma once
#include "util/platform_set.hpp"
+#include "util/custom_allocator.hpp"
+#include "util/unordered_set.hpp"
#include <vulkan/vulkan.h>
#include <vulkan/vk_layer.h>
struct instance_dispatch_table
{
+ /**
+ * @brief Populate the instance dispatch table with the functions it requires.
+ * @note The function greedily fetches all the functions it needs, so even in
+ * the case of failure, function pointers that are not nullptr are safe to call.
+ *
+ * @param instance The instance for which the dispatch table will be populated.
+ * @param get_proc A pointer to the vkGetInstanceProcAddr function.
+ * @return VkResult VK_SUCCESS if successful, otherwise an error.
+ */
VkResult populate(VkInstance instance, PFN_vkGetInstanceProcAddr get_proc);
#define DISPATCH_TABLE_ENTRY(x) PFN_vk##x x{};
REQUIRED(DestroyFence) \
REQUIRED(ResetFences) \
REQUIRED(WaitForFences) \
+ REQUIRED(DestroyDevice) \
OPTIONAL(CreateSwapchainKHR) \
OPTIONAL(DestroySwapchainKHR) \
OPTIONAL(GetSwapchainImagesKHR) \
struct device_dispatch_table
{
+ /**
+ * @brief Populate the device dispatch table with the functions it requires.
+ * @note The function greedily fetches all the functions it needs, so even in
+ * the case of failure, function pointers that are not nullptr are safe to call.
+ *
+ * @param dev The device for which the dispatch table will be populated.
+ * @param get_proc A pointer to the vkGetDeviceProcAddr function.
+ * @return VkResult VK_SUCCESS if successful, otherwise an error.
+ */
VkResult populate(VkDevice dev, PFN_vkGetDeviceProcAddr get_proc);
#define DISPATCH_TABLE_ENTRY(x) PFN_vk##x x{};
};
/**
- * @brief Layer "mirror object" for VkInstance.
+ * @brief Class representing the information that the layer associates to a VkInstance.
+ * @details The layer uses this object to store function pointers to use when intercepting a Vulkan call.
+ * Each function intercepted by the layer passes execution to the next layer calling one of these pointers.
+ * Note that the layer does not wrap VkInstance as this would require intercepting every Vulkan entrypoint that has
+ * a VkInstance among its arguments. Instead, the layer maintains a mapping which allows it to retrieve the
+ * #instance_private_data from the VkInstance. To be precise, the mapping uses the VkInstance's dispatch table as a
+ * key, because (1) it is unique for each VkInstance and (2) it allows mapping any dispatchable object associated
+ * with the VkInstance (such as VkPhysicalDevice) to the corresponding #instance_private_data (see the overloads of
+ * the instance_private_data::get method).
*/
class instance_private_data
{
instance_private_data(const instance_private_data &) = delete;
instance_private_data &operator=(const instance_private_data &) = delete;
- instance_private_data(const instance_dispatch_table& table,
- PFN_vkSetInstanceLoaderData set_loader_data,
- util::wsi_platform_set enabled_layer_platforms);
- static void set(VkInstance inst, std::unique_ptr<instance_private_data> inst_data);
+ /**
+ * @brief Create a new #instance_private_data object and associate it with the given VkInstance.
+ *
+ * @param instance The instance to associate with the instance_private_data.
+ * @param table A populated instance dispatch table.
+ * @param set_loader_data The instance loader data.
+ * @param enabled_layer_platforms The platforms that are enabled by the layer.
+ * @param allocator The allocator that the instance_private_data will use.
+ *
+ * @return VkResult VK_SUCCESS if successful, otherwise an error.
+ */
+ static VkResult associate(VkInstance instance, const instance_dispatch_table &table,
+ PFN_vkSetInstanceLoaderData set_loader_data,
+ util::wsi_platform_set enabled_layer_platforms, const util::allocator &allocator);
+
+ /**
+ * @brief Disassociate and destroy the #instance_private_data associated to the given VkInstance.
+ *
+ * @param instance An instance that was previously associated with instance_private_data.
+ */
+ static void disassociate(VkInstance instance);
/**
* @brief Get the mirror object that the layer associates to a given Vulkan instance.
*/
bool does_layer_support_surface(VkSurfaceKHR surface);
- static void destroy(VkInstance inst);
+ /**
+ * @brief Get the allocator associated with the instance.
+ *
+ * @return The allocator used for the instance.
+ */
+ const util::allocator &get_allocator() const
+ {
+ return allocator;
+ }
const instance_dispatch_table disp;
private:
+ /* Allow util::allocator to access the private constructor */
+ friend util::allocator;
+
+ /**
+ * @brief Construct a new instance private data object. This is marked private in order to
+ * ensure that the object can only be allocated through the allocator callbacks.
+ *
+ * @param table A populated instance dispatch table.
+ * @param set_loader_data The instance loader data.
+ * @param enabled_layer_platforms The platforms that are enabled by the layer.
+ * @param alloc The allocator that the instance_private_data will use.
+ */
+ instance_private_data(const instance_dispatch_table &table, PFN_vkSetInstanceLoaderData set_loader_data,
+ util::wsi_platform_set enabled_layer_platforms, const util::allocator &alloc);
+
+ /**
+ * @brief Destroy the given instance_private_data using its own allocator.
+ *
+ * @param instance_data A valid pointer to an instance_private_data object.
+ */
+ static void destroy(instance_private_data *instance_data);
+
/**
* @brief Check whether the given surface is already supported for presentation without the layer.
*/
const PFN_vkSetInstanceLoaderData SetInstanceLoaderData;
const util::wsi_platform_set enabled_layer_platforms;
+ const util::allocator allocator;
};
+/**
+ * @brief Class representing the information that the layer associates to a VkDevice.
+ * @note This serves a similar purpose to #instance_private_data, but for VkDevice. Similarly to
+ * #instance_private_data, the layer maintains a mapping from VkDevice to the associated #device_private_data.
+ */
class device_private_data
{
public:
device_private_data(const device_private_data &) = delete;
device_private_data &operator=(const device_private_data &) = delete;
- device_private_data(instance_private_data &inst_data, VkPhysicalDevice phys_dev, VkDevice dev,
- const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data);
- static void set(VkDevice dev, std::unique_ptr<device_private_data> dev_data);
+ /**
+ * @brief Create a new #device_private_data object and associate it with the given VkDevice.
+ *
+ * @param dev The device to associate with the device_private_data.
+ * @param inst_data The private data of the instance used to create the VkDevice.
+ * @param phys_dev The physical device that was used to create the VkDevice.
+ * @param table A populated device dispatch table.
+ * @param set_loader_data The device loader data.
+ * @param allocator The allocator that the device_private_data will use.
+ *
+ * @return VkResult VK_SUCCESS if successful, otherwise an error
+ */
+ static VkResult associate(VkDevice dev, instance_private_data &inst_data, VkPhysicalDevice phys_dev,
+ const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data,
+ const util::allocator &allocator);
+
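+ /**
+ * @brief Disassociate and destroy the #device_private_data associated to the given VkDevice.
+ *
+ * @param dev A device that was previously associated with device_private_data.
+ */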
+ static void disassociate(VkDevice dev);
/**
* @brief Get the mirror object that the layer associates to a given Vulkan device.
*/
static device_private_data &get(VkQueue queue);
- void add_layer_swapchain(VkSwapchainKHR swapchain);
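+ /**
+ * @brief Record a swapchain as owned by the layer.
+ *
+ * @return VK_SUCCESS on success, or VK_ERROR_OUT_OF_HOST_MEMORY if tracking the swapchain fails.
+ */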
+ VkResult add_layer_swapchain(VkSwapchainKHR swapchain);
/**
* @brief Return whether all the provided swapchains are owned by us (the WSI Layer).
*/
bool can_icds_create_swapchain(VkSurfaceKHR vk_surface);
- static void destroy(VkDevice dev);
+ /**
+ * @brief Get the allocator associated with the device.
+ *
+ * @return The allocator used for the device.
+ */
+ const util::allocator &get_allocator() const
+ {
+ return allocator;
+ }
const device_dispatch_table disp;
instance_private_data &instance_data;
const VkDevice device;
private:
- std::unordered_set<VkSwapchainKHR> swapchains;
+ /* Allow util::allocator to access the private constructor */
+ friend util::allocator;
+
+ /**
+ * @brief Construct a new device private data object. This is marked private in order to
+ * ensure that the object can only be allocated through the allocator callbacks.
+ *
+ * @param inst_data The private data of the instance used to create the VkDevice.
+ * @param phys_dev The physical device that was used to create the VkDevice.
+ * @param dev The device to associate with the device_private_data.
+ * @param table A populated device dispatch table.
+ * @param set_loader_data The device loader data.
+ * @param alloc The allocator that the device_private_data will use.
+ */
+ device_private_data(instance_private_data &inst_data, VkPhysicalDevice phys_dev, VkDevice dev,
+ const device_dispatch_table &table, PFN_vkSetDeviceLoaderData set_loader_data,
+ const util::allocator &alloc);
+
+ /**
+ * @brief Destroy the given device_private_data using its own allocator.
+ *
+ * @param device_data A valid pointer to a device_private_data object.
+ */
+ static void destroy(device_private_data *device_data);
+
+ const util::allocator allocator;
+ util::unordered_set<VkSwapchainKHR> swapchains;
mutable std::mutex swapchains_lock;
};
if (result != VK_SUCCESS)
{
/* Error occurred during initialization, need to free allocated memory. */
- wsi::destroy_surface_swapchain(sc, pAllocator);
+ wsi::destroy_surface_swapchain(sc, device_data, pAllocator);
return result;
}
- *pSwapchain = reinterpret_cast<VkSwapchainKHR>(sc);
- device_data.add_layer_swapchain(*pSwapchain);
+ auto vulkan_swapchain = reinterpret_cast<VkSwapchainKHR>(sc);
+ result = device_data.add_layer_swapchain(vulkan_swapchain);
+ if (result != VK_SUCCESS)
+ {
+ wsi::destroy_surface_swapchain(sc, device_data, pAllocator);
+ return result;
+ }
+
+ *pSwapchain = vulkan_swapchain;
return result;
}
assert(swapc != VK_NULL_HANDLE);
wsi::swapchain_base *sc = reinterpret_cast<wsi::swapchain_base *>(swapc);
- wsi::destroy_surface_swapchain(sc, pAllocator);
+ wsi::destroy_surface_swapchain(sc, device_data, pAllocator);
}
VKAPI_ATTR VkResult wsi_layer_vkGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapc,
const allocator& allocator::get_generic()
{
- static allocator generic{nullptr, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND};
+ static allocator generic{ VK_SYSTEM_ALLOCATION_SCOPE_COMMAND, nullptr };
return generic;
}
-
-allocator::allocator(const allocator& other, VkSystemAllocationScope new_scope)
- : allocator{other.get_original_callbacks(), new_scope}
+allocator::allocator(const allocator &other, VkSystemAllocationScope new_scope, const VkAllocationCallbacks *callbacks)
+ : allocator{ new_scope, callbacks == nullptr ? other.get_original_callbacks() : callbacks }
{
}
/* If callbacks are provided, use them; otherwise fall back to the default callbacks. */
-allocator::allocator(const VkAllocationCallbacks *callbacks, VkSystemAllocationScope scope)
+allocator::allocator(VkSystemAllocationScope scope, const VkAllocationCallbacks *callbacks)
{
m_scope = scope;
if (callbacks != nullptr)
/**
* @brief Construct a new wrapper for the given VK callbacks and scope.
+ * @param scope The scope to use for this allocator.
* @param callbacks Pointer to allocation callbacks. If this is @c nullptr, then default
* allocation callbacks are used. These can be accessed through #m_callbacks.
- * @param scope The scope to use for this allocator.
*/
- allocator(const VkAllocationCallbacks *callbacks, VkSystemAllocationScope scope);
+ allocator(VkSystemAllocationScope scope, const VkAllocationCallbacks *callbacks);
/**
- * @brief Copy the given allocator, but change the allocation scope.
+ * @brief Construct a new allocator that uses @p callbacks, or the callbacks of @p other
+ * when @p callbacks is @c nullptr, together with the given scope.
+ *
+ * @param other The allocator whose callbacks are used when @p callbacks is @c nullptr.
+ * @param new_scope The scope to use for allocations.
+ * @param callbacks Pointer to allocation callbacks. If this is @c nullptr, then the callbacks
+ * from @p other will be used.
*/
- allocator(const allocator& other, VkSystemAllocationScope new_scope);
+ allocator(const allocator &other, VkSystemAllocationScope new_scope,
+ const VkAllocationCallbacks *callbacks = nullptr);
/**
* @brief Get a pointer to the allocation callbacks provided while constructing this object.
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#pragma once
+#include <cassert>
+
+namespace util
+{
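+/**
+ * @brief A small optional-like wrapper that holds either a value or nothing, used to report
+ * failure without throwing (a minimal stand-in for std::optional).
+ *
+ * A brief usage sketch (the wrapped int is just an example):
+ * @code
+ * util::optional<int> opt = util::make_optional<int>(42);
+ * if (opt.has_value())
+ * {
+ *    int &x = *opt;
+ * }
+ * @endcode
+ */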
+template <typename T>
+class optional
+{
+public:
+ using value_type = T;
+
+ optional(const optional &) = delete;
+ optional &operator=(const optional &) = delete;
+
+ /**
+ * @brief Construct an empty optional object.
+ */
+ optional() = default;
+
+ /**
+ * @brief Construct an optional object with a value.
+ *
+ * @param val The value that will be placed in the optional.
+ */
+ optional(value_type &&val) noexcept
+ : m_has_value(true), m_value(std::move(val))
+ {
+ }
+
+ /**
+ * @brief Construct a new optional object from another optional
+ *
+ * @param opt The optional object that will be moved
+ */
+ optional(optional &&opt)
+ : m_has_value(opt.m_has_value)
+ {
+ if (opt.m_has_value)
+ {
+ m_value = std::move(opt.m_value);
+ opt.m_has_value = false;
+ }
+ else
+ {
+ m_value = T{};
+ }
+ }
+
+ /**
+ * @brief Check if optional has a value.
+ *
+ * @return true If the optional has a value.
+ * @return false If the optional does not have a value.
+ */
+ bool has_value() const noexcept
+ {
+ return m_has_value;
+ }
+
+ /**
+ * @brief Return the value in the optional. It is only
+ * valid to call this function if optional has a value.
+ *
+ * @return value_type& The value that is in the optional
+ */
+ value_type &value() noexcept
+ {
+ assert(has_value());
+ return m_value;
+ }
+
+ /**
+ * @brief Clears the value from the optional
+ *
+ */
+ void reset() noexcept
+ {
+ m_has_value = false;
+ }
+
+ /**
+ * @brief Return the value in the optional, same as value()
+ *
+ * @return value_type& The value in the optional
+ */
+ value_type &operator*() noexcept
+ {
+ assert(has_value());
+ return m_value;
+ }
+
+ /**
+ * @brief Return the value in the optional as pointer.
+ *
+ * @return value_type* The value in the optional
+ */
+ value_type *operator->() noexcept
+ {
+ assert(has_value());
+ return &m_value;
+ }
+
+ /**
+ * @brief Reassign/assign the value in the optional
+ *
+ * @param val The value to assign to this optional
+ * @return optional& This optional object with the value
+ */
+ optional &operator=(value_type &&val) noexcept
+ {
+ m_has_value = true;
+ m_value = std::move(val);
+
+ return *this;
+ }
+
+ /**
+ * @brief Construct a new optional object from another optional
+ *
+ * @param opt The optional object that will be moved
+ * @return optional& This optional object with the value
+ */
+ optional &operator=(optional &&opt)
+ {
+ if (this != &opt)
+ {
+ if (opt.m_has_value)
+ {
+ m_has_value = true;
+ m_value = std::move(opt.m_value);
+ opt.m_has_value = false;
+ }
+ else
+ {
+ m_value = T{};
+ m_has_value = false;
+ }
+ }
+
+ return *this;
+ }
+
+private:
+ bool m_has_value{false};
+ T m_value{};
+};
+
+template <typename T, typename... Args>
+inline optional<T> make_optional(Args &&...args)
+{
+ return optional<T>{T(std::forward<Args>(args)...)};
+}
+} /* namespace util */
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include <unordered_map>
+#include "custom_allocator.hpp"
+#include "optional.hpp"
+
+namespace util
+{
+/**
+ * @brief This utility class has the same purpose as std::unordered_map, but
+ * ensures that the operations that could result in out-of-memory
+ * exceptions do not throw them, and also ensures that memory can
+ * only be allocated by a custom_allocator.
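+ *
+ * A brief usage sketch, where key is a placeholder void pointer:
+ * @code
+ * util::unordered_map<void *, int> map{ util::allocator::get_generic() };
+ * auto result = map.try_insert(std::make_pair(key, 42));
+ * if (!result.has_value())
+ * {
+ *    return VK_ERROR_OUT_OF_HOST_MEMORY;
+ * }
+ * @endcode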
+ */
+template <typename Key, typename Value,
+ typename Hash = std::hash<Key>,
+ typename Comparator = std::equal_to<Key>,
+ typename Allocator = util::custom_allocator<std::pair<const Key, Value>>>
+class unordered_map : public std::unordered_map<Key, Value, Hash, Comparator, Allocator>
+{
+ using base = std::unordered_map<Key, Value, Hash, Comparator, Allocator>;
+ using size_type = typename base::size_type;
+ using iterator = typename base::iterator;
+
+public:
+ /**
+ * Delete all member functions that can cause allocation failure by throwing std::bad_alloc.
+ */
+ Value &operator[](const Key &key) = delete;
+ Value &operator[](Key &&key) = delete;
+
+ unordered_map(const unordered_map &) = delete;
+ unordered_map &operator=(const unordered_map &) = delete;
+
+ void insert() = delete;
+ void emplace() = delete;
+ void emplace_hint() = delete;
+ void reserve() = delete;
+ void rehash() = delete;
+
+ /**
+ * @brief Construct a new unordered map object with a custom allocator.
+ *
+ * @param allocator The allocator that will be used.
+ */
+ explicit unordered_map(util::custom_allocator<std::pair<const Key, Value>> allocator)
+ : base(allocator)
+ {
+ }
+
+ /**
+ * @brief Like std::unordered_map::insert, but does not throw on out-of-memory errors.
+ *
+ * @param value The value to insert in the map.
+ * @return util::optional<std::pair<iterator,bool>> On success, the optional contains
+ * the same return value as std::unordered_map::insert; if the host is out of
+ * memory, the optional is empty.
+ */
+ util::optional<std::pair<iterator, bool>> try_insert(const std::pair<Key, Value> &value)
+ {
+ try
+ {
+ return { base::insert(value) };
+ }
+ catch(std::bad_alloc& e)
+ {
+ return {};
+ }
+ }
+
+ /**
+ * @brief Like std::unordered_map::reserve, but does not throw on out-of-memory errors.
+ *
+ * @param size The new capacity of the container. Same as std::unordered_map::reserve.
+ * @return true If the container was resized successfully.
+ * @return false If the host has run out of memory.
+ */
+ bool try_reserve(size_type size)
+ {
+ try
+ {
+ base::reserve(size);
+ return true;
+ }
+ catch(std::bad_alloc& e)
+ {
+ return false;
+ }
+ }
+
+ /**
+ * @brief Like std::unordered_map::rehash, but does not throw on out-of-memory errors.
+ *
+ * @param count Number of buckets. Same as std::unordered_map::rehash.
+ * @return true If the container was rehashed successfully.
+ * @return false If the host has run out of memory.
+ */
+ bool try_rehash(size_type count)
+ {
+ try
+ {
+ base::rehash(count);
+ return true;
+ }
+ catch(std::bad_alloc& e)
+ {
+ return false;
+ }
+ }
+};
+} /* namespace util */
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#pragma once
+
+#include <unordered_set>
+#include "custom_allocator.hpp"
+#include "optional.hpp"
+
+namespace util
+{
+/**
+ * @brief This utility class has the same purpose as std::unordered_set, but
+ * ensures that the operations that could result in out-of-memory
+ * exceptions do not throw them, and also ensures that memory can
+ * only be allocated by a custom_allocator.
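+ *
+ * A brief usage sketch, where size is a placeholder element count:
+ * @code
+ * util::unordered_set<VkSwapchainKHR> swapchains{ util::allocator::get_generic() };
+ * if (!swapchains.try_reserve(size))
+ * {
+ *    return VK_ERROR_OUT_OF_HOST_MEMORY;
+ * }
+ * @endcode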
+ */
+template <typename Key,
+ typename Hash = std::hash<Key>,
+ typename Comparator = std::equal_to<Key>,
+ typename Allocator = util::custom_allocator<Key>>
+class unordered_set : public std::unordered_set<Key, Hash, Comparator, Allocator>
+{
+ using value_type = Key;
+ using base = std::unordered_set<Key, Hash, Comparator, Allocator>;
+ using size_type = typename base::size_type;
+ using iterator = typename base::iterator;
+
+public:
+ /**
+ * Delete all member functions that can cause allocation failure by throwing std::bad_alloc.
+ */
+ unordered_set(const unordered_set &) = delete;
+ unordered_set &operator=(const unordered_set &) = delete;
+
+ void insert() = delete;
+ void emplace() = delete;
+ void emplace_hint() = delete;
+ void rehash() = delete;
+ void reserve() = delete;
+
+ /**
+ * @brief Construct a new unordered set object with a custom allocator.
+ *
+ * @param allocator The allocator that will be used.
+ */
+ explicit unordered_set(util::custom_allocator<Key> allocator)
+ : base(allocator)
+ {
+ }
+
+ /**
+ * @brief Like std::unordered_set::insert, but does not throw on out-of-memory errors.
+ *
+ * @param value The value to insert in the set.
+ * @return util::optional<std::pair<iterator,bool>> On success, the optional contains
+ * the same return value as std::unordered_set::insert; if the host is out of
+ * memory, the optional is empty.
+ */
+ util::optional<std::pair<iterator, bool>> try_insert(const value_type &value) noexcept
+ {
+ try
+ {
+ return {base::insert(value)};
+ }
+ catch (const std::bad_alloc &e)
+ {
+ return {};
+ }
+ }
+
+ /**
+ * @brief Like std::unordered_set::reserve, but does not throw on out-of-memory errors.
+ *
+ * @param size The new capacity of the container. Same as std::unordered_set::reserve.
+ * @return true If the container was resized successfully.
+ * @return false If the host has run out of memory.
+ */
+ bool try_reserve(size_type size)
+ {
+ try
+ {
+ base::reserve(size);
+ return true;
+ }
+ catch(std::bad_alloc& e)
+ {
+ return false;
+ }
+ }
+
+ /**
+ * @brief Like std::unordered_set::rehash, but does not throw on out-of-memory errors.
+ *
+ * @param count Number of buckets. Same as std::unordered_set::rehash.
+ * @return true If the container was rehashed successfully.
+ * @return false If the host has run out of memory.
+ */
+ bool try_rehash(size_type count)
+ {
+ try
+ {
+ base::rehash(count);
+ return true;
+ }
+ catch(std::bad_alloc& e)
+ {
+ return false;
+ }
+ }
+};
+} /* namespace util */
, m_thread_sem_defined(false)
, m_first_present(true)
, m_pending_buffer_pool{ nullptr, 0, 0, 0 }
- , m_allocator(callbacks, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT)
+ , m_allocator(dev_data.get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT, callbacks)
, m_swapchain_images(m_allocator)
, m_surface(VK_NULL_HANDLE)
, m_present_mode(VK_PRESENT_MODE_IMMEDIATE_KHR)
*/
VkResult queue_present(VkQueue queue, const VkPresentInfoKHR *present_info, const uint32_t image_index);
+ /**
+ * @brief Get the allocator used by the swapchain.
+ *
+ * @return The allocator used by the swapchain.
+ */
+ const util::allocator &get_allocator() const
+ {
+ return m_allocator;
+ }
+
protected:
layer::device_private_data &m_device_data;
* @retval VK_ERROR_OUT_OF_HOST_MEMORY Indicates the host went out of memory.
*/
static VkResult query_supported_formats(
- const VkSurfaceKHR surface, vk_format_set &vk_supported_formats)
+ const VkSurfaceKHR surface, vk_format_set &vk_supported_formats, const util::allocator &allocator)
{
const VkIcdSurfaceWayland *vk_surf = reinterpret_cast<VkIcdSurfaceWayland *>(surface);
wl_display *display = vk_surf->display;
return VK_ERROR_SURFACE_LOST_KHR;
}
- util::vector<drm_format_pair> drm_supported_formats(util::allocator::get_generic());
+ util::vector<drm_format_pair> drm_supported_formats{allocator};
const VkResult ret = get_supported_formats_and_modifiers(display, dmabuf_interface.get(), drm_supported_formats);
if (ret != VK_SUCCESS)
{
uint32_t *surfaceFormatCount, VkSurfaceFormatKHR *surfaceFormats)
{
vk_format_set formats;
- const auto query_res = query_supported_formats(surface, formats);
+
+ auto &instance = layer::instance_private_data::get(physical_device);
+ const auto query_res = query_supported_formats(surface, formats, instance.get_allocator());
if (query_res != VK_SUCCESS)
{
return query_res;
template <typename swapchain_type>
static swapchain_base *allocate_swapchain(layer::device_private_data &dev_data, const VkAllocationCallbacks *pAllocator)
{
- if (!pAllocator)
- {
- return new swapchain_type(dev_data, pAllocator);
- }
- void *memory = pAllocator->pfnAllocation(pAllocator->pUserData, sizeof(swapchain_type), alignof(swapchain_type),
- VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
- return new (memory) swapchain_type(dev_data, pAllocator);
+ util::allocator alloc{ dev_data.get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, pAllocator };
+ return alloc.create<swapchain_type>(1, dev_data, pAllocator);
}
swapchain_base *allocate_surface_swapchain(VkSurfaceKHR surface, layer::device_private_data &dev_data,
return VK_SUCCESS;
}
-void destroy_surface_swapchain(swapchain_base *swapchain, const VkAllocationCallbacks *pAllocator)
+void destroy_surface_swapchain(swapchain_base *swapchain, layer::device_private_data &dev_data,
+ const VkAllocationCallbacks *pAllocator)
{
assert(swapchain);
- if (!pAllocator)
- {
- delete swapchain;
- }
- else
- {
- swapchain->~swapchain_base();
- pAllocator->pfnFree(pAllocator->pUserData, reinterpret_cast<void *>(swapchain));
- }
+ util::allocator alloc{ swapchain->get_allocator(), VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE, pAllocator };
+ alloc.destroy(1, swapchain);
}
PFN_vkVoidFunction get_proc_addr(const char *name)
* @brief Destroys a swapchain and frees memory. Used with @ref allocate_surface_swapchain.
*
* @param swapchain Pointer to the swapchain to destroy.
* @param dev_data The device-specific private data.
* @param pAllocator The allocator to use for freeing memory.
*/
-void destroy_surface_swapchain(swapchain_base *swapchain, const VkAllocationCallbacks *pAllocator);
+void destroy_surface_swapchain(swapchain_base *swapchain, layer::device_private_data &dev_data, const VkAllocationCallbacks *pAllocator);
/**
* @brief Return which platforms the layer can handle for an instance constructed in the specified way.