util/extension_list.cpp
util/log.cpp
wsi/swapchain_base.cpp
+ wsi/synchronization.cpp
wsi/wsi_factory.cpp
wsi/headless/surface_properties.cpp
wsi/headless/surface.cpp
OPTIONAL(DestroySurfaceKHR) \
OPTIONAL(GetPhysicalDeviceImageFormatProperties2KHR) \
OPTIONAL(GetPhysicalDeviceFormatProperties2KHR) \
- OPTIONAL(GetPhysicalDevicePresentRectanglesKHR)
+ OPTIONAL(GetPhysicalDevicePresentRectanglesKHR) \
+ OPTIONAL(GetPhysicalDeviceExternalFencePropertiesKHR)
struct instance_dispatch_table
{
OPTIONAL(BindImageMemory2KHR) \
OPTIONAL(GetDeviceGroupSurfacePresentModesKHR) \
OPTIONAL(GetDeviceGroupPresentCapabilitiesKHR) \
- OPTIONAL(AcquireNextImage2KHR)
+ OPTIONAL(AcquireNextImage2KHR) \
+ OPTIONAL(GetFenceFdKHR)
struct device_dispatch_table
{
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * @file
+ *
+ * @brief Contains the definitions of file descriptor utilities.
+ */
+
+#pragma once
+
+#include <unistd.h>
+#include <utility>
+
+namespace util
+{
+
+/**
+ * Manages a POSIX file descriptor.
+ */
+class fd_owner
+{
+public:
+
+ fd_owner() = default;
+ fd_owner(int fd)
+ : fd_handle{ fd }
+ {
+ }
+
+ fd_owner(const fd_owner &) = delete;
+ fd_owner &operator=(const fd_owner &) = delete;
+
+ fd_owner(fd_owner &&rhs)
+ {
+ *this = std::move(rhs);
+ }
+
+ fd_owner &operator=(fd_owner &&rhs)
+ {
+ std::swap(fd_handle, rhs.fd_handle);
+ return *this;
+ }
+
+ ~fd_owner()
+ {
+ if (is_valid())
+ {
+ close(fd_handle);
+ }
+ }
+
+ int get()
+ {
+ return fd_handle;
+ }
+
+ bool is_valid()
+ {
+ return fd_handle >= 0;
+ }
+
+private:
+ int fd_handle{ -1 };
+};
+
+} /* namespace util */
\ No newline at end of file
{
/* Device memory backing the image. */
VkDeviceMemory memory;
+ fence_sync present_fence;
};
swapchain::swapchain(layer::device_private_data &dev_data, const VkAllocationCallbacks *pAllocator)
}
/* Initialize presentation fence. */
- VkFenceCreateInfo fence_info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0 };
- res = m_device_data.disp.CreateFence(m_device, &fence_info, nullptr, &image.present_fence);
- if (res != VK_SUCCESS)
+ auto present_fence = fence_sync::create(m_device_data);
+ if (!present_fence.has_value())
{
destroy_image(image);
- return res;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
+ data->present_fence = std::move(present_fence.value());
+
return res;
}
std::unique_lock<std::recursive_mutex> image_status_lock(m_image_status_mutex);
if (image.status != wsi::swapchain_image::INVALID)
{
- if (image.present_fence != VK_NULL_HANDLE)
- {
- m_device_data.disp.DestroyFence(m_device, image.present_fence, nullptr);
- image.present_fence = VK_NULL_HANDLE;
- }
-
if (image.image != VK_NULL_HANDLE)
{
m_device_data.disp.DestroyImage(m_device, image.image, get_allocation_callbacks());
}
+VkResult swapchain::image_set_present_payload(swapchain_image &image, VkQueue queue, const VkSemaphore *sem_payload,
+ uint32_t sem_count)
+{
+ auto data = reinterpret_cast<image_data *>(image.data);
+ return data->present_fence.set_payload(queue, sem_payload, sem_count);
+}
+
+VkResult swapchain::image_wait_present(swapchain_image &image, uint64_t timeout)
+{
+ auto data = reinterpret_cast<image_data *>(image.data);
+ return data->present_fence.wait_payload(timeout);
+}
+
} /* namespace headless */
} /* namespace wsi */
* @param image Handle to the image about to be released.
*/
void destroy_image(wsi::swapchain_image &image);
+
+ VkResult image_set_present_payload(swapchain_image &image, VkQueue queue, const VkSemaphore *sem_payload,
+ uint32_t sem_count) override;
+
+ VkResult image_wait_present(swapchain_image &image, uint64_t timeout) override;
};
} /* namespace headless */
auto pending_index = m_pending_buffer_pool.pop_front();
assert(pending_index.has_value());
- /* We wait for the fence of the oldest pending image to be signalled. */
- vk_res = m_device_data.disp.WaitForFences(m_device, 1, &sc_images[*pending_index].present_fence, VK_TRUE,
- timeout);
+ /* We may need to wait for the payload of the present sync of the oldest pending image to be finished. */
+ vk_res = image_wait_present(sc_images[*pending_index], timeout);
if (vk_res != VK_SUCCESS)
{
m_is_valid = false;
}
}
- /* When the semaphore that comes in is signalled, we know that all work is done. So, we do not
- * want to block any future Vulkan queue work on it. So, we pass in BOTTOM_OF_PIPE bit as the
- * wait flag.
- */
- VkPipelineStageFlags pipeline_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
-
- VkSubmitInfo submit_info = { VK_STRUCTURE_TYPE_SUBMIT_INFO,
- NULL,
- present_info->waitSemaphoreCount,
- present_info->pWaitSemaphores,
- &pipeline_stage_flags,
- 0,
- NULL,
- 0,
- NULL };
-
- assert(m_swapchain_images[image_index].status == swapchain_image::ACQUIRED);
- result = m_device_data.disp.ResetFences(m_device, 1, &m_swapchain_images[image_index].present_fence);
- if (result != VK_SUCCESS)
- {
- return result;
- }
-
- result = m_device_data.disp.QueueSubmit(queue, 1, &submit_info, m_swapchain_images[image_index].present_fence);
+ result = image_set_present_payload(m_swapchain_images[image_index], queue, present_info->pWaitSemaphores,
+ present_info->waitSemaphoreCount);
if (result != VK_SUCCESS)
{
return result;
#include <util/custom_allocator.hpp>
#include <util/ring_buffer.hpp>
#include "surface_properties.hpp"
+#include "wsi/synchronization.hpp"
namespace wsi
{
VkImage image{VK_NULL_HANDLE};
status status{swapchain_image::INVALID};
-
- VkFence present_fence{VK_NULL_HANDLE};
};
/**
return VK_SUCCESS;
}
+ /**
+ * @brief Sets the present payload for a swapchain image.
+ *
+ * @param[in] image The swapchain image for which to set a present payload.
+ * @param queue A Vulkan queue that can be used for any Vulkan commands needed.
+ * @param[in] sem_payload Array of Vulkan semaphores that constitute the payload.
+ * @param sem_count Number of elements in @p sem_payload
+ *
+ * @return VK_SUCCESS on success or an error code otherwise.
+ */
+ virtual VkResult image_set_present_payload(swapchain_image &image, VkQueue queue, const VkSemaphore *sem_payload,
+ uint32_t sem_count) = 0;
+
+ /**
+ * @brief Waits for the present payload of an image if necessary.
+ *
+ * If the page flip thread needs to wait for the image present synchronization payload, the WSI implementation can block
+ * and wait in this call. Otherwise the function should return successfully without blocking.
+ *
+ * @param[in] image The swapchain image for which the function may need to wait until the present payload has
+ * finished.
+ * @param timeout Timeout for any wait in nanoseconds.
+ *
+ * @return VK_SUCCESS if waiting was successful or unnecessary. An error code otherwise.
+ */
+ virtual VkResult image_wait_present(swapchain_image &image, uint64_t timeout) = 0;
+
private:
/**
* @brief Wait for a buffer to become free.
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * @file
+ *
+ * @brief Contains the implementation for WSI synchronization primitives.
+ */
+
+#include "synchronization.hpp"
+#include "layer/private_data.hpp"
+
+namespace wsi
+{
+
+fence_sync::fence_sync(layer::device_private_data &device, VkFence vk_fence)
+ : fence{ vk_fence }
+ , has_payload{ false }
+ , dev{ &device }
+{
+}
+
+util::optional<fence_sync> fence_sync::create(layer::device_private_data &device)
+{
+ VkFence fence{ VK_NULL_HANDLE };
+ VkFenceCreateInfo fence_info{ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0 };
+ VkResult res =
+ device.disp.CreateFence(device.device, &fence_info, device.get_allocator().get_original_callbacks(), &fence);
+ if (res != VK_SUCCESS)
+ {
+ return {};
+ }
+ return fence_sync(device, fence);
+}
+
+fence_sync::fence_sync(fence_sync &&rhs)
+{
+ *this = std::move(rhs);
+}
+
+fence_sync &fence_sync::operator=(fence_sync &&rhs)
+{
+ std::swap(fence, rhs.fence);
+ std::swap(has_payload, rhs.has_payload);
+ std::swap(payload_finished, rhs.payload_finished);
+ std::swap(dev, rhs.dev);
+ return *this;
+}
+
+fence_sync::~fence_sync()
+{
+ if (fence != VK_NULL_HANDLE)
+ {
+ wait_payload(UINT64_MAX);
+ dev->disp.DestroyFence(dev->device, fence, dev->get_allocator().get_original_callbacks());
+ }
+}
+
+VkResult fence_sync::wait_payload(uint64_t timeout)
+{
+ VkResult res = VK_SUCCESS;
+ if (has_payload && !payload_finished)
+ {
+ res = dev->disp.WaitForFences(dev->device, 1, &fence, VK_TRUE, timeout);
+ if (res == VK_SUCCESS)
+ {
+ payload_finished = true;
+ }
+ }
+ return res;
+}
+
+VkResult fence_sync::set_payload(VkQueue queue, const VkSemaphore *sem_payload, uint32_t sem_count)
+{
+ VkResult result = dev->disp.ResetFences(dev->device, 1, &fence);
+ if (result != VK_SUCCESS)
+ {
+ return result;
+ }
+ has_payload = false;
+ /* When the semaphore that comes in is signalled, we know that all work is done. So, we do not
+ * want to block any future Vulkan queue work on it. So, we pass in BOTTOM_OF_PIPE bit as the
+ * wait flag.
+ */
+ VkPipelineStageFlags pipeline_stage_flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+
+ VkSubmitInfo submit_info = {
+ VK_STRUCTURE_TYPE_SUBMIT_INFO, NULL, sem_count, sem_payload, &pipeline_stage_flags, 0, NULL, 0, NULL
+ };
+
+ result = dev->disp.QueueSubmit(queue, 1, &submit_info, fence);
+ if (result == VK_SUCCESS)
+ {
+ has_payload = true;
+ payload_finished = false;
+ }
+ return result;
+}
+
+bool fence_sync::swap_payload(bool new_payload)
+{
+ bool old_payload = has_payload;
+ has_payload = new_payload;
+ payload_finished = false;
+ return old_payload;
+}
+
+sync_fd_fence_sync::sync_fd_fence_sync(layer::device_private_data &device, VkFence vk_fence)
+ : fence_sync{ device, vk_fence }
+{
+}
+
+bool sync_fd_fence_sync::is_supported(layer::instance_private_data &instance, VkPhysicalDevice phys_dev)
+{
+ VkPhysicalDeviceExternalFenceInfoKHR external_fence_info = {};
+ external_fence_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO;
+ external_fence_info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
+ VkExternalFencePropertiesKHR fence_properties = {};
+ fence_properties.sType = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES;
+ instance.disp.GetPhysicalDeviceExternalFencePropertiesKHR(phys_dev, &external_fence_info, &fence_properties);
+ return fence_properties.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR;
+}
+
+util::optional<sync_fd_fence_sync> sync_fd_fence_sync::create(layer::device_private_data &device)
+{
+ VkExportFenceCreateInfo export_fence_create_info = {};
+ export_fence_create_info.sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO;
+ export_fence_create_info.handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
+ VkFenceCreateInfo fence_info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &export_fence_create_info, 0 };
+ VkFence fence = VK_NULL_HANDLE;
+ VkResult res =
+ device.disp.CreateFence(device.device, &fence_info, device.get_allocator().get_original_callbacks(), &fence);
+ if (res != VK_SUCCESS)
+ {
+ return {};
+ }
+ return sync_fd_fence_sync{ device, fence };
+}
+
+util::optional<util::fd_owner> sync_fd_fence_sync::export_sync_fd()
+{
+ int exported_fd = -1;
+ VkFenceGetFdInfoKHR fence_fd_info = {};
+ fence_fd_info.sType = VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR;
+ fence_fd_info.fence = get_fence();
+ fence_fd_info.handleType = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT;
+
+ VkResult result = get_device().disp.GetFenceFdKHR(get_device().device, &fence_fd_info, &exported_fd);
+ if (result == VK_SUCCESS)
+ {
+ swap_payload(false);
+ return util::fd_owner(exported_fd);
+ }
+ return {};
+}
+
+} /* namespace wsi */
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2021 Arm Limited.
+ *
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+/**
+ * @file
+ *
+ * @brief Contains the definitions of WSI synchronization primitives.
+ */
+
+#pragma once
+
+#include "util/file_descriptor.hpp"
+#include "util/optional.hpp"
+
+#include <vulkan/vulkan.h>
+
+namespace layer
+{
+ class device_private_data;
+ class instance_private_data;
+} /* namespace layer */
+
+namespace wsi
+{
+
+/**
+ * Synchronization using a Vulkan Fence object.
+ */
+class fence_sync
+{
+public:
+ /**
+ * Creates a new fence synchronization object.
+ *
+ * @param device The device private data for which to create it.
+ *
+ * @return Empty optional on failure or initialized fence.
+ */
+ static util::optional<fence_sync> create(layer::device_private_data &device);
+
+ /** Default constructor provided for use with @ref util::optional */
+ fence_sync() = default;
+ fence_sync(const fence_sync &) = delete;
+ fence_sync &operator=(const fence_sync &) = delete;
+
+ fence_sync(fence_sync &&rhs);
+ fence_sync &operator=(fence_sync &&rhs);
+
+ virtual ~fence_sync();
+
+ /**
+ * Waits for any pending payload to complete execution.
+ *
+ * @note This method is not threadsafe.
+ *
+ * @param timeout Timeout for waiting in nanoseconds.
+ *
+ * @return VK_SUCCESS on success or if no payload or a completed payload is set.
+ * Other error code on failure or timeout.
+ */
+ VkResult wait_payload(uint64_t timeout);
+
+ /**
+ * Sets the payload for the fence that would need to complete before operations that wait on it.
+ *
+ * @note This method is not threadsafe.
+ *
+ * @param queue The Vulkan queue that may be used to submit synchronization commands.
+ * @param[in] sem_payload Array of Vulkan Semaphores that comprise the payload.
+ * @param sem_count Number of elements in @p sem_payload.
+ *
+ * @return VK_SUCCESS on success or other error code on failing to set the payload.
+ */
+ VkResult set_payload(VkQueue queue, const VkSemaphore *sem_payload, uint32_t sem_count);
+
+protected:
+ /**
+ * Non-public constructor to initialize the object with valid data.
+ *
+ * @param device The device private data for the fence.
+ * @param vk_fence The created Vulkan fence.
+ */
+ fence_sync(layer::device_private_data &device, VkFence vk_fence);
+
+ VkFence get_fence()
+ {
+ return fence;
+ }
+
+ /**
+ * Swaps current payload. This operation could be performed when exporting or importing external fences.
+ *
+ * @param new_payload Whether a new payload is set.
+ *
+ * @return If there is an existing payload that is being replaced.
+ */
+ bool swap_payload(bool new_payload);
+
+ layer::device_private_data &get_device()
+ {
+ return *dev;
+ }
+
+private:
+ VkFence fence{ VK_NULL_HANDLE };
+ bool has_payload{ false };
+ bool payload_finished{ false };
+ layer::device_private_data *dev{ nullptr };
+};
+
+/**
+ * Synchronization using a Vulkan fence exportable to a native Sync FD object.
+ */
+class sync_fd_fence_sync : public fence_sync
+{
+public:
+ /** Default constructor provided for use with @ref util::optional */
+ sync_fd_fence_sync() = default;
+
+ /**
+ * Checks if a Vulkan device can support Sync FD fences.
+ *
+ * @param instance The instance private data for the physical device.
+ * @param phys_dev The physical device to check support for.
+ *
+ * @return true if supported, false otherwise.
+ */
+ static bool is_supported(layer::instance_private_data &instance, VkPhysicalDevice phys_dev);
+
+ /**
+ * Creates a new fence compatible with Sync FD.
+ *
+ * @param device The device private data for which to create the fence.
+ *
+ * @return Empty optional on failure or initialized fence.
+ */
+ static util::optional<sync_fd_fence_sync> create(layer::device_private_data &device);
+
+ /**
+ * Exports the fence to a native Sync FD.
+ *
+ * @note This method is not threadsafe.
+ *
+ * @return The exported Sync FD on success or empty optional on failure.
+ */
+ util::optional<util::fd_owner> export_sync_fd();
+
+private:
+ /**
+ * Non-public constructor to initialize the object with valid data.
+ *
+ * @param device The device private data for the fence.
+ * @param vk_fence The created exportable Vulkan fence.
+ */
+ sync_fd_fence_sync(layer::device_private_data &device, VkFence vk_fence);
+};
+
+} /* namespace wsi */
\ No newline at end of file
{
}
+void surface_registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface,
+ uint32_t version)
+{
+ auto wsi_surface = reinterpret_cast<wsi::wayland::surface *>(data);
+
+ if (!strcmp(interface, zwp_linux_dmabuf_v1_interface.name) && version >= ZWP_LINUX_DMABUF_V1_MODIFIER_SINCE_VERSION)
+ {
+ zwp_linux_dmabuf_v1 *dmabuf_interface_obj = reinterpret_cast<zwp_linux_dmabuf_v1 *>(wl_registry_bind(
+ wl_registry, name, &zwp_linux_dmabuf_v1_interface, ZWP_LINUX_DMABUF_V1_MODIFIER_SINCE_VERSION));
+
+ if (dmabuf_interface_obj == nullptr)
+ {
+ WSI_LOG_ERROR("Failed to get zwp_linux_dmabuf_v1 interface.");
+ return;
+ }
+
+ wsi_surface->dmabuf_interface.reset(dmabuf_interface_obj);
+ }
+ else if (!strcmp(interface, zwp_linux_explicit_synchronization_v1_interface.name))
+ {
+ zwp_linux_explicit_synchronization_v1 *explicit_sync_interface_obj =
+ reinterpret_cast<zwp_linux_explicit_synchronization_v1 *>(
+ wl_registry_bind(wl_registry, name, &zwp_linux_explicit_synchronization_v1_interface, 1));
+
+ if (explicit_sync_interface_obj == nullptr)
+ {
+ WSI_LOG_ERROR("Failed to get zwp_linux_explicit_synchronization_v1 interface.");
+ return;
+ }
+
+ wsi_surface->explicit_sync_interface.reset(explicit_sync_interface_obj);
+ }
+}
+
bool surface::init()
{
surface_queue = wl_display_create_queue(wayland_display);
return false;
};
- auto registry = registry_owner{ wl_display_get_registry(display_proxy.get()) };
+ auto registry = wayland_owner<wl_registry>{ wl_display_get_registry(display_proxy.get()) };
if (registry == nullptr)
{
WSI_LOG_ERROR("Failed to get wl display registry.");
return false;
}
- const wl_registry_listener registry_listener = { registry_handler };
- int res = wl_registry_add_listener(registry.get(), ®istry_listener, &dmabuf_interface);
+ const wl_registry_listener registry_listener = { surface_registry_handler };
+ int res = wl_registry_add_listener(registry.get(), ®istry_listener, this);
if (res < 0)
{
WSI_LOG_ERROR("Failed to add registry listener.");
return false;
}
+ if (explicit_sync_interface.get() == nullptr)
+ {
+ WSI_LOG_ERROR("Failed to obtain zwp_linux_explicit_synchronization_v1 interface.");
+ return false;
+ }
+
+ auto surface_sync_obj =
+ zwp_linux_explicit_synchronization_v1_get_synchronization(explicit_sync_interface.get(), wayland_surface);
+ if (surface_sync_obj == nullptr)
+ {
+ WSI_LOG_ERROR("Failed to retrieve surface synchronization interface");
+ return false;
+ }
+
+ surface_sync_interface.reset(surface_sync_obj);
+
VkResult vk_res =
get_supported_formats_and_modifiers(wayland_display, surface_queue, dmabuf_interface.get(), supported_formats);
if (vk_res != VK_SUCCESS)
uint64_t modifier;
};
+/**
+ * Wayland callback for global wl_registry events to handle global objects required by @ref wsi::wayland::surface
+ */
+extern "C" void surface_registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name,
+ const char *interface, uint32_t version);
+
class surface : public wsi::surface
{
public:
return dmabuf_interface.get();
}
+ /**
+ * @brief Returns a pointer to the Wayland zwp_linux_surface_synchronization_v1 interface obtained for the wayland
+ * surface.
+ *
+ * The raw pointer is valid for the lifetime of the surface.
+ */
+ zwp_linux_surface_synchronization_v1* get_surface_sync_interface()
+ {
+ return surface_sync_interface.get();
+ }
+
/**
* @brief Returns a reference to a list of DRM formats supported by the Wayland surface.
*
*/
bool init();
+ friend void surface_registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name,
+ const char *interface, uint32_t version);
+
/** The native Wayland display */
wl_display *wayland_display;
/** The native Wayland surface */
surface_properties properties;
/** Container for the zwp_linux_dmabuf_v1 interface binding */
- zwp_linux_dmabuf_v1_owner dmabuf_interface;
+ wayland_owner<zwp_linux_dmabuf_v1> dmabuf_interface;
+
+ /** Container for the zwp_linux_explicit_synchronization_v1 interface binding */
+ wayland_owner<zwp_linux_explicit_synchronization_v1> explicit_sync_interface;
+ /** Container for the surface specific zwp_linux_surface_synchronization_v1 interface. */
+ wayland_owner<zwp_linux_surface_synchronization_v1> surface_sync_interface;
+
/** Private queue for surface events generated by the layer */
wl_event_queue *surface_queue;
};
VkResult surface_properties::get_surface_formats(VkPhysicalDevice physical_device, VkSurfaceKHR surface,
uint32_t *surfaceFormatCount, VkSurfaceFormatKHR *surfaceFormats)
{
- auto &instance = layer::instance_private_data::get(physical_device);
-
assert(specific_surface);
if (!supported_formats.size())
{
VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME,
+ VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME,
};
VkResult surface_properties::get_required_device_extensions(util::extension_list &extension_list)
VkBool32 GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physical_device, uint32_t queue_index,
struct wl_display *display)
{
+ bool dev_supports_sync =
+ sync_fd_fence_sync::is_supported(layer::instance_private_data::get(physical_device), physical_device);
+ if (!dev_supports_sync)
+ {
+ return VK_FALSE;
+ }
+
return VK_TRUE;
}
VkDeviceMemory memory[MAX_PLANES];
uint32_t num_planes;
+
+ sync_fd_fence_sync present_fence;
};
swapchain::swapchain(layer::device_private_data &dev_data, const VkAllocationCallbacks *pAllocator,
}
/* Initialize presentation fence. */
- VkFenceCreateInfo fenceInfo = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0 };
- result = m_device_data.disp.CreateFence(m_device, &fenceInfo, get_allocation_callbacks(), &image.present_fence);
- if (result != VK_SUCCESS)
+ auto present_fence = sync_fd_fence_sync::create(m_device_data);
+ if (!present_fence.has_value())
{
destroy_image(image);
- return result;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
+ image_data->present_fence = std::move(present_fence.value());
return VK_SUCCESS;
}
}
wl_surface_attach(m_surface, image_data->buffer, 0, 0);
+
+ auto present_sync_fd = image_data->present_fence.export_sync_fd();
+ if (!present_sync_fd.has_value())
+ {
+ WSI_LOG_ERROR("Failed to export present fence.");
+ m_is_valid = false;
+ }
+ else if (present_sync_fd->is_valid())
+ {
+ zwp_linux_surface_synchronization_v1_set_acquire_fence(m_wsi_surface->get_surface_sync_interface(),
+ present_sync_fd->get());
+ }
+
/* TODO: work out damage */
wl_surface_damage(m_surface, 0, 0, INT32_MAX, INT32_MAX);
if (image.status != swapchain_image::INVALID)
{
- if (image.present_fence != VK_NULL_HANDLE)
- {
- m_device_data.disp.DestroyFence(m_device, image.present_fence, get_allocation_callbacks());
- image.present_fence = VK_NULL_HANDLE;
- }
-
if (image.image != VK_NULL_HANDLE)
{
m_device_data.disp.DestroyImage(m_device, image.image, get_allocation_callbacks());
}
}
+VkResult swapchain::image_set_present_payload(swapchain_image &image, VkQueue queue, const VkSemaphore *sem_payload,
+ uint32_t sem_count)
+{
+ auto image_data = reinterpret_cast<wayland_image_data *>(image.data);
+ return image_data->present_fence.set_payload(queue, sem_payload, sem_count);
+}
+
+VkResult swapchain::image_wait_present(swapchain_image &, uint64_t)
+{
+    /* With explicit sync in use there is no need to wait for the present sync before submitting the image to the
+ * compositor. */
+ return VK_SUCCESS;
+}
+
} // namespace wayland
} // namespace wsi
*/
VkResult get_free_buffer(uint64_t *timeout) override;
+ VkResult image_set_present_payload(swapchain_image &image, VkQueue queue, const VkSemaphore *sem_payload,
+ uint32_t sem_count) override;
+
+ VkResult image_wait_present(swapchain_image &image, uint64_t timeout) override;
+
private:
struct wayland_image_data;
extern "C" {
- void registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface,
- uint32_t version)
- {
- auto dmabuf_interface = reinterpret_cast<wsi::wayland::zwp_linux_dmabuf_v1_owner* >(data);
-
- if (!strcmp(interface, "zwp_linux_dmabuf_v1"))
- {
- version = ZWP_LINUX_DMABUF_V1_MODIFIER_SINCE_VERSION;
- zwp_linux_dmabuf_v1 *dmabuf_interface_obj =
- reinterpret_cast<zwp_linux_dmabuf_v1 *>(wl_registry_bind(
- wl_registry, name, &zwp_linux_dmabuf_v1_interface, version));
-
- if (dmabuf_interface_obj == nullptr)
- {
- WSI_LOG_ERROR("Failed to get zwp_linux_dmabuf_v1 interface.");
- return;
- }
-
- dmabuf_interface->reset(dmabuf_interface_obj);
- }
- }
-
int dispatch_queue(struct wl_display *display, struct wl_event_queue *queue, int timeout)
{
int err;
#include <stdint.h>
#include <wayland-client.h>
-#include <linux-dmabuf-unstable-v1-client-protocol.h>
#include "util/custom_allocator.hpp"
extern "C" {
- void registry_handler(void *data, struct wl_registry *wl_registry, uint32_t name, const char *interface,
- uint32_t version);
-
/**
* @brief Dispatch events from a Wayland event queue
*
#include <wayland-client.h>
#include <linux-dmabuf-unstable-v1-client-protocol.h>
+#include <linux-explicit-synchronization-unstable-v1-protocol.h>
#include <memory.h>
+#include <functional>
namespace wsi
{
namespace wayland
{
-struct registry_deleter
+static inline void wayland_object_destroy(wl_registry *obj)
{
- void operator()(wl_registry* obj) const
- {
- if (obj != nullptr)
- {
- wl_registry_destroy(obj);
- }
- }
-};
+ wl_registry_destroy(obj);
+}
-struct dmabuf_deleter
+static inline void wayland_object_destroy(zwp_linux_dmabuf_v1 *obj)
+{
+ zwp_linux_dmabuf_v1_destroy(obj);
+}
+
+static inline void wayland_object_destroy(zwp_linux_explicit_synchronization_v1 *obj)
+{
+ zwp_linux_explicit_synchronization_v1_destroy(obj);
+}
+
+static inline void wayland_object_destroy(zwp_linux_surface_synchronization_v1 *obj)
+{
+ zwp_linux_surface_synchronization_v1_destroy(obj);
+}
+
+template <typename T>
+struct wayland_deleter
{
- void operator()(zwp_linux_dmabuf_v1* obj) const
+ void operator()(T *obj) const
{
if (obj != nullptr)
{
- zwp_linux_dmabuf_v1_destroy(obj);
+ wayland_object_destroy(obj);
}
}
};
-using registry_owner = std::unique_ptr<wl_registry, registry_deleter>;
-using zwp_linux_dmabuf_v1_owner = std::unique_ptr<zwp_linux_dmabuf_v1, dmabuf_deleter>;
+template<typename T>
+using wayland_owner = std::unique_ptr<T, wayland_deleter<T>>;
template <typename T>
static std::unique_ptr<T, std::function<void(T *)>> make_proxy_with_queue(T *object, wl_event_queue *queue)