/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/randr.h>
#include <xcb/shm.h>

#include "util/macros.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
#include "util/os_file.h"
#include "util/os_time.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"

#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#ifdef HAVE_SYS_SHM_H
#include <sys/ipc.h>
#include <sys/shm.h>
#endif
struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
   bool is_xwayland;
   bool has_mit_shm;
   bool has_xfixes;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};
/**
 * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t cookie;
   xcb_dri3_open_reply_t *reply;
   int fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   /* According to the DRI3 extension, nfd must equal one. */
   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}
/**
 * Checks compatibility of the device wsi_dev with the device the X server
 * provides via DRI3.
 *
 * This returns true when no device could be retrieved from the X server or
 * when the information for the X server device indicates that it is the same
 * device.
 */
static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   /* Open the DRI3 device from the X server. If we do not retrieve one we
    * assume our local device is compatible.
    */
   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}
static bool
wsi_x11_detect_xwayland(xcb_connection_t *conn)
{
   xcb_randr_query_version_cookie_t ver_cookie =
      xcb_randr_query_version_unchecked(conn, 1, 3);
   xcb_randr_query_version_reply_t *ver_reply =
      xcb_randr_query_version_reply(conn, ver_cookie, NULL);
   bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
                                       ver_reply->minor_version >= 3);
   free(ver_reply);

   if (!has_randr_v1_3)
      return false;

   const xcb_setup_t *setup = xcb_get_setup(conn);
   xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);

   xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
      xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
   xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
      xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);

   if (!gsr_reply || gsr_reply->num_outputs == 0) {
      free(gsr_reply);
      return false;
   }

   xcb_randr_output_t *randr_outputs =
      xcb_randr_get_screen_resources_current_outputs(gsr_reply);
   xcb_randr_get_output_info_cookie_t goi_cookie =
      xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
   free(gsr_reply);

   xcb_randr_get_output_info_reply_t *goi_reply =
      xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
   if (!goi_reply)
      return false;

   char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
   bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
   free(goi_reply);

   return is_xwayland;
}
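/* What the heuristic above keys on (illustrative, not normative): Xwayland
 * names its RandR outputs "XWAYLAND0", "XWAYLAND1", and so on, whereas a
 * native Xorg server reports connector names such as "DP-1" or "HDMI-A-0".
 * Comparing the first eight characters of the first output's name is
 * therefore enough to tell the two apart.
 */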
static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
                                amd_cookie, nv_cookie, shm_cookie, sync_cookie,
                                xfixes_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
                               *amd_reply, *nv_reply, *shm_reply = NULL,
                               *xfixes_reply;
   bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
                    wsi_dev->has_import_memory_host;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   sync_cookie = xcb_query_extension(conn, 4, "SYNC");
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");
   randr_cookie = xcb_query_extension(conn, 5, "RANDR");
   xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");

   if (wants_shm)
      shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and are
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   xcb_discard_reply(conn, sync_cookie.sequence);
   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
   if (wants_shm)
      shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
   if (!dri3_reply || !pres_reply || !xfixes_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(randr_reply);
      free(amd_reply);
      free(nv_reply);
      free(xfixes_reply);
      if (wants_shm)
         free(shm_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_xfixes = xfixes_reply->present != 0;
   if (wsi_conn->has_xfixes) {
      xcb_xfixes_query_version_cookie_t ver_cookie;
      xcb_xfixes_query_version_reply_t *ver_reply;

      ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
      ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
      wsi_conn->has_xfixes = ver_reply != NULL &&
         (ver_reply->major_version >= 2);
      free(ver_reply);
   }

   if (randr_reply && randr_reply->present != 0)
      wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn);
   else
      wsi_conn->is_xwayland = false;

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   wsi_conn->has_mit_shm = false;
   if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
      bool has_mit_shm = shm_reply->present != 0;

      xcb_shm_query_version_cookie_t ver_cookie;
      xcb_shm_query_version_reply_t *ver_reply;

      ver_cookie = xcb_shm_query_version(conn);
      ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);

      has_mit_shm = ver_reply->shared_pixmaps;
      free(ver_reply);

      if (has_mit_shm) {
         xcb_void_cookie_t cookie;
         xcb_generic_error_t *error;

         cookie = xcb_shm_detach_checked(conn, 0);
         if ((error = xcb_request_check(conn, cookie))) {
            if (error->error_code != BadRequest)
               wsi_conn->has_mit_shm = true;
            free(error);
         }
      }
   }

   free(dri3_reply);
   free(pres_reply);
   free(randr_reply);
   free(amd_reply);
   free(nv_reply);
   free(xfixes_reply);
   if (wants_shm)
      free(shm_reply);

   return wsi_conn;
}
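/* A sketch of the reasoning behind the MIT-SHM probe above (my reading of
 * the code, not normative): xcb_shm_detach_checked(conn, 0) is a
 * deliberately bogus request against segment 0. A server that really
 * services MIT-SHM rejects it with some SHM-specific error (anything other
 * than BadRequest), which proves the extension is actually wired up; a
 * server that only nominally advertises it answers BadRequest, and
 * has_mit_shm stays false.
 */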
static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;
   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}
/**
 * Get internal struct representing an xcb_connection_t.
 *
 * This can allocate the struct but the caller does not own the struct. It is
 * deleted on wsi_x11_finish_wsi by the hash table it is inserted into.
 *
 * If the allocation fails NULL is returned.
 */
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}
struct surface_format {
   VkFormat format;
   unsigned bits_per_rgb;
};

static const struct surface_format formats[] = {
   { VK_FORMAT_B8G8R8A8_SRGB,             8 },
   { VK_FORMAT_B8G8R8A8_UNORM,            8 },
   { VK_FORMAT_A2R10G10B10_UNORM_PACK32, 10 },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
   VK_PRESENT_MODE_FIFO_RELAXED_KHR,
};
static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}
static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}
static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, NULL);
      if (visual)
         return visual;
   }

   return NULL;
}
static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}
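/* Worked example (illustrative): a typical depth-32 TrueColor visual has
 * red_mask 0x00ff0000, green_mask 0x0000ff00 and blue_mask 0x000000ff, so
 * rgb_mask is 0x00ffffff while all_mask is 0xffffffff; the leftover
 * 0xff000000 byte means the visual carries alpha. The same masks on a
 * depth-24 visual leave no bits over, so it does not.
 */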
static bool
visual_supported(xcb_visualtype_t *visual)
{
   if (!visual)
      return false;

   return visual->bits_per_rgb_value == 8 || visual->bits_per_rgb_value == 10;
}
VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                               uint32_t queueFamilyIndex,
                                               xcb_connection_t *connection,
                                               xcb_visualid_t visual_id)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn))
         return false;
   }

   if (!visual_supported(connection_get_visualtype(connection, visual_id)))
      return false;

   return true;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                uint32_t queueFamilyIndex,
                                                Display *dpy,
                                                VisualID visualID)
{
   return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}
static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);

   return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;

   return ((VkIcdSurfaceXcb *)icd_surface)->window;
}
static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn)) {
         *pSupported = false;
         return VK_SUCCESS;
      }
   }

   if (!visual_supported(get_visualtype_for_window(conn, window, NULL))) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}
static unsigned
x11_get_min_image_count(const struct wsi_device *wsi_device)
{
   if (wsi_device->x11.override_minImageCount)
      return wsi_device->x11.override_minImageCount;

   /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
    * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
    * the render latency is CPU duration + GPU duration.
    *
    * This means that with scanout from pageflipping we need 3 frames to run
    * full speed:
    * 1) CPU rendering work
    * 2) GPU rendering work
    * 3) scanout
    *
    * Once we have a nonblocking acquire that returns a semaphore we can merge
    * 1 and 3. Hence the ideal implementation needs only 2 images, but games
    * cannot tell that we currently do not have an ideal implementation and
    * hence need to allocate 3 images. So let us do it for them.
    *
    * This is a tradeoff as it uses more memory than needed for non-fullscreen
    * and non-performance intensive applications.
    */
   return 3;
}
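/* Worked example of the pipelining argument above (illustrative numbers):
 * with 5 ms of CPU work and 10 ms of GPU work per frame, throughput is
 * 1/MAX(5, 10) = 100 fps while each frame is in flight for 5 + 10 = 15 ms.
 * With scanout holding a third buffer, one image is being displayed, one is
 * being rendered by the GPU, and one is being recorded by the CPU; hence
 * the return value of 3.
 */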
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   }
   free(err);
   free(geom);
   if (!geom)
      return VK_ERROR_SURFACE_LOST_KHR;

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = x11_get_min_image_count(wsi_device);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;

   return VK_SUCCESS;
}
static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device,
                                   &caps->surfaceCapabilities);

   if (result != VK_SUCCESS)
      return result;

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}
static bool
get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
                      VkFormat *sorted_formats, unsigned *count)
{
   xcb_connection_t *conn = x11_surface_get_connection(surface);
   xcb_window_t window = x11_surface_get_window(surface);
   xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL);
   if (!visual)
      return false;

   *count = 0;
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      if (formats[i].bits_per_rgb == visual->bits_per_rgb_value)
         sorted_formats[(*count)++] = formats[i].format;
   }

   if (wsi_device->force_bgra8_unorm_first) {
      for (unsigned i = 0; i < *count; i++) {
         if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
            sorted_formats[i] = sorted_formats[0];
            sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
            break;
         }
      }
   }

   return true;
}
static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
         f->format = sorted_formats[i];
         f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}
static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = sorted_formats[i];
         f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}
static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}
static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      xcb_generic_error_t *err = NULL;
      xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(conn, geom_cookie, &err);
      free(err);
      if (geom) {
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { geom->width, geom->height },
         };
      }
      free(geom);
      if (!geom)
         return VK_ERROR_SURFACE_LOST_KHR;
   }

   return vk_outarray_status(&out);
}
VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXcbSurfaceKHR(VkInstance _instance,
                        const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXcb *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXlibSurfaceKHR(VkInstance _instance,
                         const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXlib *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}
struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   xcb_xfixes_region_t                       update_region; /* long lived XID */
   xcb_xfixes_region_t                       update_area;   /* the above or None */
   bool                                      busy;
   bool                                      present_queued;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
   uint32_t                                  serial;
   xcb_shm_seg_t                             shmseg;
   int                                       shmid;
   uint8_t *                                 shmaddr;
};

struct x11_swapchain {
   struct wsi_swapchain                        base;

   bool                                        has_dri3_modifiers;
   bool                                        has_mit_shm;

   xcb_connection_t *                          conn;
   xcb_window_t                                window;
   xcb_gc_t                                    gc;
   uint32_t                                    depth;
   VkExtent2D                                  extent;

   xcb_present_event_t                         event_id;
   xcb_special_event_t *                       special_event;
   uint64_t                                    send_sbc;
   uint64_t                                    last_present_msc;
   uint32_t                                    stamp;
   atomic_int                                  sent_image_count;

   bool                                        has_present_queue;
   bool                                        has_acquire_queue;
   VkResult                                    status;
   bool                                        copy_is_suboptimal;
   struct wsi_queue                            present_queue;
   struct wsi_queue                            acquire_queue;
   pthread_t                                   queue_manager;

   struct x11_image                            images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)
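/* The trailing images[0] member is a zero-length array: the swapchain and
 * its per-image state live in one allocation. A sketch of the sizing done
 * later in x11_surface_create_swapchain:
 *
 *    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
 *    chain = vk_zalloc(pAllocator, size, 8,
 *                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 */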
/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
                      const char *file, int line)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
#ifndef NDEBUG
      fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
              file, line, vk_Result_to_str(result));
#endif
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
#ifndef NDEBUG
      if (chain->status != VK_SUBOPTIMAL_KHR) {
         fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
                 file, line, vk_Result_to_str(result));
      }
#endif
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
#define x11_swapchain_result(chain, result) \
   _x11_swapchain_result(chain, result, __FILE__, __LINE__)
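/* Typical use (illustrative): callers funnel every status through the macro
 * so the file/line of the first failure is logged and sticky statuses are
 * preserved, e.g.
 *
 *    return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
 */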
static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}
/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_SUBOPTIMAL_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            chain->sent_image_count--;
            assert(chain->sent_image_count >= 0);
            if (chain->has_acquire_queue)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         unsigned i;
         for (i = 0; i < chain->base.image_count; i++) {
            struct x11_image *image = &chain->images[i];
            if (image->present_queued && image->serial == complete->serial)
               image->present_queued = false;
         }
         chain->last_present_msc = complete->msc;
      }

      VkResult result = VK_SUCCESS;
      switch (complete->mode) {
      case XCB_PRESENT_COMPLETE_MODE_COPY:
         if (chain->copy_is_suboptimal)
            result = VK_SUBOPTIMAL_KHR;
         break;
      case XCB_PRESENT_COMPLETE_MODE_FLIP:
         /* If we ever go from flipping to copying, the odds are very likely
          * that we could reallocate in a more optimal way if we didn't have
          * to care about scanout, so we always do this.
          */
         chain->copy_is_suboptimal = true;
         break;
#ifdef HAVE_DRI3_MODIFIERS
      case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
         /* The winsys is now trying to flip directly and cannot due to our
          * configuration. Request the user reallocate.
          */
         result = VK_SUBOPTIMAL_KHR;
         break;
#endif
      default:
         break;
      }

      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = os_time_get_nano();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}
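/* The MIN2 clamp guards the addition against wrap-around: for the common
 * "wait forever" case of timeout == UINT64_MAX, the function simply returns
 * UINT64_MAX instead of overflowing back to a tiny deadline.
 */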
/**
 * Acquire a ready-to-use image directly from our swapchain. If all images are
 * busy wait until one is not anymore or till timeout.
 */
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event happens, the fd will still
             * poll. So recalculate the timeout now just in case.
             */
            uint64_t current_time = os_time_get_nano();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }
}
/**
 * Acquire a ready-to-use image from the acquire-queue. Only relevant in fifo
 * presentation mode.
 */
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->has_acquire_queue);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so it is safe to update
       * chain->status. Calling x11_swapchain_result with VK_TIMEOUT won't
       * modify chain->status, so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}
/**
 * Send image to X server via Present extension.
 */
static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
                        uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
       (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
        wsi_conn->is_xwayland) ||
       chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   /* Poll for any available event and update the swapchain status. This could
    * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
    * associated X11 surface has been resized.
    */
   xcb_generic_event_t *event;
   while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }

   xshmfence_reset(image->shm_fence);

   ++chain->sent_image_count;
   assert(chain->sent_image_count <= chain->base.image_count);

   ++chain->send_sbc;
   image->present_queued = true;
   image->serial = (uint32_t) chain->send_sbc;

   xcb_void_cookie_t cookie =
      xcb_present_pixmap_checked(chain->conn,
                                 chain->window,
                                 image->pixmap,
                                 image->serial,
                                 0,                   /* valid */
                                 image->update_area,  /* update */
                                 0,                   /* x_off */
                                 0,                   /* y_off */
                                 XCB_NONE,            /* target_crtc */
                                 XCB_NONE,            /* wait_fence */
                                 image->sync_fence,   /* idle_fence */
                                 options,
                                 target_msc,
                                 divisor,
                                 remainder, 0, NULL);
   xcb_generic_error_t *error = xcb_request_check(chain->conn, cookie);
   if (error) {
      free(error);
      return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
   }

   return x11_swapchain_result(chain, VK_SUCCESS);
}
/**
 * Send image to X server unaccelerated (software drivers).
 */
static VkResult
x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
                      uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   xcb_void_cookie_t cookie;
   void *myptr = image->base.cpu_map;
   size_t hdr_len = sizeof(xcb_put_image_request_t);
   int stride_b = image->base.row_pitches[0];
   size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
   uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
   chain->images[image_index].busy = false;

   if (size < max_req_len) {
      cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                             chain->window,
                             chain->gc,
                             image->base.row_pitches[0] / 4,
                             chain->extent.height,
                             0, 0, 0, 24,
                             image->base.row_pitches[0] * chain->extent.height,
                             image->base.cpu_map);
      xcb_discard_reply(chain->conn, cookie.sequence);
   } else {
      int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
      int y_start = 0;
      int y_todo = chain->extent.height;
      while (y_todo) {
         int this_lines = MIN2(num_lines, y_todo);
         cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                                chain->window,
                                chain->gc,
                                image->base.row_pitches[0] / 4,
                                this_lines,
                                0, y_start, 0, 24,
                                this_lines * stride_b,
                                (const uint8_t *)myptr + (y_start * stride_b));
         xcb_discard_reply(chain->conn, cookie.sequence);
         y_start += this_lines;
         y_todo -= this_lines;
      }
   }

   xcb_flush(chain->conn);
   return x11_swapchain_result(chain, VK_SUCCESS);
}
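/* The request-size arithmetic above, spelled out (illustrative numbers):
 * xcb_get_maximum_request_length() returns a length in 4-byte units, which
 * is why the image size is shifted right by 2 before the comparison and the
 * limit is shifted left by 2 when computing num_lines. For a 1920x1080 BGRA
 * image, stride_b is 7680 and the payload is roughly 8.3 MB; with the usual
 * BIG-REQUESTS limit of about 16 MB the single xcb_put_image path is taken,
 * while a bare 256 KB protocol limit would force the row-chunked loop.
 */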
/**
 * Send image to the X server for presentation at target_msc.
 */
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc)
{
   if (chain->base.wsi->sw && !chain->has_mit_shm)
      return x11_present_to_x11_sw(chain, image_index, target_msc);
   return x11_present_to_x11_dri3(chain, image_index, target_msc);
}
/**
 * Acquire a ready-to-use image from the swapchain.
 *
 * This usually means that the image is not waiting on presentation and that
 * the image has been released by the X server to be used again by the
 * consumer.
 */
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (chain->base.wsi->sw && !chain->has_mit_shm) {
      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            *image_index = i;
            chain->images[i].busy = true;
            xcb_generic_error_t *err;

            xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
            xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
            VkResult result = VK_SUCCESS;
            if (geom) {
               if (chain->extent.width != geom->width ||
                   chain->extent.height != geom->height)
                  result = VK_SUBOPTIMAL_KHR;
            } else {
               result = VK_ERROR_SURFACE_LOST_KHR;
            }
            free(err);
            free(geom);
            return result;
         }
      }
      return VK_NOT_READY;
   }

   if (chain->has_acquire_queue) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}
#define MAX_DAMAGE_RECTS 64
/**
 * Queue a new presentation of an image that was previously acquired by the
 * consumer.
 *
 * Note that in immediate presentation mode this does not really queue the
 * presentation but directly asks the X server to show it.
 */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_xfixes_region_t update_area = 0;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
      xcb_rectangle_t rects[MAX_DAMAGE_RECTS];

      update_area = chain->images[image_index].update_region;
      for (unsigned i = 0; i < damage->rectangleCount; i++) {
         const VkRectLayerKHR *rect = &damage->pRectangles[i];
         assert(rect->layer == 0);
         rects[i].x = rect->offset.x;
         rects[i].y = rect->offset.y;
         rects[i].width = rect->extent.width;
         rects[i].height = rect->extent.height;
      }
      xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
   }
   chain->images[image_index].update_area = update_area;

   chain->images[image_index].busy = true;
   if (chain->has_present_queue) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      /* No present queue means immediate mode, so we present immediately. */
      return x11_present_to_x11(chain, image_index, 0);
   }
}
/**
 * Decides if an early wait on buffer fences before buffer submission is required. That is for:
 *   - Mailbox mode, as otherwise the latest image in the queue might not be fully rendered at
 *     present time, which could lead to missing a frame.
 *   - Immediate mode under Xwayland, as it works practically the same as mailbox mode using the
 *     mailbox mechanism of Wayland. Sending a buffer with fences not yet signalled can make the
 *     compositor miss a frame when compositing the final image with this buffer.
 *
 * Note though that early waits can be disabled in general on Xwayland by setting the
 * 'vk_xwayland_wait_ready' DRIConf option to false.
 */
static bool
x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
                          struct wsi_x11_connection *wsi_conn,
                          VkPresentModeKHR present_mode)
{
   if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
      return false;
   }

   switch (present_mode) {
   case VK_PRESENT_MODE_MAILBOX_KHR:
      return true;
   case VK_PRESENT_MODE_IMMEDIATE_KHR:
      return wsi_conn->is_xwayland;
   default:
      return false;
   }
}
/**
 * The number of images that are not owned by X11:
 *  (1) in the ownership of the app, or
 *  (2) waiting for the app to take ownership through an acquire, or
 *  (3) in the present queue waiting for the FIFO thread to present to X11.
 */
static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
{
   return chain->base.image_count - chain->sent_image_count;
}
/**
 * Our queue manager. Albeit called x11_manage_fifo_queues, it only directly
 * manages the present-queue and does this in general in fifo and mailbox
 * presentation modes (there is no present-queue in immediate mode with the
 * exception of Xwayland).
 *
 * Runs in a separate thread, blocks and reacts to queued images on the
 * present-queue.
 *
 * In mailbox mode the queue management is simplified since we only need to
 * pull new images from the present queue and can directly present them.
 *
 * In fifo mode images can only be presented one after the other. For that,
 * after sending the image to the X server we wait until the image either has
 * been presented or released and only then pull a new image from the
 * present-queue.
 */
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   VkResult result = VK_SUCCESS;

   assert(chain->has_present_queue);

   u_thread_setname("WSI swapchain queue");

   while (chain->status >= 0) {
      /* We can block here unconditionally because after an image was sent to
       * the server (later on in this loop) we ensure at least one image is
       * acquirable by the consumer or wait there on such an event.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);

      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      /* Waiting for the GPU work to finish at this point in time is required in certain usage
       * scenarios. Otherwise we wait as usual in wsi_common_queue_present.
       */
      if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
                                    chain->base.present_mode)) {
         result = chain->base.wsi->WaitForFences(chain->base.device, 1,
                                        &chain->base.fences[image_index],
                                        true, UINT64_MAX);
         if (result != VK_SUCCESS) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }
      }

      uint64_t target_msc = 0;
      if (chain->has_acquire_queue)
         target_msc = chain->last_present_msc + 1;

      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      if (chain->has_acquire_queue) {
         /* Assume this isn't a swapchain where we force 5 images, because those
          * don't end up with an acquire queue at the moment.
          */
         unsigned min_image_count = x11_get_min_image_count(chain->base.wsi);

         /* With drirc overrides some games have swapchains with fewer than the
          * minimum number of images. */
         min_image_count = MIN2(min_image_count, chain->base.image_count);

         /* We always need to ensure that the app can have this number of images
          * acquired concurrently in between presents:
          * "VUID-vkAcquireNextImageKHR-swapchain-01802
          *  If the number of currently acquired images is greater than the difference
          *  between the number of images in swapchain and the value of
          *  VkSurfaceCapabilitiesKHR::minImageCount as returned by a call to
          *  vkGetPhysicalDeviceSurfaceCapabilities2KHR with the surface used to
          *  create swapchain, timeout must not be UINT64_MAX"
          */
         unsigned forward_progress_guaranteed_acquired_images =
            chain->base.image_count - min_image_count + 1;

         /* Wait for our presentation to occur and ensure we have at least one
          * image that can be acquired by the client afterwards. This ensures we
          * can pull on the present-queue on the next loop.
          */
         while (chain->images[image_index].present_queued ||
                /* If we have images in the present queue the outer loop won't block and a break
                 * here would end up at this loop again, otherwise a break here satisfies
                 * VUID-vkAcquireNextImageKHR-swapchain-01802 */
                x11_driver_owned_images(chain) < forward_progress_guaranteed_acquired_images) {

            xcb_generic_event_t *event =
               xcb_wait_for_special_event(chain->conn, chain->special_event);
            if (!event) {
               result = VK_ERROR_SURFACE_LOST_KHR;
               goto fail;
            }

            result = x11_handle_dri3_present_event(chain, (void *)event);
            /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
            result = x11_swapchain_result(chain, result);
            free(event);
            if (result < 0)
               goto fail;
         }
      }
   }

fail:
   x11_swapchain_result(chain, result);
   if (chain->has_acquire_queue)
      wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
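/* The forward-progress arithmetic above, spelled out (illustrative): for a
 * 3-image FIFO swapchain with min_image_count 3, the thread keeps waiting
 * until the X server owns enough images that 3 - 3 + 1 = 1 image can still
 * be acquired by the application with timeout == UINT64_MAX, which is
 * exactly the guarantee VUID-vkAcquireNextImageKHR-swapchain-01802 demands.
 */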
static uint8_t *
alloc_shm(struct wsi_image *imagew, unsigned size)
{
#ifdef HAVE_SYS_SHM_H
   struct x11_image *image = (struct x11_image *)imagew;
   image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
   if (image->shmid < 0)
      return NULL;

   uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
   /* mark the segment immediately for deletion to avoid leaks */
   shmctl(image->shmid, IPC_RMID, 0);

   if (addr == (uint8_t *) -1)
      return NULL;

   image->shmaddr = addr;
   return addr;
#else
   return NULL;
#endif
}
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;
   int fence_fd;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

   image->update_region = xcb_generate_id(chain->conn);
   xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);

   if (chain->base.wsi->sw) {
      if (!chain->has_mit_shm) {
         image->busy = false;
         return VK_SUCCESS;
      }

      image->shmseg = xcb_generate_id(chain->conn);

      xcb_shm_attach(chain->conn,
                     image->shmseg,
                     image->shmid,
                     0);
      image->pixmap = xcb_generate_id(chain->conn);
      cookie = xcb_shm_create_pixmap_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.row_pitches[0] / 4,
                                             pCreateInfo->imageExtent.height,
                                             chain->depth,
                                             image->shmseg, 0);
      xcb_discard_reply(chain->conn, cookie.sequence);
      goto out_fence;
   }
   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      /* XCB requires an array of file descriptors but we only have one */
      int fds[4] = { -1, -1, -1, -1 };
      for (int i = 0; i < image->base.num_planes; i++) {
         fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
         if (fds[i] == -1) {
            for (int j = 0; j < i; j++)
               close(fds[j]);

            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }
      }

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      /* XCB will take ownership of the FD we pass it. */
      int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
      if (fd == -1)
         return VK_ERROR_OUT_OF_HOST_MEMORY;

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp, fd);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

out_fence:
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return VK_ERROR_INITIALIZATION_FAILED;
}
static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   if (!chain->base.wsi->sw || chain->has_mit_shm) {
      cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
      xcb_discard_reply(chain->conn, cookie.sequence);
      xshmfence_unmap_shm(image->shm_fence);

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);

      cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
      xcb_discard_reply(chain->conn, cookie.sequence);
   }

   wsi_destroy_image(&chain->base, &image->base);
#ifdef HAVE_SYS_SHM_H
   if (image->shmaddr)
      shmdt(image->shmaddr);
#endif
}
static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (uint32_t i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}
static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->has_present_queue) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);

      if (chain->has_acquire_queue)
         wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);
   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}
static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}
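/* Background (my understanding, not normative): "_VARIABLE_REFRESH" is the
 * per-window CARDINAL property that DDX drivers such as xf86-video-amdgpu
 * read to decide whether adaptive sync / VRR may engage for a flipping
 * window; writing 1 opts the window in, and deleting the property opts it
 * out, as done above.
 */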
/**
 * Create the swapchain.
 *
 * Supports immediate, fifo and mailbox presentation mode.
 */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;
   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   /* Get xcb connection from the icd_surface and from that our internal struct
    * representing it.
    */
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Get number of images in our swapchain. This count depends on:
    * - requested minimal image count
    * - device characteristics
    * - presentation mode.
    */
   unsigned num_images = pCreateInfo->minImageCount;
   if (wsi_device->x11.strict_imageCount)
      num_images = pCreateInfo->minImageCount;
   else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
      num_images = MAX2(num_images, 5);
   else if (wsi_device->x11.ensure_minImageCount)
      num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));

   /* Check that we have a window up-front. It is an error to not have one. */
   xcb_window_t window = x11_surface_get_window(icd_surface);

   /* Get the geometry of that window. The bit depth of the swapchain will be fitted and the
    * chain's images extents should fit it for performance-optimizing flips.
    */
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   const uint16_t cur_width = geometry->width;
   const uint16_t cur_height = geometry->height;
   free(geometry);

   /* Allocate the actual swapchain. The size depends on image count. */
   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;
   /* When our local device is not compatible with the DRI3 device provided by
    * the X server we assume this is a PRIME system.
    */
   bool use_buffer_blit = false;
   if (!wsi_device->sw)
      if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
         use_buffer_blit = true;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator, use_buffer_blit);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->sent_image_count = 0;
   chain->last_present_msc = 0;
   chain->has_acquire_queue = false;
   chain->has_present_queue = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
   chain->has_mit_shm = wsi_conn->has_mit_shm;

   /* When images in the swapchain don't fit the window, X can still present them, but it won't
    * happen by flip, only by copy. So this is a suboptimal copy, because if the client would change
    * the chain extents X may be able to flip.
    */
   if (chain->extent.width != cur_width || chain->extent.height != cur_height)
      chain->status = VK_SUBOPTIMAL_KHR;

   /* On a new swapchain this helper variable is set to false. Once we present it will have an
    * impact once we ever do at least one flip and go back to copying afterwards. It is presumed
    * that in this case there is a high likelihood X could do flips again if the client reallocates
    * a new swapchain.
    *
    * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
    * was true, and when the next present was completed with copying, we would return
    * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
    * presents on the surface were completed with copying because of some surface state change, we
    * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
    *
    * Note also that it is questionable in general if that mechanism is really useful. It is not
    * clear why on a change from flipping to copying we can assume a reallocation has a high chance
    * of making flips work again per se. In other words it is not clear why there is need for
    * another way to inform clients about suboptimal copies besides forwarding the
    * 'PresentOptionSuboptimal' complete mode.
    */
   chain->copy_is_suboptimal = false;

   /* For our swapchain we need to listen to following Present extension events:
    * - Configure: Window dimensions changed. Images in the swapchain might need
    *   to be reallocated.
    * - Complete: An image from our swapchain was presented on the output.
    * - Idle: An image from our swapchain is not anymore accessed by the X
    *   server and can be reused.
    */
   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   /* Create the graphics context. */
   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);
   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   if (wsi_device->sw) {
      result = wsi_configure_cpu_image(&chain->base, pCreateInfo,
                                       chain->has_mit_shm ? &alloc_shm : NULL,
                                       &chain->base.image_info);
   } else if (chain->base.use_buffer_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_configure_prime_image(&chain->base, pCreateInfo,
                                         use_modifier,
                                         &chain->base.image_info);
   } else {
      result = wsi_configure_native_image(&chain->base, pCreateInfo,
                                          num_tranches, num_modifiers,
                                          (const uint64_t *const *)modifiers,
                                          &chain->base.image_info);
   }
   if (result != VK_SUCCESS)
      goto fail_modifiers;

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }
   /* Initialize queues for images in our swapchain. Possible queues are:
    * - Present queue: for images sent to the X server but not yet presented.
    * - Acquire queue: for images already presented but not yet released by the
    *   X server.
    *
    * In general queues are not used on software drivers, otherwise which queues
    * are used depends on our presentation mode:
    * - Fifo: present and acquire
    * - Mailbox: present only
    * - Immediate: present when we wait on fences before buffer submission (Xwayland)
    */
   if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
        chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
        x11_needs_wait_for_fences(wsi_device, wsi_conn,
                                  chain->base.present_mode)) &&
       !chain->base.wsi->sw) {
      chain->has_present_queue = true;

      /* The queues have a length of base.image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret)
         goto fail_init_images;

      if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
          chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
         chain->has_acquire_queue = true;

         ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
         if (ret) {
            wsi_queue_destroy(&chain->present_queue);
            goto fail_init_images;
         }

         for (unsigned i = 0; i < chain->base.image_count; i++)
            wsi_queue_push(&chain->acquire_queue, i);
      }

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         if (chain->has_acquire_queue)
            wsi_queue_destroy(&chain->acquire_queue);

         goto fail_init_images;
      }
   }

   assert(chain->has_present_queue || !chain->has_acquire_queue);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

   /* It is safe to set it here as only one swapchain can be associated with
    * the window, and swapchain creation does the association. At this point
    * we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

fail_modifiers:
   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc,
                 const struct driOptionCache *dri_options)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
         wsi_device->x11.override_minImageCount =
            driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
         wsi_device->x11.strict_imageCount =
            driQueryOptionb(dri_options, "vk_x11_strict_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
         wsi_device->x11.ensure_minImageCount =
            driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
      }
      wsi_device->x11.xwaylandWaitReady = true;
      if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
         wsi_device->x11.xwaylandWaitReady =
            driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
      }
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}
void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}