vulkan/wsi/x11: detect Xwayland via the XWAYLAND extension
src/vulkan/wsi/wsi_common_x11.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #include <xcb/xcb.h>
27 #include <xcb/dri3.h>
28 #include <xcb/present.h>
29 #include <xcb/shm.h>
30
31 #include "util/macros.h"
32 #include <stdatomic.h>
33 #include <stdlib.h>
34 #include <stdio.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <string.h>
38 #include <fcntl.h>
39 #include <poll.h>
40 #include <xf86drm.h>
41 #include "drm-uapi/drm_fourcc.h"
42 #include "util/hash_table.h"
43 #include "util/os_file.h"
44 #include "util/os_time.h"
45 #include "util/u_debug.h"
46 #include "util/u_thread.h"
47 #include "util/xmlconfig.h"
48
49 #include "vk_instance.h"
50 #include "vk_physical_device.h"
51 #include "vk_util.h"
52 #include "vk_enum_to_str.h"
53 #include "wsi_common_entrypoints.h"
54 #include "wsi_common_private.h"
55 #include "wsi_common_queue.h"
56
57 #ifdef HAVE_SYS_SHM_H
58 #include <sys/ipc.h>
59 #include <sys/shm.h>
60 #endif
61
62 struct wsi_x11_connection {
63    bool has_dri3;
64    bool has_dri3_modifiers;
65    bool has_present;
66    bool is_proprietary_x11;
67    bool is_xwayland;
68    bool has_mit_shm;
69    bool has_xfixes;
70 };
71
72 struct wsi_x11 {
73    struct wsi_interface base;
74
75    pthread_mutex_t                              mutex;
76    /* Hash table of xcb_connection -> wsi_x11_connection mappings */
77    struct hash_table *connections;
78 };
79
80
81 /**
82  * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
83  */
84 static int
85 wsi_dri3_open(xcb_connection_t *conn,
86               xcb_window_t root,
87               uint32_t provider)
88 {
89    xcb_dri3_open_cookie_t       cookie;
90    xcb_dri3_open_reply_t        *reply;
91    int                          fd;
92
93    cookie = xcb_dri3_open(conn,
94                           root,
95                           provider);
96
97    reply = xcb_dri3_open_reply(conn, cookie, NULL);
98    if (!reply)
99       return -1;
100
101    /* According to the DRI3 extension, nfd must equal one. */
102    if (reply->nfd != 1) {
103       free(reply);
104       return -1;
105    }
106
107    fd = xcb_dri3_open_reply_fds(conn, reply)[0];
108    free(reply);
109    fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
110
111    return fd;
112 }
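
/* Illustrative usage sketch -- not upstream code. Shows how a caller can
 * borrow the X server's DRM device fd with wsi_dri3_open(); the root window
 * and provider of None mirror the call made in wsi_x11_check_dri3_compatible()
 * below. Guarded out since it is only an example.
 */
#if 0
static void
example_query_server_device(xcb_connection_t *conn)
{
   xcb_screen_t *screen = xcb_setup_roots_iterator(xcb_get_setup(conn)).data;
   int fd = wsi_dri3_open(conn, screen->root, None);
   if (fd >= 0) {
      /* ... compare against the local render node, then release it ... */
      close(fd);
   }
}
#endif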
113
114 /**
115  * Checks compatibility of the device wsi_dev with the device the X server
116  * provides via DRI3.
117  *
118  * This returns true when no device could be retrieved from the X server or when
119  * the information for the X server device indicates that it is the same device.
120  */
121 static bool
122 wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
123                               xcb_connection_t *conn)
124 {
125    xcb_screen_iterator_t screen_iter =
126       xcb_setup_roots_iterator(xcb_get_setup(conn));
127    xcb_screen_t *screen = screen_iter.data;
128
129    /* Open the DRI3 device from the X server. If we do not retrieve one, we
130     * assume our local device is compatible.
131     */
132    int dri3_fd = wsi_dri3_open(conn, screen->root, None);
133    if (dri3_fd == -1)
134       return true;
135
136    bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);
137
138    close(dri3_fd);
139
140    return match;
141 }
142
143 static bool
144 wsi_x11_detect_xwayland(xcb_connection_t *conn,
145                         xcb_query_extension_reply_t *randr_reply,
146                         xcb_query_extension_reply_t *xwl_reply)
147 {
148    /* Newer Xwayland exposes an X11 extension we can check for */
149    if (xwl_reply && xwl_reply->present)
150       return true;
151
152    /* Older Xwayland uses the word "XWAYLAND" in the RandR output names */
153    if (!randr_reply || !randr_reply->present)
154       return false;
155
156    xcb_randr_query_version_cookie_t ver_cookie =
157       xcb_randr_query_version_unchecked(conn, 1, 3);
158    xcb_randr_query_version_reply_t *ver_reply =
159       xcb_randr_query_version_reply(conn, ver_cookie, NULL);
160    bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
161                                        ver_reply->minor_version >= 3);
162    free(ver_reply);
163
164    if (!has_randr_v1_3)
165       return false;
166
167    const xcb_setup_t *setup = xcb_get_setup(conn);
168    xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
169
170    xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
171       xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
172    xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
173       xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);
174
175    if (!gsr_reply || gsr_reply->num_outputs == 0) {
176       free(gsr_reply);
177       return false;
178    }
179
180    xcb_randr_output_t *randr_outputs =
181       xcb_randr_get_screen_resources_current_outputs(gsr_reply);
182    xcb_randr_get_output_info_cookie_t goi_cookie =
183       xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
184    free(gsr_reply);
185
186    xcb_randr_get_output_info_reply_t *goi_reply =
187       xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
188    if (!goi_reply) {
189       return false;
190    }
191
192    char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
193    bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
194    free(goi_reply);
195
196    return is_xwayland;
197 }
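
/* Distilled sketch (illustrative, assumes nothing beyond libxcb) of the
 * extension-based path this commit adds: newer Xwayland advertises an X11
 * extension literally named "XWAYLAND", so a single xcb_query_extension()
 * round-trip answers the question; older servers still need the RandR
 * output-name heuristic above.
 */
#if 0
static bool
example_is_xwayland(xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t cookie =
      xcb_query_extension(conn, 8, "XWAYLAND");
   xcb_query_extension_reply_t *reply =
      xcb_query_extension_reply(conn, cookie, NULL);
   bool present = reply && reply->present;
   free(reply);
   return present;
}
#endif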
198
199 static struct wsi_x11_connection *
200 wsi_x11_connection_create(struct wsi_device *wsi_dev,
201                           xcb_connection_t *conn)
202 {
203    xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
204                                 amd_cookie, nv_cookie, shm_cookie, sync_cookie,
205                                 xfixes_cookie, xwl_cookie;
206    xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
207                                *amd_reply, *nv_reply, *shm_reply = NULL,
208                                *xfixes_reply, *xwl_reply;
209    bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
210                     wsi_dev->has_import_memory_host;
211    bool has_dri3_v1_2 = false;
212    bool has_present_v1_2 = false;
213
214    struct wsi_x11_connection *wsi_conn =
215       vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
216                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
217    if (!wsi_conn)
218       return NULL;
219
220    sync_cookie = xcb_query_extension(conn, 4, "SYNC");
221    dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
222    pres_cookie = xcb_query_extension(conn, 7, "Present");
223    randr_cookie = xcb_query_extension(conn, 5, "RANDR");
224    xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
225    xwl_cookie = xcb_query_extension(conn, 8, "XWAYLAND");
226
227    if (wants_shm)
228       shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
229
230    /* We try to be nice to users and emit a warning if they try to use a
231     * Vulkan application on a system without DRI3 enabled.  However, this ends
232     * up spewing the warning when a user has, for example, both Intel
233     * integrated graphics and a discrete card with proprietary drivers and is
234     * running on the discrete card with the proprietary DDX.  In this case, we
235     * really don't want to print the warning because it just confuses users.
236     * As a heuristic to detect this case, we check for a couple of proprietary
237     * X11 extensions.
238     */
239    amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
240    nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
241
242    xcb_discard_reply(conn, sync_cookie.sequence);
243    dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
244    pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
245    randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
246    amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
247    nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
248    xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
249    xwl_reply = xcb_query_extension_reply(conn, xwl_cookie, NULL);
250    if (wants_shm)
251       shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
252    if (!dri3_reply || !pres_reply || !xfixes_reply) {
253       free(dri3_reply);
254       free(pres_reply);
255       free(xfixes_reply);
256       free(xwl_reply);
257       free(randr_reply);
258       free(amd_reply);
259       free(nv_reply);
260       if (wants_shm)
261          free(shm_reply);
262       vk_free(&wsi_dev->instance_alloc, wsi_conn);
263       return NULL;
264    }
265
266    wsi_conn->has_dri3 = dri3_reply->present != 0;
267 #ifdef HAVE_DRI3_MODIFIERS
268    if (wsi_conn->has_dri3) {
269       xcb_dri3_query_version_cookie_t ver_cookie;
270       xcb_dri3_query_version_reply_t *ver_reply;
271
272       ver_cookie = xcb_dri3_query_version(conn, 1, 2);
273       ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
274       has_dri3_v1_2 = ver_reply != NULL &&
275          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
276       free(ver_reply);
277    }
278 #endif
279
280    wsi_conn->has_present = pres_reply->present != 0;
281 #ifdef HAVE_DRI3_MODIFIERS
282    if (wsi_conn->has_present) {
283       xcb_present_query_version_cookie_t ver_cookie;
284       xcb_present_query_version_reply_t *ver_reply;
285
286       ver_cookie = xcb_present_query_version(conn, 1, 2);
287       ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
288       has_present_v1_2 = ver_reply != NULL &&
289          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
290       free(ver_reply);
291    }
292 #endif
293
294    wsi_conn->has_xfixes = xfixes_reply->present != 0;
295    if (wsi_conn->has_xfixes) {
296       xcb_xfixes_query_version_cookie_t ver_cookie;
297       xcb_xfixes_query_version_reply_t *ver_reply;
298
299       ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
300       ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
301       wsi_conn->has_xfixes = ver_reply != NULL && ver_reply->major_version >= 2;
302       free(ver_reply);
303    }
304
305    wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn, randr_reply,
306                                                    xwl_reply);
307
308    wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
309    wsi_conn->is_proprietary_x11 = false;
310    if (amd_reply && amd_reply->present)
311       wsi_conn->is_proprietary_x11 = true;
312    if (nv_reply && nv_reply->present)
313       wsi_conn->is_proprietary_x11 = true;
314
315    wsi_conn->has_mit_shm = false;
316    if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
317       bool has_mit_shm = shm_reply != NULL && shm_reply->present != 0;
318
319       xcb_shm_query_version_cookie_t ver_cookie;
320       xcb_shm_query_version_reply_t *ver_reply;
321
322       ver_cookie = xcb_shm_query_version(conn);
323       ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);
324
325       has_mit_shm = has_mit_shm && ver_reply != NULL && ver_reply->shared_pixmaps;
326       free(ver_reply);
327       xcb_void_cookie_t cookie;
328       xcb_generic_error_t *error;
329
330       if (has_mit_shm) {
331          cookie = xcb_shm_detach_checked(conn, 0);
332          if ((error = xcb_request_check(conn, cookie))) {
333             if (error->error_code != BadRequest)
334                wsi_conn->has_mit_shm = true;
335             free(error);
336          }
337       }
338    }
339
340    free(dri3_reply);
341    free(pres_reply);
342    free(randr_reply);
343    free(xwl_reply);
344    free(amd_reply);
345    free(nv_reply);
346    if (wants_shm)
347       free(shm_reply);
348
349    return wsi_conn;
350 }
351
352 static void
353 wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
354                            struct wsi_x11_connection *conn)
355 {
356    vk_free(&wsi_dev->instance_alloc, conn);
357 }
358
359 static bool
360 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
361 {
362   if (wsi_conn->has_dri3)
363     return true;
364   if (!wsi_conn->is_proprietary_x11) {
365     fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
366                     "Note: you can probably enable DRI3 in your Xorg config\n");
367   }
368   return false;
369 }
370
371 /**
372  * Get internal struct representing an xcb_connection_t.
373  *
374  * This may allocate the struct, but the caller does not own it. It is
375  * deleted in wsi_x11_finish_wsi via the hash table it is inserted into.
376  *
377  * If the allocation fails NULL is returned.
378  */
379 static struct wsi_x11_connection *
380 wsi_x11_get_connection(struct wsi_device *wsi_dev,
381                        xcb_connection_t *conn)
382 {
383    struct wsi_x11 *wsi =
384       (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
385
386    pthread_mutex_lock(&wsi->mutex);
387
388    struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
389    if (!entry) {
390       /* We're about to make a bunch of blocking calls.  Let's drop the
391        * mutex for now so we don't block up too badly.
392        */
393       pthread_mutex_unlock(&wsi->mutex);
394
395       struct wsi_x11_connection *wsi_conn =
396          wsi_x11_connection_create(wsi_dev, conn);
397       if (!wsi_conn)
398          return NULL;
399
400       pthread_mutex_lock(&wsi->mutex);
401
402       entry = _mesa_hash_table_search(wsi->connections, conn);
403       if (entry) {
404          /* Oops, someone raced us to it */
405          wsi_x11_connection_destroy(wsi_dev, wsi_conn);
406       } else {
407          entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
408       }
409    }
410
411    pthread_mutex_unlock(&wsi->mutex);
412
413    return entry->data;
414 }
415
416 struct surface_format {
417    VkFormat format;
418    unsigned bits_per_rgb;
419 };
420
421 static const struct surface_format formats[] = {
422    { VK_FORMAT_B8G8R8A8_SRGB,             8 },
423    { VK_FORMAT_B8G8R8A8_UNORM,            8 },
424    { VK_FORMAT_A2R10G10B10_UNORM_PACK32, 10 },
425 };
426
427 static const VkPresentModeKHR present_modes[] = {
428    VK_PRESENT_MODE_IMMEDIATE_KHR,
429    VK_PRESENT_MODE_MAILBOX_KHR,
430    VK_PRESENT_MODE_FIFO_KHR,
431    VK_PRESENT_MODE_FIFO_RELAXED_KHR,
432 };
433
434 static xcb_screen_t *
435 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
436 {
437    xcb_screen_iterator_t screen_iter =
438       xcb_setup_roots_iterator(xcb_get_setup(conn));
439
440    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
441       if (screen_iter.data->root == root)
442          return screen_iter.data;
443    }
444
445    return NULL;
446 }
447
448 static xcb_visualtype_t *
449 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
450                       unsigned *depth)
451 {
452    xcb_depth_iterator_t depth_iter =
453       xcb_screen_allowed_depths_iterator(screen);
454
455    for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
456       xcb_visualtype_iterator_t visual_iter =
457          xcb_depth_visuals_iterator (depth_iter.data);
458
459       for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
460          if (visual_iter.data->visual_id == visual_id) {
461             if (depth)
462                *depth = depth_iter.data->depth;
463             return visual_iter.data;
464          }
465       }
466    }
467
468    return NULL;
469 }
470
471 static xcb_visualtype_t *
472 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
473 {
474    xcb_screen_iterator_t screen_iter =
475       xcb_setup_roots_iterator(xcb_get_setup(conn));
476
477    /* For this we have to iterate over all of the screens, which is rather
478     * annoying.  Fortunately, there is probably only 1.
479     */
480    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
481       xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
482                                                        visual_id, NULL);
483       if (visual)
484          return visual;
485    }
486
487    return NULL;
488 }
489
490 static xcb_visualtype_t *
491 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
492                           unsigned *depth)
493 {
494    xcb_query_tree_cookie_t tree_cookie;
495    xcb_get_window_attributes_cookie_t attrib_cookie;
496    xcb_query_tree_reply_t *tree;
497    xcb_get_window_attributes_reply_t *attrib;
498
499    tree_cookie = xcb_query_tree(conn, window);
500    attrib_cookie = xcb_get_window_attributes(conn, window);
501
502    tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
503    attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
504    if (attrib == NULL || tree == NULL) {
505       free(attrib);
506       free(tree);
507       return NULL;
508    }
509
510    xcb_window_t root = tree->root;
511    xcb_visualid_t visual_id = attrib->visual;
512    free(attrib);
513    free(tree);
514
515    xcb_screen_t *screen = get_screen_for_root(conn, root);
516    if (screen == NULL)
517       return NULL;
518
519    return screen_get_visualtype(screen, visual_id, depth);
520 }
521
522 static bool
523 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
524 {
525    uint32_t rgb_mask = visual->red_mask |
526                        visual->green_mask |
527                        visual->blue_mask;
528
529    uint32_t all_mask = 0xffffffff >> (32 - depth);
530
531    /* Do we have bits left over after RGB? */
532    return (all_mask & ~rgb_mask) != 0;
533 }
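
/* Worked example (illustrative): for a depth-32 TrueColor visual with 8-bit
 * channels, red_mask | green_mask | blue_mask == 0x00ffffff and
 * all_mask == 0xffffffff, so all_mask & ~rgb_mask == 0xff000000 != 0 and the
 * visual is treated as having alpha. A depth-24 visual gives
 * all_mask == 0x00ffffff, leaving no bits over, so it has no alpha.
 */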
534
535 static bool
536 visual_supported(xcb_visualtype_t *visual)
537 {
538    if (!visual)
539       return false;
540
541    return visual->bits_per_rgb_value == 8 || visual->bits_per_rgb_value == 10;
542 }
543
544 VKAPI_ATTR VkBool32 VKAPI_CALL
545 wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
546                                                uint32_t queueFamilyIndex,
547                                                xcb_connection_t *connection,
548                                                xcb_visualid_t visual_id)
549 {
550    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
551    struct wsi_device *wsi_device = pdevice->wsi_device;
552    struct wsi_x11_connection *wsi_conn =
553       wsi_x11_get_connection(wsi_device, connection);
554
555    if (!wsi_conn)
556       return false;
557
558    if (!wsi_device->sw) {
559       if (!wsi_x11_check_for_dri3(wsi_conn))
560          return false;
561    }
562
563    if (!visual_supported(connection_get_visualtype(connection, visual_id)))
564       return false;
565
566    return true;
567 }
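
/* Application-side sketch (illustrative; phys_dev, conn and screen are
 * assumed to exist in the caller): querying presentation support for queue
 * family 0 against the screen's root visual before creating an XCB surface.
 */
#if 0
   VkBool32 supported =
      vkGetPhysicalDeviceXcbPresentationSupportKHR(phys_dev, 0 /* queue family */,
                                                   conn, screen->root_visual);
#endif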
568
569 VKAPI_ATTR VkBool32 VKAPI_CALL
570 wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
571                                                 uint32_t queueFamilyIndex,
572                                                 Display *dpy,
573                                                 VisualID visualID)
574 {
575    return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
576                                                          queueFamilyIndex,
577                                                          XGetXCBConnection(dpy),
578                                                          visualID);
579 }
580
581 static xcb_connection_t*
582 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
583 {
584    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
585       return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
586    else
587       return ((VkIcdSurfaceXcb *)icd_surface)->connection;
588 }
589
590 static xcb_window_t
591 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
592 {
593    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
594       return ((VkIcdSurfaceXlib *)icd_surface)->window;
595    else
596       return ((VkIcdSurfaceXcb *)icd_surface)->window;
597 }
598
599 static VkResult
600 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
601                         struct wsi_device *wsi_device,
602                         uint32_t queueFamilyIndex,
603                         VkBool32* pSupported)
604 {
605    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
606    xcb_window_t window = x11_surface_get_window(icd_surface);
607
608    struct wsi_x11_connection *wsi_conn =
609       wsi_x11_get_connection(wsi_device, conn);
610    if (!wsi_conn)
611       return VK_ERROR_OUT_OF_HOST_MEMORY;
612
613    if (!wsi_device->sw) {
614       if (!wsi_x11_check_for_dri3(wsi_conn)) {
615          *pSupported = false;
616          return VK_SUCCESS;
617       }
618    }
619
620    if (!visual_supported(get_visualtype_for_window(conn, window, NULL))) {
621       *pSupported = false;
622       return VK_SUCCESS;
623    }
624
625    *pSupported = true;
626    return VK_SUCCESS;
627 }
628
629 static uint32_t
630 x11_get_min_image_count(const struct wsi_device *wsi_device)
631 {
632    if (wsi_device->x11.override_minImageCount)
633       return wsi_device->x11.override_minImageCount;
634
635    /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
636     * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
637     * the render latency is CPU duration + GPU duration.
638     *
639     * This means that with scanout from pageflipping we need 3 frames to run
640     * full speed:
641     * 1) CPU rendering work
642     * 2) GPU rendering work
643     * 3) scanout
644     *
645     * Once we have a nonblocking acquire that returns a semaphore we can merge
646     * 1 and 3. Hence the ideal implementation needs only 2 images, but games
647     * cannot tell that we currently do not have an ideal implementation and
648     * that they hence need to allocate 3 images. So let us do it for them.
649     *
650     * This is a tradeoff as it uses more memory than needed for non-fullscreen
651     * and non-performance intensive applications.
652     */
653    return 3;
654 }
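
/* Worked example (illustrative): with a 4 ms CPU pass and a 6 ms GPU pass, a
 * pipelined game produces a frame every MAX(4, 6) = 6 ms while each frame is
 * in flight for 4 + 6 = 10 ms. At any instant one image can be on the CPU,
 * one on the GPU and one in scanout, which is why three images are needed to
 * run at full speed.
 */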
655
656 static VkResult
657 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
658                              struct wsi_device *wsi_device,
659                              VkSurfaceCapabilitiesKHR *caps)
660 {
661    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
662    xcb_window_t window = x11_surface_get_window(icd_surface);
663    xcb_get_geometry_cookie_t geom_cookie;
664    xcb_generic_error_t *err;
665    xcb_get_geometry_reply_t *geom;
666    unsigned visual_depth;
667
668    geom_cookie = xcb_get_geometry(conn, window);
669
670    /* This does a round-trip.  This is why we do get_geometry first and
671     * wait to read the reply until after we have a visual.
672     */
673    xcb_visualtype_t *visual =
674       get_visualtype_for_window(conn, window, &visual_depth);
675
676    if (!visual)
677       return VK_ERROR_SURFACE_LOST_KHR;
678
679    geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
680    if (geom) {
681       VkExtent2D extent = { geom->width, geom->height };
682       caps->currentExtent = extent;
683       caps->minImageExtent = extent;
684       caps->maxImageExtent = extent;
685    }
686    free(err);
687    free(geom);
688    if (!geom)
689        return VK_ERROR_SURFACE_LOST_KHR;
690
691    if (visual_has_alpha(visual, visual_depth)) {
692       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
693                                       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
694    } else {
695       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
696                                       VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
697    }
698
699    caps->minImageCount = x11_get_min_image_count(wsi_device);
700    /* There is no real maximum */
701    caps->maxImageCount = 0;
702
703    caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
704    caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
705    caps->maxImageArrayLayers = 1;
706    caps->supportedUsageFlags =
707       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
708       VK_IMAGE_USAGE_SAMPLED_BIT |
709       VK_IMAGE_USAGE_TRANSFER_DST_BIT |
710       VK_IMAGE_USAGE_STORAGE_BIT |
711       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
712       VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
713
714    return VK_SUCCESS;
715 }
716
717 static VkResult
718 x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
719                               struct wsi_device *wsi_device,
720                               const void *info_next,
721                               VkSurfaceCapabilities2KHR *caps)
722 {
723    assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
724
725    VkResult result =
726       x11_surface_get_capabilities(icd_surface, wsi_device,
727                                    &caps->surfaceCapabilities);
728
729    if (result != VK_SUCCESS)
730       return result;
731
732    vk_foreach_struct(ext, caps->pNext) {
733       switch (ext->sType) {
734       case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
735          VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
736          protected->supportsProtected = VK_FALSE;
737          break;
738       }
739
740       default:
741          /* Ignored */
742          break;
743       }
744    }
745
746    return result;
747 }
748
749 static bool
750 get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
751                       VkFormat *sorted_formats, unsigned *count)
752 {
753    xcb_connection_t *conn = x11_surface_get_connection(surface);
754    xcb_window_t window = x11_surface_get_window(surface);
755    xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL);
756    if (!visual)
757       return false;
758
759    *count = 0;
760    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
761       if (formats[i].bits_per_rgb == visual->bits_per_rgb_value)
762          sorted_formats[(*count)++] = formats[i].format;
763    }
764
765    if (wsi_device->force_bgra8_unorm_first) {
766       for (unsigned i = 0; i < *count; i++) {
767          if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
768             sorted_formats[i] = sorted_formats[0];
769             sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
770             break;
771          }
772       }
773    }
774
775    return true;
776 }
777
778 static VkResult
779 x11_surface_get_formats(VkIcdSurfaceBase *surface,
780                         struct wsi_device *wsi_device,
781                         uint32_t *pSurfaceFormatCount,
782                         VkSurfaceFormatKHR *pSurfaceFormats)
783 {
784    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
785                           pSurfaceFormats, pSurfaceFormatCount);
786
787    unsigned count;
788    VkFormat sorted_formats[ARRAY_SIZE(formats)];
789    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
790       return VK_ERROR_SURFACE_LOST_KHR;
791
792    for (unsigned i = 0; i < count; i++) {
793       vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
794          f->format = sorted_formats[i];
795          f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
796       }
797    }
798
799    return vk_outarray_status(&out);
800 }
801
802 static VkResult
803 x11_surface_get_formats2(VkIcdSurfaceBase *surface,
804                         struct wsi_device *wsi_device,
805                         const void *info_next,
806                         uint32_t *pSurfaceFormatCount,
807                         VkSurfaceFormat2KHR *pSurfaceFormats)
808 {
809    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
810                           pSurfaceFormats, pSurfaceFormatCount);
811
812    unsigned count;
813    VkFormat sorted_formats[ARRAY_SIZE(formats)];
814    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
815       return VK_ERROR_SURFACE_LOST_KHR;
816
817    for (unsigned i = 0; i < count; i++) {
818       vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
819          assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
820          f->surfaceFormat.format = sorted_formats[i];
821          f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
822       }
823    }
824
825    return vk_outarray_status(&out);
826 }
827
828 static VkResult
829 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
830                               uint32_t *pPresentModeCount,
831                               VkPresentModeKHR *pPresentModes)
832 {
833    if (pPresentModes == NULL) {
834       *pPresentModeCount = ARRAY_SIZE(present_modes);
835       return VK_SUCCESS;
836    }
837
838    *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
839    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
840
841    return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
842       VK_INCOMPLETE : VK_SUCCESS;
843 }
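
/* Caller-side sketch (illustrative) of the standard Vulkan two-call pattern
 * this implements: first pass NULL to learn the count, then call again with
 * storage. phys_dev and surface are assumed to exist in the caller.
 */
#if 0
   uint32_t count = 0;
   vkGetPhysicalDeviceSurfacePresentModesKHR(phys_dev, surface, &count, NULL);
   VkPresentModeKHR *modes = malloc(count * sizeof(*modes));
   vkGetPhysicalDeviceSurfacePresentModesKHR(phys_dev, surface, &count, modes);
#endif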
844
845 static VkResult
846 x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
847                                    struct wsi_device *wsi_device,
848                                    uint32_t* pRectCount,
849                                    VkRect2D* pRects)
850 {
851    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
852    xcb_window_t window = x11_surface_get_window(icd_surface);
853    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
854
855    vk_outarray_append_typed(VkRect2D, &out, rect) {
856       xcb_generic_error_t *err = NULL;
857       xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
858       xcb_get_geometry_reply_t *geom =
859          xcb_get_geometry_reply(conn, geom_cookie, &err);
860       free(err);
861       if (geom) {
862          *rect = (VkRect2D) {
863             .offset = { 0, 0 },
864             .extent = { geom->width, geom->height },
865          };
866       }
867       free(geom);
868       if (!geom)
869           return VK_ERROR_SURFACE_LOST_KHR;
870    }
871
872    return vk_outarray_status(&out);
873 }
874
875 VKAPI_ATTR VkResult VKAPI_CALL
876 wsi_CreateXcbSurfaceKHR(VkInstance _instance,
877                         const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
878                         const VkAllocationCallbacks *pAllocator,
879                         VkSurfaceKHR *pSurface)
880 {
881    VK_FROM_HANDLE(vk_instance, instance, _instance);
882    VkIcdSurfaceXcb *surface;
883
884    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
885
886    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
887                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
888    if (surface == NULL)
889       return VK_ERROR_OUT_OF_HOST_MEMORY;
890
891    surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
892    surface->connection = pCreateInfo->connection;
893    surface->window = pCreateInfo->window;
894
895    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
896    return VK_SUCCESS;
897 }
898
899 VKAPI_ATTR VkResult VKAPI_CALL
900 wsi_CreateXlibSurfaceKHR(VkInstance _instance,
901                          const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
902                          const VkAllocationCallbacks *pAllocator,
903                          VkSurfaceKHR *pSurface)
904 {
905    VK_FROM_HANDLE(vk_instance, instance, _instance);
906    VkIcdSurfaceXlib *surface;
907
908    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
909
910    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
911                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
912    if (surface == NULL)
913       return VK_ERROR_OUT_OF_HOST_MEMORY;
914
915    surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
916    surface->dpy = pCreateInfo->dpy;
917    surface->window = pCreateInfo->window;
918
919    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
920    return VK_SUCCESS;
921 }
922
923 struct x11_image {
924    struct wsi_image                          base;
925    xcb_pixmap_t                              pixmap;
926    xcb_xfixes_region_t                       update_region; /* long lived XID */
927    xcb_xfixes_region_t                       update_area;   /* the above or None */
928    bool                                      busy;
929    bool                                      present_queued;
930    struct xshmfence *                        shm_fence;
931    uint32_t                                  sync_fence;
932    uint32_t                                  serial;
933    xcb_shm_seg_t                             shmseg;
934    int                                       shmid;
935    uint8_t *                                 shmaddr;
936 };
937
938 struct x11_swapchain {
939    struct wsi_swapchain                        base;
940
941    bool                                         has_dri3_modifiers;
942    bool                                         has_mit_shm;
943
944    xcb_connection_t *                           conn;
945    xcb_window_t                                 window;
946    xcb_gc_t                                     gc;
947    uint32_t                                     depth;
948    VkExtent2D                                   extent;
949
950    xcb_present_event_t                          event_id;
951    xcb_special_event_t *                        special_event;
952    uint64_t                                     send_sbc;
953    uint64_t                                     last_present_msc;
954    uint32_t                                     stamp;
955    atomic_int                                   sent_image_count;
956
957    bool                                         has_present_queue;
958    bool                                         has_acquire_queue;
959    VkResult                                     status;
960    bool                                         copy_is_suboptimal;
961    struct wsi_queue                             present_queue;
962    struct wsi_queue                             acquire_queue;
963    pthread_t                                    queue_manager;
964
965    struct x11_image                             images[0];
966 };
967 VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
968                                VK_OBJECT_TYPE_SWAPCHAIN_KHR)
969
970 /**
971  * Update the swapchain status with the result of an operation, and return
972  * the combined status. The chain status will eventually be returned from
973  * AcquireNextImage and QueuePresent.
974  *
975  * We make sure to 'stick' more pessimistic statuses: an out-of-date error
976  * is permanent once seen, and every subsequent call will return this. If
977  * this has not been seen, success will be returned.
978  */
979 static VkResult
980 _x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
981                       const char *file, int line)
982 {
983    /* Prioritise returning existing errors for consistency. */
984    if (chain->status < 0)
985       return chain->status;
986
987    /* If we have a new error, mark it as permanent on the chain and return. */
988    if (result < 0) {
989 #ifndef NDEBUG
990       fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
991               file, line, vk_Result_to_str(result));
992 #endif
993       chain->status = result;
994       return result;
995    }
996
997    /* Return temporary errors, but don't persist them. */
998    if (result == VK_TIMEOUT || result == VK_NOT_READY)
999       return result;
1000
1001    /* Suboptimal isn't an error, but is a status which sticks to the swapchain
1002     * and is always returned rather than success.
1003     */
1004    if (result == VK_SUBOPTIMAL_KHR) {
1005 #ifndef NDEBUG
1006       if (chain->status != VK_SUBOPTIMAL_KHR) {
1007          fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
1008                  file, line, vk_Result_to_str(result));
1009       }
1010 #endif
1011       chain->status = result;
1012       return result;
1013    }
1014
1015    /* No changes, so return the last status. */
1016    return chain->status;
1017 }
1018 #define x11_swapchain_result(chain, result) \
1019    _x11_swapchain_result(chain, result, __FILE__, __LINE__)
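
/* Usage sketch (illustrative; some_present_operation is a made-up stand-in):
 * call sites funnel every VkResult through the macro so that errors and
 * VK_SUBOPTIMAL_KHR stick to the chain while transient results pass through.
 */
#if 0
   VkResult result = some_present_operation(chain);
   result = x11_swapchain_result(chain, result);   /* record and combine */
   if (result < 0)
      return result;                               /* now permanent      */
#endif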
1020
1021 static struct wsi_image *
1022 x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
1023 {
1024    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1025    return &chain->images[image_index].base;
1026 }
1027
1028 /**
1029  * Process an X11 Present event. Does not update chain->status.
1030  */
1031 static VkResult
1032 x11_handle_dri3_present_event(struct x11_swapchain *chain,
1033                               xcb_present_generic_event_t *event)
1034 {
1035    switch (event->evtype) {
1036    case XCB_PRESENT_CONFIGURE_NOTIFY: {
1037       xcb_present_configure_notify_event_t *config = (void *) event;
1038
1039       if (config->width != chain->extent.width ||
1040           config->height != chain->extent.height)
1041          return VK_SUBOPTIMAL_KHR;
1042
1043       break;
1044    }
1045
1046    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
1047       xcb_present_idle_notify_event_t *idle = (void *) event;
1048
1049       for (unsigned i = 0; i < chain->base.image_count; i++) {
1050          if (chain->images[i].pixmap == idle->pixmap) {
1051             chain->images[i].busy = false;
1052             chain->sent_image_count--;
1053             assert(chain->sent_image_count >= 0);
1054             if (chain->has_acquire_queue)
1055                wsi_queue_push(&chain->acquire_queue, i);
1056             break;
1057          }
1058       }
1059
1060       break;
1061    }
1062
1063    case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
1064       xcb_present_complete_notify_event_t *complete = (void *) event;
1065       if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
1066          unsigned i;
1067          for (i = 0; i < chain->base.image_count; i++) {
1068             struct x11_image *image = &chain->images[i];
1069             if (image->present_queued && image->serial == complete->serial)
1070                image->present_queued = false;
1071          }
1072          chain->last_present_msc = complete->msc;
1073       }
1074
1075       VkResult result = VK_SUCCESS;
1076       switch (complete->mode) {
1077       case XCB_PRESENT_COMPLETE_MODE_COPY:
1078          if (chain->copy_is_suboptimal)
1079             result = VK_SUBOPTIMAL_KHR;
1080          break;
1081       case XCB_PRESENT_COMPLETE_MODE_FLIP:
1082          /* If we ever go from flipping to copying, the odds are very likely
1083           * that we could reallocate in a more optimal way if we didn't have
1084           * to care about scanout, so we always do this.
1085           */
1086          chain->copy_is_suboptimal = true;
1087          break;
1088 #ifdef HAVE_DRI3_MODIFIERS
1089       case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
1090          /* The winsys is now trying to flip directly and cannot due to our
1091           * configuration. Request the user reallocate.
1092           */
1093          result = VK_SUBOPTIMAL_KHR;
1094          break;
1095 #endif
1096       default:
1097          break;
1098       }
1099
1100       return result;
1101    }
1102
1103    default:
1104       break;
1105    }
1106
1107    return VK_SUCCESS;
1108 }
1109
1110
1111 static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
1112 {
1113    uint64_t current_time = os_time_get_nano();
1114
1115    timeout = MIN2(UINT64_MAX - current_time, timeout);
1116
1117    return current_time + timeout;
1118 }
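
/* The MIN2 clamp above guards against overflow: applications may pass
 * timeout == UINT64_MAX to mean "wait forever", and current_time + timeout
 * would otherwise wrap around. Clamping to UINT64_MAX - current_time makes
 * the sum saturate at UINT64_MAX instead.
 */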
1119
1120 /**
1121  * Acquire a ready-to-use image directly from our swapchain. If all images are
1122  * busy, wait until one becomes free or until the timeout expires.
1123  */
1124 static VkResult
1125 x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
1126                                 uint32_t *image_index, uint64_t timeout)
1127 {
1128    xcb_generic_event_t *event;
1129    struct pollfd pfds;
1130    uint64_t atimeout;
1131    while (1) {
1132       for (uint32_t i = 0; i < chain->base.image_count; i++) {
1133          if (!chain->images[i].busy) {
1134             /* We found a non-busy image */
1135             xshmfence_await(chain->images[i].shm_fence);
1136             *image_index = i;
1137             chain->images[i].busy = true;
1138             return x11_swapchain_result(chain, VK_SUCCESS);
1139          }
1140       }
1141
1142       xcb_flush(chain->conn);
1143
1144       if (timeout == UINT64_MAX) {
1145          event = xcb_wait_for_special_event(chain->conn, chain->special_event);
1146          if (!event)
1147             return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
1148       } else {
1149          event = xcb_poll_for_special_event(chain->conn, chain->special_event);
1150          if (!event) {
1151             int ret;
1152             if (timeout == 0)
1153                return x11_swapchain_result(chain, VK_NOT_READY);
1154
1155             atimeout = wsi_get_absolute_timeout(timeout);
1156
1157             pfds.fd = xcb_get_file_descriptor(chain->conn);
1158             pfds.events = POLLIN;
1159             ret = poll(&pfds, 1, timeout / 1000 / 1000);
1160             if (ret == 0)
1161                return x11_swapchain_result(chain, VK_TIMEOUT);
1162             if (ret == -1)
1163                return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
1164
1165             /* If a non-special event arrives, the fd can still poll as
1166              * readable, so recalculate the remaining timeout just in case.
1167              */
1168             uint64_t current_time = os_time_get_nano();
1169             if (atimeout > current_time)
1170                timeout = atimeout - current_time;
1171             else
1172                timeout = 0;
1173             continue;
1174          }
1175       }
1176
1177       /* Update the swapchain status here. We may catch non-fatal errors here,
1178        * in which case we need to update the status and continue.
1179        */
1180       VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
1181       /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1182       result = x11_swapchain_result(chain, result);
1183       free(event);
1184       if (result < 0)
1185          return result;
1186    }
1187 }
1188
1189 /**
1190  * Acquire a ready-to-use image from the acquire-queue. Only relevant in fifo
1191  * presentation mode.
1192  */
1193 static VkResult
1194 x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
1195                                   uint32_t *image_index_out, uint64_t timeout)
1196 {
1197    assert(chain->has_acquire_queue);
1198
1199    uint32_t image_index;
1200    VkResult result = wsi_queue_pull(&chain->acquire_queue,
1201                                     &image_index, timeout);
1202    if (result < 0 || result == VK_TIMEOUT) {
1203       /* On error, the thread has shut down, so safe to update chain->status.
1204        * Calling x11_swapchain_result with VK_TIMEOUT won't modify
1205        * chain->status so that is also safe.
1206        */
1207       return x11_swapchain_result(chain, result);
1208    } else if (chain->status < 0) {
1209       return chain->status;
1210    }
1211
1212    assert(image_index < chain->base.image_count);
1213    xshmfence_await(chain->images[image_index].shm_fence);
1214
1215    *image_index_out = image_index;
1216
1217    return chain->status;
1218 }
1219
1220 /**
1221  * Send image to X server via Present extension.
1222  */
1223 static VkResult
1224 x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
1225                         uint64_t target_msc)
1226 {
1227    struct x11_image *image = &chain->images[image_index];
1228
1229    assert(image_index < chain->base.image_count);
1230
1231    uint32_t options = XCB_PRESENT_OPTION_NONE;
1232
1233    int64_t divisor = 0;
1234    int64_t remainder = 0;
1235
1236    struct wsi_x11_connection *wsi_conn =
1237       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1238    if (!wsi_conn)
1239       return VK_ERROR_OUT_OF_HOST_MEMORY;
1240
1241    if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1242        (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1243         wsi_conn->is_xwayland) ||
1244        chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1245       options |= XCB_PRESENT_OPTION_ASYNC;
1246
1247 #ifdef HAVE_DRI3_MODIFIERS
1248    if (chain->has_dri3_modifiers)
1249       options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1250 #endif
1251
1252    /* Poll for any available event and update the swapchain status. This could
1253     * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
1254     * associated X11 surface has been resized.
1255     */
1256    xcb_generic_event_t *event;
1257    while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
1258       VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
1259       /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1260       result = x11_swapchain_result(chain, result);
1261       free(event);
1262       if (result < 0)
1263          return result;
1264    }
1265
1266    xshmfence_reset(image->shm_fence);
1267
1268    ++chain->sent_image_count;
1269    assert(chain->sent_image_count <= chain->base.image_count);
1270
1271    ++chain->send_sbc;
1272    image->present_queued = true;
1273    image->serial = (uint32_t) chain->send_sbc;
1274
1275    xcb_void_cookie_t cookie =
1276       xcb_present_pixmap_checked(chain->conn,
1277                                  chain->window,
1278                                  image->pixmap,
1279                                  image->serial,
1280                                  0,                            /* valid */
1281                                  image->update_area,           /* update */
1282                                  0,                            /* x_off */
1283                                  0,                            /* y_off */
1284                                  XCB_NONE,                     /* target_crtc */
1285                                  XCB_NONE,
1286                                  image->sync_fence,
1287                                  options,
1288                                  target_msc,
1289                                  divisor,
1290                                  remainder, 0, NULL);
1291    xcb_generic_error_t *error = xcb_request_check(chain->conn, cookie);
1292    if (error) {
1293       free(error);
1294       return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
1295    }
1296
1297    return x11_swapchain_result(chain, VK_SUCCESS);
1298 }
1299
1300 /**
1301  * Send image to X server unaccelerated (software drivers).
1302  */
1303 static VkResult
1304 x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
1305                       uint64_t target_msc)
1306 {
1307    struct x11_image *image = &chain->images[image_index];
1308
1309    xcb_void_cookie_t cookie;
1310    void *myptr = image->base.cpu_map;
1311    size_t hdr_len = sizeof(xcb_put_image_request_t);
1312    int stride_b = image->base.row_pitches[0];
1313    size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
1314    uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
1315    chain->images[image_index].busy = false;
1316
1317    if (size < max_req_len) {
1318       cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1319                              chain->window,
1320                              chain->gc,
1321                              image->base.row_pitches[0] / 4,
1322                              chain->extent.height,
1323                              0,0,0,24,
1324                              image->base.row_pitches[0] * chain->extent.height,
1325                              image->base.cpu_map);
1326       xcb_discard_reply(chain->conn, cookie.sequence);
1327    } else {
1328       int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
1329       int y_start = 0;
1330       int y_todo = chain->extent.height;
1331       while (y_todo) {
1332          int this_lines = MIN2(num_lines, y_todo);
1333          cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1334                                 chain->window,
1335                                 chain->gc,
1336                                 image->base.row_pitches[0] / 4,
1337                                 this_lines,
1338                                 0,y_start,0,24,
1339                                 this_lines * stride_b,
1340                                 (const uint8_t *)myptr + (y_start * stride_b));
1341          xcb_discard_reply(chain->conn, cookie.sequence);
1342          y_start += this_lines;
1343          y_todo -= this_lines;
1344       }
1345    }
1346
1347    xcb_flush(chain->conn);
1348    return x11_swapchain_result(chain, VK_SUCCESS);
1349 }
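
/* Worked example (illustrative numbers): for a 1920x1080 32-bpp image,
 * stride_b = 1920 * 4 = 7680 bytes. Without BIG-REQUESTS the maximum request
 * length is 65535 4-byte units (262140 bytes), so the ~7.9 MiB image cannot
 * be sent in one xcb_put_image(); the else-branch above then sends it in
 * slices of roughly (262140 - 24-byte header) / 7680 = 34 rows per request.
 */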
1350
1351 /**
1352  * Send image to the X server for presentation at target_msc.
1353  */
1354 static VkResult
1355 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
1356                    uint64_t target_msc)
1357 {
1358    if (chain->base.wsi->sw && !chain->has_mit_shm)
1359       return x11_present_to_x11_sw(chain, image_index, target_msc);
1360    return x11_present_to_x11_dri3(chain, image_index, target_msc);
1361 }
1362
1363 /**
1364  * Acquire a ready-to-use image from the swapchain.
1365  *
1366  * This usually means that the image is not waiting on presentation and that it
1367  * has been released by the X server to be used again by the consumer.
1368  */
1369 static VkResult
1370 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
1371                        const VkAcquireNextImageInfoKHR *info,
1372                        uint32_t *image_index)
1373 {
1374    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1375    uint64_t timeout = info->timeout;
1376
1377    /* If the swapchain is in an error state, don't go any further. */
1378    if (chain->status < 0)
1379       return chain->status;
1380
1381    if (chain->base.wsi->sw && !chain->has_mit_shm) {
1382       for (unsigned i = 0; i < chain->base.image_count; i++) {
1383          if (!chain->images[i].busy) {
1384             *image_index = i;
1385             chain->images[i].busy = true;
1386             xcb_generic_error_t *err;
1387
1388             xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
1389             xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
1390             VkResult result = VK_SUCCESS;
1391             if (geom) {
1392                if (chain->extent.width != geom->width ||
1393                    chain->extent.height != geom->height)
1394                   result = VK_SUBOPTIMAL_KHR;
1395             } else {
1396                result = VK_ERROR_SURFACE_LOST_KHR;
1397             }
1398             free(err);
1399             free(geom);
1400             return result;
1401          }
1402       }
1403       return VK_NOT_READY;
1404    }
1405
1406    if (chain->has_acquire_queue) {
1407       return x11_acquire_next_image_from_queue(chain, image_index, timeout);
1408    } else {
1409       return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
1410    }
1411 }
1412
1413 #define MAX_DAMAGE_RECTS 64
1414
1415 /**
1416  * Queue a new presentation of an image that was previously acquired by the
1417  * consumer.
1418  *
1419  * Note that in immediate presentation mode this does not really queue the
1420  * presentation but directly asks the X server to show it.
1421  */
1422 static VkResult
1423 x11_queue_present(struct wsi_swapchain *anv_chain,
1424                   uint32_t image_index,
1425                   const VkPresentRegionKHR *damage)
1426 {
1427    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1428    xcb_xfixes_region_t update_area = 0;
1429
1430    /* If the swapchain is in an error state, don't go any further. */
1431    if (chain->status < 0)
1432       return chain->status;
1433
1434    if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
1435       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
1436       xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
1437
1438       update_area = chain->images[image_index].update_region;
1439       for (unsigned i = 0; i < damage->rectangleCount; i++) {
1440          const VkRectLayerKHR *rect = &damage->pRectangles[i];
1441          assert(rect->layer == 0);
1442          rects[i].x = rect->offset.x;
1443          rects[i].y = rect->offset.y;
1444          rects[i].width = rect->extent.width;
1445          rects[i].height = rect->extent.height;
1446       }
1447       xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
1448    }
1449    chain->images[image_index].update_area = update_area;
1450
1451    chain->images[image_index].busy = true;
1452    if (chain->has_present_queue) {
1453       wsi_queue_push(&chain->present_queue, image_index);
1454       return chain->status;
1455    } else {
1456       /* No present queue means immediate mode, so we present immediately. */
1457       return x11_present_to_x11(chain, image_index, 0);
1458    }
1459 }
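
/* Caller-side sketch (illustrative application code) of the damage path
 * above, using VK_KHR_incremental_present to restrict the update to a single
 * dirty rectangle. Note that more than MAX_DAMAGE_RECTS rectangles fall back
 * to presenting the whole surface (update_area stays 0).
 */
#if 0
   VkRectLayerKHR dirty = {
      .offset = { 0, 0 },
      .extent = { 256, 256 },
      .layer = 0,
   };
   VkPresentRegionKHR region = {
      .rectangleCount = 1,
      .pRectangles = &dirty,
   };
   VkPresentRegionsKHR regions = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
      .swapchainCount = 1,
      .pRegions = &region,
   };
   /* chain &regions into VkPresentInfoKHR::pNext before vkQueuePresentKHR() */
#endif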
1460
1461 /**
1462  * Decides if an early wait on buffer fences before buffer submission is required. That is for:
1463  *   - Mailbox mode, as otherwise the latest image in the queue might not be fully rendered at
1464  *     present time, which could lead to missing a frame.
1465  *   - Immediate mode under Xwayland, as it works practically the same as mailbox mode using the
1466  *     mailbox mechanism of Wayland. Sending a buffer with fences not yet signalled can make the
1467  *     compositor miss a frame when compositing the final image with this buffer.
1468  *
1469  * Note though that early waits can be disabled in general on Xwayland by setting the
1470  * 'vk_xwayland_wait_ready' DRIConf option to false.
1471  */
1472 static bool
1473 x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
1474                           struct wsi_x11_connection *wsi_conn,
1475                           VkPresentModeKHR present_mode)
1476 {
1477    if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
1478       return false;
1479    }
1480
1481    switch (present_mode) {
1482    case VK_PRESENT_MODE_MAILBOX_KHR:
1483       return true;
1484    case VK_PRESENT_MODE_IMMEDIATE_KHR:
1485       return wsi_conn->is_xwayland;
1486    default:
1487       return false;
1488    }
1489 }
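
/* Summary of the decision above (illustrative):
 *
 *   present mode   | Xorg    | Xwayland
 *   ---------------+---------+------------------------------------
 *   MAILBOX        | wait    | wait (unless vk_xwayland_wait_ready=false)
 *   IMMEDIATE      | no wait | wait (unless vk_xwayland_wait_ready=false)
 *   FIFO and rest  | no wait | no wait
 */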
1490
1491 /**
1492  * The number of images that are not owned by X11:
1493  *  (1) in the ownership of the app, or
1494  *  (2) waiting for the app to take ownership through an acquire, or
1495  *  (3) in the present queue waiting for the FIFO thread to present to X11.
1496  */
1497 static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
1498 {
1499    return chain->base.image_count - chain->sent_image_count;
1500 }
1501
1502 /**
1503  * Our queue manager. Although called x11_manage_fifo_queues, it only directly
1504  * manages the present-queue, and does so in fifo and mailbox presentation modes
1505  * (there is no present-queue in immediate mode, with the exception of Xwayland).
1506  *
1507  * Runs in a separate thread, blocks and reacts to queued images on the
1508  * present-queue
1509  *
1510  * In mailbox mode the queue management is simplified since we only need to
1511  * pull new images from the present queue and can directly present them.
1512  *
1513  * In fifo mode images can only be presented one after the other. To that end, after
1514  * sending an image to the X server we wait until it has either been
1515  * presented or released, and only then pull a new image from the present-queue.
1516  */
1517 static void *
1518 x11_manage_fifo_queues(void *state)
1519 {
1520    struct x11_swapchain *chain = state;
1521    struct wsi_x11_connection *wsi_conn =
1522       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1523    VkResult result = VK_SUCCESS;
1524
1525    assert(chain->has_present_queue);
1526
1527    u_thread_setname("WSI swapchain queue");
1528
1529    while (chain->status >= 0) {
1530       /* We can block here unconditionally because after an image was sent to
1531        * the server (later on in this loop) we ensure at least one image is
1532        * acquirable by the consumer, or we wait there for such an event.
1533        */
1534       uint32_t image_index = 0;
1535       result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
1536       assert(result != VK_TIMEOUT);
1537
1538       if (result < 0) {
1539          goto fail;
1540       } else if (chain->status < 0) {
1541          /* The status can change underneath us if the swapchain is destroyed
1542           * from another thread.
1543           */
1544          return NULL;
1545       }
1546
1547       /* Waiting for the GPU work to finish at this point in time is required in certain usage
1548        * scenarios. Otherwise we wait as usual in wsi_common_queue_present.
1549        */
1550       if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
1551                                     chain->base.present_mode)) {
1552          result = chain->base.wsi->WaitForFences(chain->base.device, 1,
1553                                         &chain->base.fences[image_index],
1554                                         true, UINT64_MAX);
1555          if (result != VK_SUCCESS) {
1556             result = VK_ERROR_OUT_OF_DATE_KHR;
1557             goto fail;
1558          }
1559       }
1560
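      /* With an acquire queue (fifo modes) we pace presents by targeting the
       * MSC one past the previous present; the X server will then show the
       * image no earlier than the next vblank after its predecessor.
       */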
1561       uint64_t target_msc = 0;
1562       if (chain->has_acquire_queue)
1563          target_msc = chain->last_present_msc + 1;
1564
1565       result = x11_present_to_x11(chain, image_index, target_msc);
1566       if (result < 0)
1567          goto fail;
1568
1569       if (chain->has_acquire_queue) {
1570          /* Assume this isn't a swapchain where we force 5 images, because those
1571           * don't end up with an acquire queue at the moment.
1572           */
1573          unsigned min_image_count = x11_get_min_image_count(chain->base.wsi);
1574
1575          /* With drirc overrides, some games end up with a swapchain that has
1576           * fewer than the minimum number of images. */
1577          min_image_count = MIN2(min_image_count, chain->base.image_count);
1578
1579          /* We always need to ensure that the app can have this number of images
1580           * acquired concurrently in between presents:
1581           * "VUID-vkAcquireNextImageKHR-swapchain-01802
1582           *  If the number of currently acquired images is greater than the difference
1583           *  between the number of images in swapchain and the value of
1584           *  VkSurfaceCapabilitiesKHR::minImageCount as returned by a call to
1585           *  vkGetPhysicalDeviceSurfaceCapabilities2KHR with the surface used to
1586           *  create swapchain, timeout must not be UINT64_MAX"
1587           */
1588          unsigned forward_progress_guaranteed_acquired_images =
1589             chain->base.image_count - min_image_count + 1;
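         /* Worked example: image_count = 3 with min_image_count = 2 yields
          * 3 - 2 + 1 = 2, so the loop below waits until at least two images
          * are back in driver ownership, guaranteeing the app forward
          * progress on acquire.
          */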
1590
1591          /* Wait for our presentation to occur and ensure we have at least one
1592           * image that can be acquired by the client afterwards. This ensures we
1593           * can pull on the present-queue on the next loop.
1594           */
1595          while (chain->images[image_index].present_queued ||
1596                /* If we have images in the present queue the outer loop won't block, and a
1597                 * break here would end up back at this loop; otherwise a break here satisfies
1598                 * VUID-vkAcquireNextImageKHR-swapchain-01802. */
1599                 x11_driver_owned_images(chain) < forward_progress_guaranteed_acquired_images) {
1600
1601             xcb_generic_event_t *event =
1602                xcb_wait_for_special_event(chain->conn, chain->special_event);
1603             if (!event) {
1604                result = VK_ERROR_SURFACE_LOST_KHR;
1605                goto fail;
1606             }
1607
1608             result = x11_handle_dri3_present_event(chain, (void *)event);
1609             /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1610             result = x11_swapchain_result(chain, result);
1611             free(event);
1612             if (result < 0)
1613                goto fail;
1614          }
1615       }
1616    }
1617
1618 fail:
1619    x11_swapchain_result(chain, result);
1620    if (chain->has_acquire_queue)
1621       wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
1622
1623    return NULL;
1624 }
1625
1626 static uint8_t *
1627 alloc_shm(struct wsi_image *imagew, unsigned size)
1628 {
1629 #ifdef HAVE_SYS_SHM_H
1630    struct x11_image *image = (struct x11_image *)imagew;
1631    image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
1632    if (image->shmid < 0)
1633       return NULL;
1634
1635    uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
1636    /* mark the segment immediately for deletion to avoid leaks */
1637    shmctl(image->shmid, IPC_RMID, 0);
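   /* With IPC_RMID pending, the kernel destroys the segment as soon as the
    * last attachment (ours here plus the X server's) is gone, so it cannot
    * outlive its users.
    */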
1638
1639    if (addr == (uint8_t *) -1)
1640       return NULL;
1641
1642    image->shmaddr = addr;
1643    return addr;
1644 #else
1645    return NULL;
1646 #endif
1647 }
1648
1649 static VkResult
1650 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
1651                const VkSwapchainCreateInfoKHR *pCreateInfo,
1652                const VkAllocationCallbacks* pAllocator,
1653                struct x11_image *image)
1654 {
1655    xcb_void_cookie_t cookie;
1656    VkResult result;
1657    uint32_t bpp = 32;
1658    int fence_fd;
1659
1660    result = wsi_create_image(&chain->base, &chain->base.image_info,
1661                              &image->base);
1662    if (result != VK_SUCCESS)
1663       return result;
1664
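   /* Create an empty XFixes region up front; x11_queue_present refills it on
    * each present with the damage rectangles that the application supplies
    * via VK_KHR_incremental_present.
    */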
1665    image->update_region = xcb_generate_id(chain->conn);
1666    xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
1667
1668    if (chain->base.wsi->sw) {
1669       if (!chain->has_mit_shm) {
1670          image->busy = false;
1671          return VK_SUCCESS;
1672       }
1673
1674       image->shmseg = xcb_generate_id(chain->conn);
1675
1676       xcb_shm_attach(chain->conn,
1677                      image->shmseg,
1678                      image->shmid,
1679                      0);
1680       image->pixmap = xcb_generate_id(chain->conn);
1681       cookie = xcb_shm_create_pixmap_checked(chain->conn,
1682                                              image->pixmap,
1683                                              chain->window,
1684                                              image->base.row_pitches[0] / 4,
1685                                              pCreateInfo->imageExtent.height,
1686                                              chain->depth,
1687                                              image->shmseg, 0);
1688       xcb_discard_reply(chain->conn, cookie.sequence);
1689       goto out_fence;
1690    }
1691    image->pixmap = xcb_generate_id(chain->conn);
1692
1693 #ifdef HAVE_DRI3_MODIFIERS
1694    if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
1695       /* If the image has a modifier, we must have DRI3 v1.2. */
1696       assert(chain->has_dri3_modifiers);
1697
1698       /* XCB wants one file descriptor per plane; all our planes share a single dma-buf fd, so duplicate it. */
1699       int fds[4] = { -1, -1, -1, -1 };
1700       for (int i = 0; i < image->base.num_planes; i++) {
1701          fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
1702          if (fds[i] == -1) {
1703             for (int j = 0; j < i; j++)
1704                close(fds[j]);
1705
1706             return VK_ERROR_OUT_OF_HOST_MEMORY;
1707          }
1708       }
1709
1710       cookie =
1711          xcb_dri3_pixmap_from_buffers_checked(chain->conn,
1712                                               image->pixmap,
1713                                               chain->window,
1714                                               image->base.num_planes,
1715                                               pCreateInfo->imageExtent.width,
1716                                               pCreateInfo->imageExtent.height,
1717                                               image->base.row_pitches[0],
1718                                               image->base.offsets[0],
1719                                               image->base.row_pitches[1],
1720                                               image->base.offsets[1],
1721                                               image->base.row_pitches[2],
1722                                               image->base.offsets[2],
1723                                               image->base.row_pitches[3],
1724                                               image->base.offsets[3],
1725                                               chain->depth, bpp,
1726                                               image->base.drm_modifier,
1727                                               fds);
1728    } else
1729 #endif
1730    {
1731       /* Without passing modifiers, we can't have multi-plane RGB images. */
1732       assert(image->base.num_planes == 1);
1733
1734       /* XCB will take ownership of the FD we pass it. */
1735       int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
1736       if (fd == -1)
1737          return VK_ERROR_OUT_OF_HOST_MEMORY;
1738
1739       cookie =
1740          xcb_dri3_pixmap_from_buffer_checked(chain->conn,
1741                                              image->pixmap,
1742                                              chain->window,
1743                                              image->base.sizes[0],
1744                                              pCreateInfo->imageExtent.width,
1745                                              pCreateInfo->imageExtent.height,
1746                                              image->base.row_pitches[0],
1747                                              chain->depth, bpp, fd);
1748    }
1749
1750    xcb_discard_reply(chain->conn, cookie.sequence);
1751
1752 out_fence:
1753    fence_fd = xshmfence_alloc_shm();
1754    if (fence_fd < 0)
1755       goto fail_pixmap;
1756
1757    image->shm_fence = xshmfence_map_shm(fence_fd);
1758    if (image->shm_fence == NULL)
1759       goto fail_shmfence_alloc;
1760
1761    image->sync_fence = xcb_generate_id(chain->conn);
1762    xcb_dri3_fence_from_fd(chain->conn,
1763                           image->pixmap,
1764                           image->sync_fence,
1765                           false,
1766                           fence_fd);
1767
1768    image->busy = false;
1769    xshmfence_trigger(image->shm_fence);
1770
1771    return VK_SUCCESS;
1772
1773 fail_shmfence_alloc:
1774    close(fence_fd);
1775
1776 fail_pixmap:
1777    cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1778    xcb_discard_reply(chain->conn, cookie.sequence);
1779
1780    wsi_destroy_image(&chain->base, &image->base);
1781
1782    return VK_ERROR_INITIALIZATION_FAILED;
1783 }
1784
1785 static void
1786 x11_image_finish(struct x11_swapchain *chain,
1787                  const VkAllocationCallbacks* pAllocator,
1788                  struct x11_image *image)
1789 {
1790    xcb_void_cookie_t cookie;
1791
1792    if (!chain->base.wsi->sw || chain->has_mit_shm) {
1793       cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
1794       xcb_discard_reply(chain->conn, cookie.sequence);
1795       xshmfence_unmap_shm(image->shm_fence);
1796
1797       cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1798       xcb_discard_reply(chain->conn, cookie.sequence);
1799
1800       cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
1801       xcb_discard_reply(chain->conn, cookie.sequence);
1802    }
1803
1804    wsi_destroy_image(&chain->base, &image->base);
1805 #ifdef HAVE_SYS_SHM_H
1806    if (image->shmaddr)
1807       shmdt(image->shmaddr);
1808 #endif
1809 }
1810
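/**
 * Ask the X server which DRM format modifiers it supports for the given
 * window. Fills in at most two tranches: window-specific modifiers first
 * (the preferred set), then screen-wide modifiers.
 */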
1811 static void
1812 wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
1813                            xcb_connection_t *conn, xcb_window_t window,
1814                            uint8_t depth, uint8_t bpp,
1815                            VkCompositeAlphaFlagsKHR vk_alpha,
1816                            uint64_t **modifiers_in, uint32_t *num_modifiers_in,
1817                            uint32_t *num_tranches_in,
1818                            const VkAllocationCallbacks *pAllocator)
1819 {
1820    if (!wsi_conn->has_dri3_modifiers)
1821       goto out;
1822
1823 #ifdef HAVE_DRI3_MODIFIERS
1824    xcb_generic_error_t *error = NULL;
1825    xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
1826       xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
1827    xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
1828       xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
1829    free(error);
1830
1831    if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
1832                       mod_reply->num_screen_modifiers == 0)) {
1833       free(mod_reply);
1834       goto out;
1835    }
1836
1837    uint32_t n = 0;
1838    uint32_t counts[2];
1839    uint64_t *modifiers[2];
1840
1841    if (mod_reply->num_window_modifiers) {
1842       counts[n] = mod_reply->num_window_modifiers;
1843       modifiers[n] = vk_alloc(pAllocator,
1844                               counts[n] * sizeof(uint64_t),
1845                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1846       if (!modifiers[n]) {
1847          free(mod_reply);
1848          goto out;
1849       }
1850
1851       memcpy(modifiers[n],
1852              xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1853              counts[n] * sizeof(uint64_t));
1854       n++;
1855    }
1856
1857    if (mod_reply->num_screen_modifiers) {
1858       counts[n] = mod_reply->num_screen_modifiers;
1859       modifiers[n] = vk_alloc(pAllocator,
1860                               counts[n] * sizeof(uint64_t),
1861                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1862       if (!modifiers[n]) {
1863          if (n > 0)
1864             vk_free(pAllocator, modifiers[0]);
1865          free(mod_reply);
1866          goto out;
1867       }
1868
1869       memcpy(modifiers[n],
1870              xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1871              counts[n] * sizeof(uint64_t));
1872       n++;
1873    }
1874
1875    for (uint32_t i = 0; i < n; i++) {
1876       modifiers_in[i] = modifiers[i];
1877       num_modifiers_in[i] = counts[i];
1878    }
1879    *num_tranches_in = n;
1880
1881    free(mod_reply);
1882    return;
1883 #endif
1884 out:
1885    *num_tranches_in = 0;
1886 }
1887
1888 static VkResult
1889 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
1890                       const VkAllocationCallbacks *pAllocator)
1891 {
1892    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1893    xcb_void_cookie_t cookie;
1894
1895    if (chain->has_present_queue) {
1896       chain->status = VK_ERROR_OUT_OF_DATE_KHR;
1897       /* Push a UINT32_MAX to wake up the manager */
1898       wsi_queue_push(&chain->present_queue, UINT32_MAX);
1899       pthread_join(chain->queue_manager, NULL);
1900
1901       if (chain->has_acquire_queue)
1902          wsi_queue_destroy(&chain->acquire_queue);
1903       wsi_queue_destroy(&chain->present_queue);
1904    }
1905
1906    for (uint32_t i = 0; i < chain->base.image_count; i++)
1907       x11_image_finish(chain, pAllocator, &chain->images[i]);
1908    wsi_destroy_image_info(&chain->base, &chain->base.image_info);
1909
1910    xcb_unregister_for_special_event(chain->conn, chain->special_event);
1911    cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
1912                                              chain->window,
1913                                              XCB_PRESENT_EVENT_MASK_NO_EVENT);
1914    xcb_discard_reply(chain->conn, cookie.sequence);
1915
1916    wsi_swapchain_finish(&chain->base);
1917
1918    vk_free(pAllocator, chain);
1919
1920    return VK_SUCCESS;
1921 }
1922
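/* Set or delete the '_VARIABLE_REFRESH' property on the window. DDX drivers
 * that support adaptive sync (e.g. amdgpu) watch this property to decide
 * whether variable refresh rate may be enabled for the window.
 */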
1923 static void
1924 wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
1925                                    xcb_drawable_t drawable,
1926                                    uint32_t state)
1927 {
1928    static char const name[] = "_VARIABLE_REFRESH";
1929    xcb_intern_atom_cookie_t cookie;
1930    xcb_intern_atom_reply_t* reply;
1931    xcb_void_cookie_t check;
1932
1933    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
1934    reply = xcb_intern_atom_reply(conn, cookie, NULL);
1935    if (reply == NULL)
1936       return;
1937
1938    if (state)
1939       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
1940                                           drawable, reply->atom,
1941                                           XCB_ATOM_CARDINAL, 32, 1, &state);
1942    else
1943       check = xcb_delete_property_checked(conn, drawable, reply->atom);
1944
1945    xcb_discard_reply(conn, check.sequence);
1946    free(reply);
1947 }
1948
1949 /**
1950  * Create the swapchain.
1951  *
1952  * Supports the immediate, fifo and mailbox presentation modes.
1953  *
1954  */
1955 static VkResult
1956 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
1957                              VkDevice device,
1958                              struct wsi_device *wsi_device,
1959                              const VkSwapchainCreateInfoKHR *pCreateInfo,
1960                              const VkAllocationCallbacks* pAllocator,
1961                              struct wsi_swapchain **swapchain_out)
1962 {
1963    struct x11_swapchain *chain;
1964    xcb_void_cookie_t cookie;
1965    VkResult result;
1966    VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
1967
1968    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
1969
1970    /* Get xcb connection from the icd_surface and from that our internal struct
1971     * representing it.
1972     */
1973    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
1974    struct wsi_x11_connection *wsi_conn =
1975       wsi_x11_get_connection(wsi_device, conn);
1976    if (!wsi_conn)
1977       return VK_ERROR_OUT_OF_HOST_MEMORY;
1978
1979    /* Get number of images in our swapchain. This count depends on:
1980     * - requested minimal image count
1981     * - device characteristics
1982     * - presentation mode.
1983     */
1984    unsigned num_images = pCreateInfo->minImageCount;
1985    if (wsi_device->x11.strict_imageCount)
1986       num_images = pCreateInfo->minImageCount;
1987    else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
1988       num_images = MAX2(num_images, 5);
1989    else if (wsi_device->x11.ensure_minImageCount)
1990       num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));
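   /* For example, in mailbox mode (an early-fence-wait case) a requested
    * minImageCount of 3 is bumped to 5 so that waiting on fences cannot
    * starve the swapchain of presentable images.
    */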
1991
1992    /* Check that we have a window up-front. It is an error to not have one. */
1993    xcb_window_t window = x11_surface_get_window(icd_surface);
1994
1995    /* Get the geometry of that window. The swapchain's bit depth is taken from it, and the
1996     * chain's image extents should match it so that presents can use performance-optimizing flips.
1997     */
1998    xcb_get_geometry_reply_t *geometry =
1999       xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
2000    if (geometry == NULL)
2001       return VK_ERROR_SURFACE_LOST_KHR;
2002    const uint32_t bit_depth = geometry->depth;
2003    const uint16_t cur_width = geometry->width;
2004    const uint16_t cur_height = geometry->height;
2005    free(geometry);
2006
2007    /* Allocate the actual swapchain. The size depends on image count. */
2008    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
2009    chain = vk_zalloc(pAllocator, size, 8,
2010                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2011    if (chain == NULL)
2012       return VK_ERROR_OUT_OF_HOST_MEMORY;
2013
2014    /* When our local device is not compatible with the DRI3 device provided by
2015     * the X server we assume this is a PRIME system.
2016     */
2017    bool use_buffer_blit = false;
2018    if (!wsi_device->sw)
2019       if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
2020          use_buffer_blit = true;
2021
2022    result = wsi_swapchain_init(wsi_device, &chain->base, device,
2023                                pCreateInfo, pAllocator, use_buffer_blit);
2024    if (result != VK_SUCCESS)
2025       goto fail_alloc;
2026
2027    chain->base.destroy = x11_swapchain_destroy;
2028    chain->base.get_wsi_image = x11_get_wsi_image;
2029    chain->base.acquire_next_image = x11_acquire_next_image;
2030    chain->base.queue_present = x11_queue_present;
2031    chain->base.present_mode = present_mode;
2032    chain->base.image_count = num_images;
2033    chain->conn = conn;
2034    chain->window = window;
2035    chain->depth = bit_depth;
2036    chain->extent = pCreateInfo->imageExtent;
2037    chain->send_sbc = 0;
2038    chain->sent_image_count = 0;
2039    chain->last_present_msc = 0;
2040    chain->has_acquire_queue = false;
2041    chain->has_present_queue = false;
2042    chain->status = VK_SUCCESS;
2043    chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
2044    chain->has_mit_shm = wsi_conn->has_mit_shm;
2045
2046    /* When images in the swapchain don't fit the window, X can still present them, but it won't
2047     * happen by flip, only by copy. Such a copy is suboptimal, because if the client changed the
2048     * chain extents, X might be able to flip again.
2049     */
2050    if (chain->extent.width != cur_width || chain->extent.height != cur_height)
2051        chain->status = VK_SUBOPTIMAL_KHR;
2052
2053    /* On a new swapchain this helper variable starts out false. It only has an effect once we
2054     * have done at least one flip and fall back to copying afterwards. The presumption is that in
2055     * this case there is a high likelihood X could do flips again if the client reallocates a
2056     * new swapchain.
2057     *
2058     * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
2059     * was true, and when the next present was completed with copying, we would return
2060     * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
2061     * presents on the surface were completed with copying because of some surface state change, we
2062     * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
2063     *
2064     * Note also that it is questionable in general whether that mechanism is really useful. It is
2065     * not clear why, on a change from flipping to copying, we can assume a reallocation has a high
2066     * chance of making flips work again per se. In other words, it is not clear why there is a need
2067     * for another way of informing clients about suboptimal copies besides forwarding the
2068     * 'PresentOptionSuboptimal' complete mode.
2069     */
2070    chain->copy_is_suboptimal = false;
2071
2072    /* For our swapchain we need to listen to the following Present extension events:
2073     * - Configure: Window dimensions changed. Images in the swapchain might need
2074     *              to be reallocated.
2075     * - Complete: An image from our swapchain was presented on the output.
2076     * - Idle: An image from our swapchain is not anymore accessed by the X
2077     *         server and can be reused.
2078     */
2079    chain->event_id = xcb_generate_id(chain->conn);
2080    xcb_present_select_input(chain->conn, chain->event_id, chain->window,
2081                             XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
2082                             XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
2083                             XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
2084
2085    /* Create an XCB event queue to hold present events outside of the usual
2086     * application event queue.
2087     */
2088    chain->special_event =
2089       xcb_register_for_special_xge(chain->conn, &xcb_present_id,
2090                                    chain->event_id, NULL);
2091
2092    /* Create the graphics context. */
2093    chain->gc = xcb_generate_id(chain->conn);
2094    if (!chain->gc) {
2095       /* FINISHME: Choose a better error. */
2096       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2097       goto fail_register;
2098    }
2099
2100    cookie = xcb_create_gc(chain->conn,
2101                           chain->gc,
2102                           chain->window,
2103                           XCB_GC_GRAPHICS_EXPOSURES,
2104                           (uint32_t []) { 0 });
2105    xcb_discard_reply(chain->conn, cookie.sequence);
2106
2107    uint64_t *modifiers[2] = {NULL, NULL};
2108    uint32_t num_modifiers[2] = {0, 0};
2109    uint32_t num_tranches = 0;
2110    if (wsi_device->supports_modifiers)
2111       wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
2112                                  pCreateInfo->compositeAlpha,
2113                                  modifiers, num_modifiers, &num_tranches,
2114                                  pAllocator);
2115
2116    if (wsi_device->sw) {
2117       result = wsi_configure_cpu_image(&chain->base, pCreateInfo,
2118                                        chain->has_mit_shm ? &alloc_shm : NULL,
2119                                        &chain->base.image_info);
2120    } else if (chain->base.use_buffer_blit) {
2121       bool use_modifier = num_tranches > 0;
2122       result = wsi_configure_prime_image(&chain->base, pCreateInfo,
2123                                          use_modifier,
2124                                          &chain->base.image_info);
2125    } else {
2126       result = wsi_configure_native_image(&chain->base, pCreateInfo,
2127                                           num_tranches, num_modifiers,
2128                                           (const uint64_t *const *)modifiers,
2129                                           &chain->base.image_info);
2130    }
2131    if (result != VK_SUCCESS)
2132       goto fail_modifiers;
2133
2134    uint32_t image = 0;
2135    for (; image < chain->base.image_count; image++) {
2136       result = x11_image_init(device, chain, pCreateInfo, pAllocator,
2137                               &chain->images[image]);
2138       if (result != VK_SUCCESS)
2139          goto fail_init_images;
2140    }
2141
2142    /* Initialize queues for images in our swapchain. Possible queues are:
2143     * - Present queue: for images sent to the X server but not yet presented.
2144     * - Acquire queue: for images already presented but not yet released by the
2145     *                  X server.
2146     *
2147     * In general, queues are not used on software drivers; otherwise which queues
2148     * are used depends on our presentation mode:
2149     * - Fifo: present and acquire
2150     * - Mailbox: present only
2151     * - Immediate: present only, and only when we wait on fences before buffer submission (Xwayland)
2152     */
2153    if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2154         chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
2155         x11_needs_wait_for_fences(wsi_device, wsi_conn,
2156                                   chain->base.present_mode)) &&
2157        !chain->base.wsi->sw) {
2158       chain->has_present_queue = true;
2159
2160       /* The queues have a length of base.image_count + 1 because we will
2161        * occasionally use UINT32_MAX to signal the other thread that an error
2162        * has occurred and we don't want an overflow.
2163        */
2164       int ret;
2165       ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
2166       if (ret) {
2167          goto fail_init_images;
2168       }
2169
2170       if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2171           chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
2172          chain->has_acquire_queue = true;
2173
2174          ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
2175          if (ret) {
2176             wsi_queue_destroy(&chain->present_queue);
2177             goto fail_init_images;
2178          }
2179
2180          for (unsigned i = 0; i < chain->base.image_count; i++)
2181             wsi_queue_push(&chain->acquire_queue, i);
2182       }
2183
2184       ret = pthread_create(&chain->queue_manager, NULL,
2185                            x11_manage_fifo_queues, chain);
2186       if (ret) {
2187          wsi_queue_destroy(&chain->present_queue);
2188          if (chain->has_acquire_queue)
2189             wsi_queue_destroy(&chain->acquire_queue);
2190
2191          goto fail_init_images;
2192       }
2193    }
2194
2195    assert(chain->has_present_queue || !chain->has_acquire_queue);
2196
2197    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2198       vk_free(pAllocator, modifiers[i]);
2199
2200    /* It is safe to set it here as only one swapchain can be associated with
2201     * the window, and swapchain creation does the association. At this point
2202     * we know the creation is going to succeed. */
2203    wsi_x11_set_adaptive_sync_property(conn, window,
2204                                       wsi_device->enable_adaptive_sync);
2205
2206    *swapchain_out = &chain->base;
2207
2208    return VK_SUCCESS;
2209
2210 fail_init_images:
2211    for (uint32_t j = 0; j < image; j++)
2212       x11_image_finish(chain, pAllocator, &chain->images[j]);
2213
2214    wsi_destroy_image_info(&chain->base, &chain->base.image_info);
2215
2216 fail_modifiers:
2217    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2218       vk_free(pAllocator, modifiers[i]);
2219
2220 fail_register:
2221    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2222
2223    wsi_swapchain_finish(&chain->base);
2224
2225 fail_alloc:
2226    vk_free(pAllocator, chain);
2227
2228    return result;
2229 }
2230
2231 VkResult
2232 wsi_x11_init_wsi(struct wsi_device *wsi_device,
2233                  const VkAllocationCallbacks *alloc,
2234                  const struct driOptionCache *dri_options)
2235 {
2236    struct wsi_x11 *wsi;
2237    VkResult result;
2238
2239    wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2240                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2241    if (!wsi) {
2242       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2243       goto fail;
2244    }
2245
2246    int ret = pthread_mutex_init(&wsi->mutex, NULL);
2247    if (ret != 0) {
2248       if (ret == ENOMEM) {
2249          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2250       } else {
2251          /* FINISHME: Choose a better error. */
2252          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2253       }
2254
2255       goto fail_alloc;
2256    }
2257
2258    wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2259                                               _mesa_key_pointer_equal);
2260    if (!wsi->connections) {
2261       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2262       goto fail_mutex;
2263    }
2264
2265    if (dri_options) {
2266       if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
2267          wsi_device->x11.override_minImageCount =
2268             driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
2269       }
2270       if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
2271          wsi_device->x11.strict_imageCount =
2272             driQueryOptionb(dri_options, "vk_x11_strict_image_count");
2273       }
2274       if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
2275          wsi_device->x11.ensure_minImageCount =
2276             driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
2277       }
2278       wsi_device->x11.xwaylandWaitReady = true;
2279       if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
2280          wsi_device->x11.xwaylandWaitReady =
2281             driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
2282       }
2283    }
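   /* As an illustrative sketch only: these options typically come from a
    * drirc XML file. The application name and executable below are
    * hypothetical placeholders.
    *
    *   <driconf>
    *     <device>
    *       <application name="Some Game" executable="game">
    *         <option name="vk_x11_strict_image_count" value="true" />
    *         <option name="vk_xwayland_wait_ready" value="false" />
    *       </application>
    *     </device>
    *   </driconf>
    */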
2284
2285    wsi->base.get_support = x11_surface_get_support;
2286    wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
2287    wsi->base.get_formats = x11_surface_get_formats;
2288    wsi->base.get_formats2 = x11_surface_get_formats2;
2289    wsi->base.get_present_modes = x11_surface_get_present_modes;
2290    wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
2291    wsi->base.create_swapchain = x11_surface_create_swapchain;
2292
2293    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
2294    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
2295
2296    return VK_SUCCESS;
2297
2298 fail_mutex:
2299    pthread_mutex_destroy(&wsi->mutex);
2300 fail_alloc:
2301    vk_free(alloc, wsi);
2302 fail:
2303    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
2304    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
2305
2306    return result;
2307 }
2308
2309 void
2310 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
2311                    const VkAllocationCallbacks *alloc)
2312 {
2313    struct wsi_x11 *wsi =
2314       (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
2315
2316    if (wsi) {
2317       hash_table_foreach(wsi->connections, entry)
2318          wsi_x11_connection_destroy(wsi_device, entry->data);
2319
2320       _mesa_hash_table_destroy(wsi->connections, NULL);
2321
2322       pthread_mutex_destroy(&wsi->mutex);
2323
2324       vk_free(alloc, wsi);
2325    }
2326 }