1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <X11/Xlib-xcb.h>
25 #include <X11/xshmfence.h>
26 #include <xcb/xcb.h>
27 #include <xcb/dri3.h>
28 #include <xcb/present.h>
29 #include <xcb/shm.h>
30
31 #include "util/macros.h"
32 #include <stdatomic.h>
33 #include <stdlib.h>
34 #include <stdio.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <string.h>
38 #include <fcntl.h>
39 #include <poll.h>
40 #include <xf86drm.h>
41 #include "drm-uapi/drm_fourcc.h"
42 #include "util/hash_table.h"
43 #include "util/os_file.h"
44 #include "util/os_time.h"
45 #include "util/u_debug.h"
46 #include "util/u_thread.h"
47 #include "util/xmlconfig.h"
48
49 #include "vk_instance.h"
50 #include "vk_physical_device.h"
51 #include "vk_util.h"
52 #include "vk_enum_to_str.h"
53 #include "wsi_common_entrypoints.h"
54 #include "wsi_common_private.h"
55 #include "wsi_common_queue.h"
56
57 #ifdef HAVE_SYS_SHM_H
58 #include <sys/ipc.h>
59 #include <sys/shm.h>
60 #endif
61
62 struct wsi_x11_connection {
63    bool has_dri3;
64    bool has_dri3_modifiers;
65    bool has_present;
66    bool is_proprietary_x11;
67    bool is_xwayland;
68    bool has_mit_shm;
69    bool has_xfixes;
70 };
71
72 struct wsi_x11 {
73    struct wsi_interface base;
74
75    pthread_mutex_t                              mutex;
76    /* Hash table of xcb_connection -> wsi_x11_connection mappings */
77    struct hash_table *connections;
78 };
79
80
81 /**
82  * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
83  */
84 static int
85 wsi_dri3_open(xcb_connection_t *conn,
86               xcb_window_t root,
87               uint32_t provider)
88 {
89    xcb_dri3_open_cookie_t       cookie;
90    xcb_dri3_open_reply_t        *reply;
91    int                          fd;
92
93    cookie = xcb_dri3_open(conn,
94                           root,
95                           provider);
96
97    reply = xcb_dri3_open_reply(conn, cookie, NULL);
98    if (!reply)
99       return -1;
100
101    /* According to the DRI3 extension, nfd must equal one. */
102    if (reply->nfd != 1) {
103       free(reply);
104       return -1;
105    }
106
107    fd = xcb_dri3_open_reply_fds(conn, reply)[0];
108    free(reply);
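   /* Mark the fd close-on-exec so it cannot leak into child processes. */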
109    fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
110
111    return fd;
112 }
113
114 /**
115  * Checks compatibility of the device wsi_dev with the device the X server
116  * provides via DRI3.
117  *
118  * This returns true when no device could be retrieved from the X server, or
119  * when the information for the X server device indicates it is the same device.
120  */
121 static bool
122 wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
123                               xcb_connection_t *conn)
124 {
125    xcb_screen_iterator_t screen_iter =
126       xcb_setup_roots_iterator(xcb_get_setup(conn));
127    xcb_screen_t *screen = screen_iter.data;
128
129    /* Open the DRI3 device from the X server. If we do not retrieve one we
130     * assume our local device is compatible.
131     */
132    int dri3_fd = wsi_dri3_open(conn, screen->root, None);
133    if (dri3_fd == -1)
134       return true;
135
136    bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);
137
138    close(dri3_fd);
139
140    return match;
141 }
142
143 static bool
144 wsi_x11_detect_xwayland(xcb_connection_t *conn)
145 {
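   /* Xwayland names its RandR outputs "XWAYLAND0", "XWAYLAND1", and so on, so
    * checking the name of the first output is a cheap way to detect it. This
    * requires RandR 1.3 for GetScreenResourcesCurrent, which we probe first.
    */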
146    xcb_randr_query_version_cookie_t ver_cookie =
147       xcb_randr_query_version_unchecked(conn, 1, 3);
148    xcb_randr_query_version_reply_t *ver_reply =
149       xcb_randr_query_version_reply(conn, ver_cookie, NULL);
150    bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
151                                        ver_reply->minor_version >= 3);
152    free(ver_reply);
153
154    if (!has_randr_v1_3)
155       return false;
156
157    const xcb_setup_t *setup = xcb_get_setup(conn);
158    xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);
159
160    xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
161       xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
162    xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
163       xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);
164
165    if (!gsr_reply || gsr_reply->num_outputs == 0) {
166       free(gsr_reply);
167       return false;
168    }
169
170    xcb_randr_output_t *randr_outputs =
171       xcb_randr_get_screen_resources_current_outputs(gsr_reply);
172    xcb_randr_get_output_info_cookie_t goi_cookie =
173       xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
174    free(gsr_reply);
175
176    xcb_randr_get_output_info_reply_t *goi_reply =
177       xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
178    if (!goi_reply) {
179       return false;
180    }
181
182    char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
183    bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
184    free(goi_reply);
185
186    return is_xwayland;
187 }
188
189 static struct wsi_x11_connection *
190 wsi_x11_connection_create(struct wsi_device *wsi_dev,
191                           xcb_connection_t *conn)
192 {
193    xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie,
194                                 amd_cookie, nv_cookie, shm_cookie, sync_cookie,
195                                 xfixes_cookie;
196    xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply,
197                                *amd_reply, *nv_reply, *shm_reply = NULL,
198                                *xfixes_reply;
199    bool wants_shm = wsi_dev->sw && !(WSI_DEBUG & WSI_DEBUG_NOSHM) &&
200                     wsi_dev->has_import_memory_host;
201    bool has_dri3_v1_2 = false;
202    bool has_present_v1_2 = false;
203
204    struct wsi_x11_connection *wsi_conn =
205       vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
206                 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
207    if (!wsi_conn)
208       return NULL;
209
210    sync_cookie = xcb_query_extension(conn, 4, "SYNC");
211    dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
212    pres_cookie = xcb_query_extension(conn, 7, "Present");
213    randr_cookie = xcb_query_extension(conn, 5, "RANDR");
214    xfixes_cookie = xcb_query_extension(conn, 6, "XFIXES");
215
216    if (wants_shm)
217       shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");
218
219    /* We try to be nice to users and emit a warning if they try to use a
220     * Vulkan application on a system without DRI3 enabled.  However, this ends
221     * up spewing the warning when a user has, for example, both Intel
222  * integrated graphics and a discrete card with proprietary drivers and is
223     * running on the discrete card with the proprietary DDX.  In this case, we
224     * really don't want to print the warning because it just confuses users.
225     * As a heuristic to detect this case, we check for a couple of proprietary
226     * X11 extensions.
227     */
228    amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
229    nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");
230
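   /* The SYNC reply is never inspected; the query is presumably only there to
    * make sure the extension is initialized before the fence machinery below
    * depends on it.
    */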
231    xcb_discard_reply(conn, sync_cookie.sequence);
232    dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
233    pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
234    randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
235    amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
236    nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
237    xfixes_reply = xcb_query_extension_reply(conn, xfixes_cookie, NULL);
238    if (wants_shm)
239       shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
240    if (!dri3_reply || !pres_reply || !xfixes_reply) {
241       free(dri3_reply);
242       free(pres_reply);
243       free(xfixes_reply);
244       free(randr_reply);
245       free(amd_reply);
246       free(nv_reply);
247       if (wants_shm)
248          free(shm_reply);
249       vk_free(&wsi_dev->instance_alloc, wsi_conn);
250       return NULL;
251    }
252
253    wsi_conn->has_dri3 = dri3_reply->present != 0;
254 #ifdef HAVE_DRI3_MODIFIERS
255    if (wsi_conn->has_dri3) {
256       xcb_dri3_query_version_cookie_t ver_cookie;
257       xcb_dri3_query_version_reply_t *ver_reply;
258
259       ver_cookie = xcb_dri3_query_version(conn, 1, 2);
260       ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
261       has_dri3_v1_2 = ver_reply != NULL &&
262          (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
263       free(ver_reply);
264    }
265 #endif
266
267    wsi_conn->has_present = pres_reply->present != 0;
268 #ifdef HAVE_DRI3_MODIFIERS
269    if (wsi_conn->has_present) {
270       xcb_present_query_version_cookie_t ver_cookie;
271       xcb_present_query_version_reply_t *ver_reply;
272
273       ver_cookie = xcb_present_query_version(conn, 1, 2);
274       ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
275       has_present_v1_2 = ver_reply != NULL &&
276         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
277       free(ver_reply);
278    }
279 #endif
280
281    wsi_conn->has_xfixes = xfixes_reply->present != 0;
282    if (wsi_conn->has_xfixes) {
283       xcb_xfixes_query_version_cookie_t ver_cookie;
284       xcb_xfixes_query_version_reply_t *ver_reply;
285
286       ver_cookie = xcb_xfixes_query_version(conn, 6, 0);
287       ver_reply = xcb_xfixes_query_version_reply(conn, ver_cookie, NULL);
288       wsi_conn->has_xfixes = ver_reply != NULL && ver_reply->major_version >= 2;
289       free(ver_reply);
290    }
291
292    if (randr_reply && randr_reply->present != 0)
293       wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn);
294    else
295       wsi_conn->is_xwayland = false;
296
297    wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
298    wsi_conn->is_proprietary_x11 = false;
299    if (amd_reply && amd_reply->present)
300       wsi_conn->is_proprietary_x11 = true;
301    if (nv_reply && nv_reply->present)
302       wsi_conn->is_proprietary_x11 = true;
303
304    wsi_conn->has_mit_shm = false;
305    if (wsi_conn->has_dri3 && wsi_conn->has_present && wants_shm) {
306       bool has_mit_shm = shm_reply->present != 0;
307
308       xcb_shm_query_version_cookie_t ver_cookie;
309       xcb_shm_query_version_reply_t *ver_reply;
310
311       ver_cookie = xcb_shm_query_version(conn);
312       ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);
313
314       has_mit_shm = has_mit_shm && ver_reply && ver_reply->shared_pixmaps;
315       free(ver_reply);
316       xcb_void_cookie_t cookie;
317       xcb_generic_error_t *error;
318
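      /* Probe that the server really implements MIT-SHM by detaching an
       * invalid segment: a working implementation answers with some error
       * other than BadRequest (e.g. BadShmSeg), while BadRequest would mean
       * the request is not actually handled.
       */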
319       if (has_mit_shm) {
320          cookie = xcb_shm_detach_checked(conn, 0);
321          if ((error = xcb_request_check(conn, cookie))) {
322             if (error->error_code != BadRequest)
323                wsi_conn->has_mit_shm = true;
324             free(error);
325          }
326       }
327    }
328
329    free(dri3_reply);
330    free(pres_reply);
331    free(randr_reply);
332    free(amd_reply);
333    free(nv_reply);
334    if (wants_shm)
335       free(shm_reply);
336
337    return wsi_conn;
338 }
339
340 static void
341 wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
342                            struct wsi_x11_connection *conn)
343 {
344    vk_free(&wsi_dev->instance_alloc, conn);
345 }
346
347 static bool
348 wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
349 {
350   if (wsi_conn->has_dri3)
351     return true;
352   if (!wsi_conn->is_proprietary_x11) {
353     fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
354                     "Note: you can probably enable DRI3 in your Xorg config\n");
355   }
356   return false;
357 }
358
359 /**
360  * Get internal struct representing an xcb_connection_t.
361  *
362  * This can allocate the struct, but the caller does not own it: it is
363  * freed when wsi_x11_finish_wsi runs, by the hash table it was inserted into.
364  *
365  * If the allocation fails NULL is returned.
366  */
367 static struct wsi_x11_connection *
368 wsi_x11_get_connection(struct wsi_device *wsi_dev,
369                        xcb_connection_t *conn)
370 {
371    struct wsi_x11 *wsi =
372       (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];
373
374    pthread_mutex_lock(&wsi->mutex);
375
376    struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
377    if (!entry) {
378       /* We're about to make a bunch of blocking calls.  Let's drop the
379        * mutex for now so we don't block up too badly.
380        */
381       pthread_mutex_unlock(&wsi->mutex);
382
383       struct wsi_x11_connection *wsi_conn =
384          wsi_x11_connection_create(wsi_dev, conn);
385       if (!wsi_conn)
386          return NULL;
387
388       pthread_mutex_lock(&wsi->mutex);
389
390       entry = _mesa_hash_table_search(wsi->connections, conn);
391       if (entry) {
392          /* Oops, someone raced us to it */
393          wsi_x11_connection_destroy(wsi_dev, wsi_conn);
394       } else {
395          entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
396       }
397    }
398
399    pthread_mutex_unlock(&wsi->mutex);
400
401    return entry->data;
402 }
403
404 struct surface_format {
405    VkFormat format;
406    unsigned bits_per_rgb;
407 };
408
409 static const struct surface_format formats[] = {
410    { VK_FORMAT_B8G8R8A8_SRGB,             8 },
411    { VK_FORMAT_B8G8R8A8_UNORM,            8 },
412    { VK_FORMAT_A2R10G10B10_UNORM_PACK32, 10 },
413 };
414
415 static const VkPresentModeKHR present_modes[] = {
416    VK_PRESENT_MODE_IMMEDIATE_KHR,
417    VK_PRESENT_MODE_MAILBOX_KHR,
418    VK_PRESENT_MODE_FIFO_KHR,
419    VK_PRESENT_MODE_FIFO_RELAXED_KHR,
420 };
421
422 static xcb_screen_t *
423 get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
424 {
425    xcb_screen_iterator_t screen_iter =
426       xcb_setup_roots_iterator(xcb_get_setup(conn));
427
428    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
429       if (screen_iter.data->root == root)
430          return screen_iter.data;
431    }
432
433    return NULL;
434 }
435
436 static xcb_visualtype_t *
437 screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
438                       unsigned *depth)
439 {
440    xcb_depth_iterator_t depth_iter =
441       xcb_screen_allowed_depths_iterator(screen);
442
443    for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
444       xcb_visualtype_iterator_t visual_iter =
445          xcb_depth_visuals_iterator (depth_iter.data);
446
447       for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
448          if (visual_iter.data->visual_id == visual_id) {
449             if (depth)
450                *depth = depth_iter.data->depth;
451             return visual_iter.data;
452          }
453       }
454    }
455
456    return NULL;
457 }
458
459 static xcb_visualtype_t *
460 connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
461 {
462    xcb_screen_iterator_t screen_iter =
463       xcb_setup_roots_iterator(xcb_get_setup(conn));
464
465    /* For this we have to iterate over all of the screens which is rather
466     * annoying.  Fortunately, there is probably only 1.
467     */
468    for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
469       xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
470                                                        visual_id, NULL);
471       if (visual)
472          return visual;
473    }
474
475    return NULL;
476 }
477
478 static xcb_visualtype_t *
479 get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
480                           unsigned *depth)
481 {
482    xcb_query_tree_cookie_t tree_cookie;
483    xcb_get_window_attributes_cookie_t attrib_cookie;
484    xcb_query_tree_reply_t *tree;
485    xcb_get_window_attributes_reply_t *attrib;
486
487    tree_cookie = xcb_query_tree(conn, window);
488    attrib_cookie = xcb_get_window_attributes(conn, window);
489
490    tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
491    attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
492    if (attrib == NULL || tree == NULL) {
493       free(attrib);
494       free(tree);
495       return NULL;
496    }
497
498    xcb_window_t root = tree->root;
499    xcb_visualid_t visual_id = attrib->visual;
500    free(attrib);
501    free(tree);
502
503    xcb_screen_t *screen = get_screen_for_root(conn, root);
504    if (screen == NULL)
505       return NULL;
506
507    return screen_get_visualtype(screen, visual_id, depth);
508 }
509
510 static bool
511 visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
512 {
513    uint32_t rgb_mask = visual->red_mask |
514                        visual->green_mask |
515                        visual->blue_mask;
516
517    uint32_t all_mask = 0xffffffff >> (32 - depth);
518
519    /* Do we have bits left over after RGB? */
520    return (all_mask & ~rgb_mask) != 0;
521 }
522
523 static bool
524 visual_supported(xcb_visualtype_t *visual)
525 {
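   /* Only 8- and 10-bit-per-channel visuals have corresponding entries in the
    * formats[] table, so anything else is unsupported.
    */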
526    if (!visual)
527       return false;
528
529    return visual->bits_per_rgb_value == 8 || visual->bits_per_rgb_value == 10;
530 }
531
532 VKAPI_ATTR VkBool32 VKAPI_CALL
533 wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
534                                                uint32_t queueFamilyIndex,
535                                                xcb_connection_t *connection,
536                                                xcb_visualid_t visual_id)
537 {
538    VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
539    struct wsi_device *wsi_device = pdevice->wsi_device;
540    struct wsi_x11_connection *wsi_conn =
541       wsi_x11_get_connection(wsi_device, connection);
542
543    if (!wsi_conn)
544       return false;
545
546    if (!wsi_device->sw) {
547       if (!wsi_x11_check_for_dri3(wsi_conn))
548          return false;
549    }
550
551    if (!visual_supported(connection_get_visualtype(connection, visual_id)))
552       return false;
553
554    return true;
555 }
556
557 VKAPI_ATTR VkBool32 VKAPI_CALL
558 wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
559                                                 uint32_t queueFamilyIndex,
560                                                 Display *dpy,
561                                                 VisualID visualID)
562 {
563    return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
564                                                          queueFamilyIndex,
565                                                          XGetXCBConnection(dpy),
566                                                          visualID);
567 }
568
569 static xcb_connection_t*
570 x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
571 {
572    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
573       return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
574    else
575       return ((VkIcdSurfaceXcb *)icd_surface)->connection;
576 }
577
578 static xcb_window_t
579 x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
580 {
581    if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
582       return ((VkIcdSurfaceXlib *)icd_surface)->window;
583    else
584       return ((VkIcdSurfaceXcb *)icd_surface)->window;
585 }
586
587 static VkResult
588 x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
589                         struct wsi_device *wsi_device,
590                         uint32_t queueFamilyIndex,
591                         VkBool32* pSupported)
592 {
593    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
594    xcb_window_t window = x11_surface_get_window(icd_surface);
595
596    struct wsi_x11_connection *wsi_conn =
597       wsi_x11_get_connection(wsi_device, conn);
598    if (!wsi_conn)
599       return VK_ERROR_OUT_OF_HOST_MEMORY;
600
601    if (!wsi_device->sw) {
602       if (!wsi_x11_check_for_dri3(wsi_conn)) {
603          *pSupported = false;
604          return VK_SUCCESS;
605       }
606    }
607
608    if (!visual_supported(get_visualtype_for_window(conn, window, NULL))) {
609       *pSupported = false;
610       return VK_SUCCESS;
611    }
612
613    *pSupported = true;
614    return VK_SUCCESS;
615 }
616
617 static uint32_t
618 x11_get_min_image_count(const struct wsi_device *wsi_device)
619 {
620    if (wsi_device->x11.override_minImageCount)
621       return wsi_device->x11.override_minImageCount;
622
623    /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
624     * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
625     * the render latency is CPU duration + GPU duration.
626     *
627     * This means that with scanout from pageflipping we need 3 frames to run
628     * full speed:
629     * 1) CPU rendering work
630     * 2) GPU rendering work
631     * 3) scanout
632     *
633     * Once we have a nonblocking acquire that returns a semaphore we can merge
634     * 1 and 3. Hence the ideal implementation needs only 2 images, but games
635     * cannot tell that we currently do not have an ideal implementation and that
636     * hence they need to allocate 3 images. So let us do it for them.
637     *
638     * This is a tradeoff as it uses more memory than needed for non-fullscreen
639     * and non-performance intensive applications.
640     */
641    return 3;
642 }
643
644 static VkResult
645 x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
646                              struct wsi_device *wsi_device,
647                              VkSurfaceCapabilitiesKHR *caps)
648 {
649    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
650    xcb_window_t window = x11_surface_get_window(icd_surface);
651    xcb_get_geometry_cookie_t geom_cookie;
652    xcb_generic_error_t *err;
653    xcb_get_geometry_reply_t *geom;
654    unsigned visual_depth;
655
656    geom_cookie = xcb_get_geometry(conn, window);
657
658    /* This does a round-trip.  This is why we do get_geometry first and
659     * wait to read the reply until after we have a visual.
660     */
661    xcb_visualtype_t *visual =
662       get_visualtype_for_window(conn, window, &visual_depth);
663
664    if (!visual)
665       return VK_ERROR_SURFACE_LOST_KHR;
666
667    geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
668    if (geom) {
669       VkExtent2D extent = { geom->width, geom->height };
670       caps->currentExtent = extent;
671       caps->minImageExtent = extent;
672       caps->maxImageExtent = extent;
673    }
674    free(err);
675    free(geom);
676    if (!geom)
677        return VK_ERROR_SURFACE_LOST_KHR;
678
679    if (visual_has_alpha(visual, visual_depth)) {
680       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
681                                       VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
682    } else {
683       caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
684                                       VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
685    }
686
687    caps->minImageCount = x11_get_min_image_count(wsi_device);
688    /* There is no real maximum */
689    caps->maxImageCount = 0;
690
691    caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
692    caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
693    caps->maxImageArrayLayers = 1;
694    caps->supportedUsageFlags =
695       VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
696       VK_IMAGE_USAGE_SAMPLED_BIT |
697       VK_IMAGE_USAGE_TRANSFER_DST_BIT |
698       VK_IMAGE_USAGE_STORAGE_BIT |
699       VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
700       VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
701
702    return VK_SUCCESS;
703 }
704
705 static VkResult
706 x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
707                               struct wsi_device *wsi_device,
708                               const void *info_next,
709                               VkSurfaceCapabilities2KHR *caps)
710 {
711    assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);
712
713    VkResult result =
714       x11_surface_get_capabilities(icd_surface, wsi_device,
715                                    &caps->surfaceCapabilities);
716
717    if (result != VK_SUCCESS)
718       return result;
719
720    vk_foreach_struct(ext, caps->pNext) {
721       switch (ext->sType) {
722       case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
723          VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
724          protected->supportsProtected = VK_FALSE;
725          break;
726       }
727
728       default:
729          /* Ignored */
730          break;
731       }
732    }
733
734    return result;
735 }
736
737 static bool
738 get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
739                       VkFormat *sorted_formats, unsigned *count)
740 {
741    xcb_connection_t *conn = x11_surface_get_connection(surface);
742    xcb_window_t window = x11_surface_get_window(surface);
743    xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL);
744    if (!visual)
745       return false;
746
747    *count = 0;
748    for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
749       if (formats[i].bits_per_rgb == visual->bits_per_rgb_value)
750          sorted_formats[(*count)++] = formats[i].format;
751    }
752
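   /* Some applications simply take the first format in the list, so the
    * force_bgra8_unorm_first workaround (a driconf option) moves
    * B8G8R8A8_UNORM ahead of the SRGB variant for them.
    */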
753    if (wsi_device->force_bgra8_unorm_first) {
754       for (unsigned i = 0; i < *count; i++) {
755          if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
756             sorted_formats[i] = sorted_formats[0];
757             sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
758             break;
759          }
760       }
761    }
762
763    return true;
764 }
765
766 static VkResult
767 x11_surface_get_formats(VkIcdSurfaceBase *surface,
768                         struct wsi_device *wsi_device,
769                         uint32_t *pSurfaceFormatCount,
770                         VkSurfaceFormatKHR *pSurfaceFormats)
771 {
772    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
773                           pSurfaceFormats, pSurfaceFormatCount);
774
775    unsigned count;
776    VkFormat sorted_formats[ARRAY_SIZE(formats)];
777    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
778       return VK_ERROR_SURFACE_LOST_KHR;
779
780    for (unsigned i = 0; i < count; i++) {
781       vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
782          f->format = sorted_formats[i];
783          f->colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
784       }
785    }
786
787    return vk_outarray_status(&out);
788 }
789
790 static VkResult
791 x11_surface_get_formats2(VkIcdSurfaceBase *surface,
792                         struct wsi_device *wsi_device,
793                         const void *info_next,
794                         uint32_t *pSurfaceFormatCount,
795                         VkSurfaceFormat2KHR *pSurfaceFormats)
796 {
797    VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
798                           pSurfaceFormats, pSurfaceFormatCount);
799
800    unsigned count;
801    VkFormat sorted_formats[ARRAY_SIZE(formats)];
802    if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
803       return VK_ERROR_SURFACE_LOST_KHR;
804
805    for (unsigned i = 0; i < count; i++) {
806       vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
807          assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
808          f->surfaceFormat.format = sorted_formats[i];
809          f->surfaceFormat.colorSpace = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR;
810       }
811    }
812
813    return vk_outarray_status(&out);
814 }
815
816 static VkResult
817 x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
818                               uint32_t *pPresentModeCount,
819                               VkPresentModeKHR *pPresentModes)
820 {
821    if (pPresentModes == NULL) {
822       *pPresentModeCount = ARRAY_SIZE(present_modes);
823       return VK_SUCCESS;
824    }
825
826    *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
827    typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);
828
829    return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
830       VK_INCOMPLETE : VK_SUCCESS;
831 }
832
833 static VkResult
834 x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
835                                    struct wsi_device *wsi_device,
836                                    uint32_t* pRectCount,
837                                    VkRect2D* pRects)
838 {
839    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
840    xcb_window_t window = x11_surface_get_window(icd_surface);
841    VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);
842
843    vk_outarray_append_typed(VkRect2D, &out, rect) {
844       xcb_generic_error_t *err = NULL;
845       xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
846       xcb_get_geometry_reply_t *geom =
847          xcb_get_geometry_reply(conn, geom_cookie, &err);
848       free(err);
849       if (geom) {
850          *rect = (VkRect2D) {
851             .offset = { 0, 0 },
852             .extent = { geom->width, geom->height },
853          };
854       }
855       free(geom);
856       if (!geom)
857           return VK_ERROR_SURFACE_LOST_KHR;
858    }
859
860    return vk_outarray_status(&out);
861 }
862
863 VKAPI_ATTR VkResult VKAPI_CALL
864 wsi_CreateXcbSurfaceKHR(VkInstance _instance,
865                         const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
866                         const VkAllocationCallbacks *pAllocator,
867                         VkSurfaceKHR *pSurface)
868 {
869    VK_FROM_HANDLE(vk_instance, instance, _instance);
870    VkIcdSurfaceXcb *surface;
871
872    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);
873
874    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
875                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
876    if (surface == NULL)
877       return VK_ERROR_OUT_OF_HOST_MEMORY;
878
879    surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
880    surface->connection = pCreateInfo->connection;
881    surface->window = pCreateInfo->window;
882
883    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
884    return VK_SUCCESS;
885 }
886
887 VKAPI_ATTR VkResult VKAPI_CALL
888 wsi_CreateXlibSurfaceKHR(VkInstance _instance,
889                          const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
890                          const VkAllocationCallbacks *pAllocator,
891                          VkSurfaceKHR *pSurface)
892 {
893    VK_FROM_HANDLE(vk_instance, instance, _instance);
894    VkIcdSurfaceXlib *surface;
895
896    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);
897
898    surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
899                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
900    if (surface == NULL)
901       return VK_ERROR_OUT_OF_HOST_MEMORY;
902
903    surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
904    surface->dpy = pCreateInfo->dpy;
905    surface->window = pCreateInfo->window;
906
907    *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
908    return VK_SUCCESS;
909 }
910
911 struct x11_image {
912    struct wsi_image                          base;
913    xcb_pixmap_t                              pixmap;
914    xcb_xfixes_region_t                       update_region; /* long lived XID */
915    xcb_xfixes_region_t                       update_area;   /* the above or None */
916    bool                                      busy;
917    bool                                      present_queued;
918    struct xshmfence *                        shm_fence;
919    uint32_t                                  sync_fence;
920    uint32_t                                  serial;
921    xcb_shm_seg_t                             shmseg;
922    int                                       shmid;
923    uint8_t *                                 shmaddr;
924 };
925
926 struct x11_swapchain {
927    struct wsi_swapchain                        base;
928
929    bool                                         has_dri3_modifiers;
930    bool                                         has_mit_shm;
931
932    xcb_connection_t *                           conn;
933    xcb_window_t                                 window;
934    xcb_gc_t                                     gc;
935    uint32_t                                     depth;
936    VkExtent2D                                   extent;
937
938    xcb_present_event_t                          event_id;
939    xcb_special_event_t *                        special_event;
940    uint64_t                                     send_sbc;
941    uint64_t                                     last_present_msc;
942    uint32_t                                     stamp;
943    atomic_int                                   sent_image_count;
944
945    bool                                         has_present_queue;
946    bool                                         has_acquire_queue;
947    VkResult                                     status;
948    bool                                         copy_is_suboptimal;
949    struct wsi_queue                             present_queue;
950    struct wsi_queue                             acquire_queue;
951    pthread_t                                    queue_manager;
952
953    struct x11_image                             images[0];
954 };
955 VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
956                                VK_OBJECT_TYPE_SWAPCHAIN_KHR)
957
958 /**
959  * Update the swapchain status with the result of an operation, and return
960  * the combined status. The chain status will eventually be returned from
961  * AcquireNextImage and QueuePresent.
962  *
963  * We make sure to 'stick' more pessimistic statuses: an out-of-date error
964  * is permanent once seen, and every subsequent call will return this. If
965  * this has not been seen, success will be returned.
966  */
967 static VkResult
968 _x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
969                       const char *file, int line)
970 {
971    /* Prioritise returning existing errors for consistency. */
972    if (chain->status < 0)
973       return chain->status;
974
975    /* If we have a new error, mark it as permanent on the chain and return. */
976    if (result < 0) {
977 #ifndef NDEBUG
978       fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
979               file, line, vk_Result_to_str(result));
980 #endif
981       chain->status = result;
982       return result;
983    }
984
985    /* Return temporary errors, but don't persist them. */
986    if (result == VK_TIMEOUT || result == VK_NOT_READY)
987       return result;
988
989    /* Suboptimal isn't an error, but is a status which sticks to the swapchain
990     * and is always returned rather than success.
991     */
992    if (result == VK_SUBOPTIMAL_KHR) {
993 #ifndef NDEBUG
994       if (chain->status != VK_SUBOPTIMAL_KHR) {
995          fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
996                  file, line, vk_Result_to_str(result));
997       }
998 #endif
999       chain->status = result;
1000       return result;
1001    }
1002
1003    /* No changes, so return the last status. */
1004    return chain->status;
1005 }
1006 #define x11_swapchain_result(chain, result) \
1007    _x11_swapchain_result(chain, result, __FILE__, __LINE__)
1008
1009 static struct wsi_image *
1010 x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
1011 {
1012    struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
1013    return &chain->images[image_index].base;
1014 }
1015
1016 /**
1017  * Process an X11 Present event. Does not update chain->status.
1018  */
1019 static VkResult
1020 x11_handle_dri3_present_event(struct x11_swapchain *chain,
1021                               xcb_present_generic_event_t *event)
1022 {
1023    switch (event->evtype) {
1024    case XCB_PRESENT_CONFIGURE_NOTIFY: {
1025       xcb_present_configure_notify_event_t *config = (void *) event;
1026
1027       if (config->width != chain->extent.width ||
1028           config->height != chain->extent.height)
1029          return VK_SUBOPTIMAL_KHR;
1030
1031       break;
1032    }
1033
1034    case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
1035       xcb_present_idle_notify_event_t *idle = (void *) event;
1036
1037       for (unsigned i = 0; i < chain->base.image_count; i++) {
1038          if (chain->images[i].pixmap == idle->pixmap) {
1039             chain->images[i].busy = false;
1040             chain->sent_image_count--;
1041             assert(chain->sent_image_count >= 0);
1042             if (chain->has_acquire_queue)
1043                wsi_queue_push(&chain->acquire_queue, i);
1044             break;
1045          }
1046       }
1047
1048       break;
1049    }
1050
1051    case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
1052       xcb_present_complete_notify_event_t *complete = (void *) event;
1053       if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
1054          unsigned i;
1055          for (i = 0; i < chain->base.image_count; i++) {
1056             struct x11_image *image = &chain->images[i];
1057             if (image->present_queued && image->serial == complete->serial)
1058                image->present_queued = false;
1059          }
1060          chain->last_present_msc = complete->msc;
1061       }
1062
1063       VkResult result = VK_SUCCESS;
1064       switch (complete->mode) {
1065       case XCB_PRESENT_COMPLETE_MODE_COPY:
1066          if (chain->copy_is_suboptimal)
1067             result = VK_SUBOPTIMAL_KHR;
1068          break;
1069       case XCB_PRESENT_COMPLETE_MODE_FLIP:
1070          /* If we ever go from flipping to copying, the odds are very likely
1071           * that we could reallocate in a more optimal way if we didn't have
1072           * to care about scanout, so we always do this.
1073           */
1074          chain->copy_is_suboptimal = true;
1075          break;
1076 #ifdef HAVE_DRI3_MODIFIERS
1077       case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
1078          /* The winsys is now trying to flip directly and cannot due to our
1079           * configuration. Request the user reallocate.
1080           */
1081          result = VK_SUBOPTIMAL_KHR;
1082          break;
1083 #endif
1084       default:
1085          break;
1086       }
1087
1088       return result;
1089    }
1090
1091    default:
1092       break;
1093    }
1094
1095    return VK_SUCCESS;
1096 }
1097
1098
1099 static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
1100 {
1101    uint64_t current_time = os_time_get_nano();
1102
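   /* Clamp so that current_time + timeout cannot overflow UINT64_MAX. */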
1103    timeout = MIN2(UINT64_MAX - current_time, timeout);
1104
1105    return current_time + timeout;
1106 }
1107
1108 /**
1109  * Acquire a ready-to-use image directly from our swapchain. If all images are
1110  * busy, wait until one becomes free or until the timeout expires.
1111  */
1112 static VkResult
1113 x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
1114                                 uint32_t *image_index, uint64_t timeout)
1115 {
1116    xcb_generic_event_t *event;
1117    struct pollfd pfds;
1118    uint64_t atimeout;
1119    while (1) {
1120       for (uint32_t i = 0; i < chain->base.image_count; i++) {
1121          if (!chain->images[i].busy) {
1122             /* We found a non-busy image */
1123             xshmfence_await(chain->images[i].shm_fence);
1124             *image_index = i;
1125             chain->images[i].busy = true;
1126             return x11_swapchain_result(chain, VK_SUCCESS);
1127          }
1128       }
1129
1130       xcb_flush(chain->conn);
1131
1132       if (timeout == UINT64_MAX) {
1133          event = xcb_wait_for_special_event(chain->conn, chain->special_event);
1134          if (!event)
1135             return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
1136       } else {
1137          event = xcb_poll_for_special_event(chain->conn, chain->special_event);
1138          if (!event) {
1139             int ret;
1140             if (timeout == 0)
1141                return x11_swapchain_result(chain, VK_NOT_READY);
1142
1143             atimeout = wsi_get_absolute_timeout(timeout);
1144
1145             pfds.fd = xcb_get_file_descriptor(chain->conn);
1146             pfds.events = POLLIN;
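            /* poll() takes milliseconds; our timeout is in nanoseconds. */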
1147             ret = poll(&pfds, 1, timeout / 1000 / 1000);
1148             if (ret == 0)
1149                return x11_swapchain_result(chain, VK_TIMEOUT);
1150             if (ret == -1)
1151                return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);
1152
1153             /* If a non-special event arrives, the fd still polls as
1154              * readable, so recalculate the remaining timeout just in case.
1155              */
1156             uint64_t current_time = os_time_get_nano();
1157             if (atimeout > current_time)
1158                timeout = atimeout - current_time;
1159             else
1160                timeout = 0;
1161             continue;
1162          }
1163       }
1164
1165       /* Update the swapchain status here. We may catch non-fatal errors here,
1166        * in which case we need to update the status and continue.
1167        */
1168       VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
1169       /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1170       result = x11_swapchain_result(chain, result);
1171       free(event);
1172       if (result < 0)
1173          return result;
1174    }
1175 }
1176
1177 /**
1178  * Acquire a ready-to-use image from the acquire-queue. Only relevant in fifo
1179  * presentation mode.
1180  */
1181 static VkResult
1182 x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
1183                                   uint32_t *image_index_out, uint64_t timeout)
1184 {
1185    assert(chain->has_acquire_queue);
1186
1187    uint32_t image_index;
1188    VkResult result = wsi_queue_pull(&chain->acquire_queue,
1189                                     &image_index, timeout);
1190    if (result < 0 || result == VK_TIMEOUT) {
1191       /* On error, the thread has shut down, so safe to update chain->status.
1192        * Calling x11_swapchain_result with VK_TIMEOUT won't modify
1193        * chain->status so that is also safe.
1194        */
1195       return x11_swapchain_result(chain, result);
1196    } else if (chain->status < 0) {
1197       return chain->status;
1198    }
1199
1200    assert(image_index < chain->base.image_count);
1201    xshmfence_await(chain->images[image_index].shm_fence);
1202
1203    *image_index_out = image_index;
1204
1205    return chain->status;
1206 }
1207
1208 /**
1209  * Send image to X server via Present extension.
1210  */
1211 static VkResult
1212 x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
1213                         uint64_t target_msc)
1214 {
1215    struct x11_image *image = &chain->images[image_index];
1216
1217    assert(image_index < chain->base.image_count);
1218
1219    uint32_t options = XCB_PRESENT_OPTION_NONE;
1220
1221    int64_t divisor = 0;
1222    int64_t remainder = 0;
1223
1224    struct wsi_x11_connection *wsi_conn =
1225       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1226    if (!wsi_conn)
1227       return VK_ERROR_OUT_OF_HOST_MEMORY;
1228
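   /* XCB_PRESENT_OPTION_ASYNC asks the server to present without waiting for
    * the next vblank once the target MSC has passed, matching the tearing
    * semantics of IMMEDIATE and FIFO_RELAXED. MAILBOX on Xwayland is treated
    * the same way, since Wayland compositing provides the mailbox behaviour
    * itself.
    */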
1229    if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
1230        (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
1231         wsi_conn->is_xwayland) ||
1232        chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
1233       options |= XCB_PRESENT_OPTION_ASYNC;
1234
1235 #ifdef HAVE_DRI3_MODIFIERS
1236    if (chain->has_dri3_modifiers)
1237       options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
1238 #endif
1239
1240    /* Poll for any available event and update the swapchain status. This could
1241     * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
1242     * associated X11 surface has been resized.
1243     */
1244    xcb_generic_event_t *event;
1245    while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
1246       VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
1247       /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1248       result = x11_swapchain_result(chain, result);
1249       free(event);
1250       if (result < 0)
1251          return result;
1252    }
1253
1254    xshmfence_reset(image->shm_fence);
1255
1256    ++chain->sent_image_count;
1257    assert(chain->sent_image_count <= chain->base.image_count);
1258
1259    ++chain->send_sbc;
1260    image->present_queued = true;
1261    image->serial = (uint32_t) chain->send_sbc;
1262
1263    xcb_void_cookie_t cookie =
1264       xcb_present_pixmap_checked(chain->conn,
1265                                  chain->window,
1266                                  image->pixmap,
1267                                  image->serial,
1268                                  0,                            /* valid */
1269                                  image->update_area,           /* update */
1270                                  0,                            /* x_off */
1271                                  0,                            /* y_off */
1272                                  XCB_NONE,                     /* target_crtc */
1273                                  XCB_NONE,
1274                                  image->sync_fence,
1275                                  options,
1276                                  target_msc,
1277                                  divisor,
1278                                  remainder, 0, NULL);
1279    xcb_generic_error_t *error = xcb_request_check(chain->conn, cookie);
1280    if (error) {
1281       free(error);
1282       return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
1283    }
1284
1285    return x11_swapchain_result(chain, VK_SUCCESS);
1286 }
1287
1288 /**
1289  * Send image to X server unaccelerated (software drivers).
1290  */
1291 static VkResult
1292 x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
1293                       uint64_t target_msc)
1294 {
1295    struct x11_image *image = &chain->images[image_index];
1296
1297    xcb_void_cookie_t cookie;
1298    void *myptr = image->base.cpu_map;
1299    size_t hdr_len = sizeof(xcb_put_image_request_t);
1300    int stride_b = image->base.row_pitches[0];
1301    size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
1302    uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);
1303    chain->images[image_index].busy = false;
1304
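   /* Both size and max_req_len are in 4-byte units:
    * xcb_get_maximum_request_length() reports the server's limit in words
    * (honoring BIG-REQUESTS when available), hence the >> 2 above.
    */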
1305    if (size < max_req_len) {
1306       cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1307                              chain->window,
1308                              chain->gc,
1309                              image->base.row_pitches[0] / 4,
1310                              chain->extent.height,
1311                              0,0,0,24,
1312                              image->base.row_pitches[0] * chain->extent.height,
1313                              image->base.cpu_map);
1314       xcb_discard_reply(chain->conn, cookie.sequence);
1315    } else {
1316       int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
1317       int y_start = 0;
1318       int y_todo = chain->extent.height;
1319       while (y_todo) {
1320          int this_lines = MIN2(num_lines, y_todo);
1321          cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
1322                                 chain->window,
1323                                 chain->gc,
1324                                 image->base.row_pitches[0] / 4,
1325                                 this_lines,
1326                                 0,y_start,0,24,
1327                                 this_lines * stride_b,
1328                                 (const uint8_t *)myptr + (y_start * stride_b));
1329          xcb_discard_reply(chain->conn, cookie.sequence);
1330          y_start += this_lines;
1331          y_todo -= this_lines;
1332       }
1333    }
1334
1335    xcb_flush(chain->conn);
1336    return x11_swapchain_result(chain, VK_SUCCESS);
1337 }
1338
1339 /**
1340  * Send image to the X server for presentation at target_msc.
1341  */
1342 static VkResult
1343 x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
1344                    uint64_t target_msc)
1345 {
1346    if (chain->base.wsi->sw && !chain->has_mit_shm)
1347       return x11_present_to_x11_sw(chain, image_index, target_msc);
1348    return x11_present_to_x11_dri3(chain, image_index, target_msc);
1349 }
1350
1351 /**
1352  * Acquire a ready-to-use image from the swapchain.
1353  *
1354  * Usually this means that the image is no longer waiting on presentation and
1355  * has been released by the X server, so the consumer can use it again.
1356  */
1357 static VkResult
1358 x11_acquire_next_image(struct wsi_swapchain *anv_chain,
1359                        const VkAcquireNextImageInfoKHR *info,
1360                        uint32_t *image_index)
1361 {
1362    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1363    uint64_t timeout = info->timeout;
1364
1365    /* If the swapchain is in an error state, don't go any further. */
1366    if (chain->status < 0)
1367       return chain->status;
1368
1369    if (chain->base.wsi->sw && !chain->has_mit_shm) {
1370       for (unsigned i = 0; i < chain->base.image_count; i++) {
1371          if (!chain->images[i].busy) {
1372             *image_index = i;
1373             chain->images[i].busy = true;
1374             xcb_generic_error_t *err;
1375
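            /* In the unaccelerated path there are no Present events to tell
             * us about window resizes, so poll the geometry on every acquire
             * and report VK_SUBOPTIMAL_KHR when it has changed.
             */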
1376             xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(chain->conn, chain->window);
1377             xcb_get_geometry_reply_t *geom = xcb_get_geometry_reply(chain->conn, geom_cookie, &err);
1378             VkResult result = VK_SUCCESS;
1379             if (geom) {
1380                if (chain->extent.width != geom->width ||
1381                    chain->extent.height != geom->height)
1382                   result = VK_SUBOPTIMAL_KHR;
1383             } else {
1384                result = VK_ERROR_SURFACE_LOST_KHR;
1385             }
1386             free(err);
1387             free(geom);
1388             return result;
1389          }
1390       }
1391       return VK_NOT_READY;
1392    }
1393
1394    if (chain->has_acquire_queue) {
1395       return x11_acquire_next_image_from_queue(chain, image_index, timeout);
1396    } else {
1397       return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
1398    }
1399 }
1400
1401 #define MAX_DAMAGE_RECTS 64
1402
1403 /**
1404  * Queue a new presentation of an image that was previously acquired by the
1405  * consumer.
1406  *
1407  * Note that in immediate presentation mode this does not really queue the
1408  * presentation but directly asks the X server to show it.
1409  */
1410 static VkResult
1411 x11_queue_present(struct wsi_swapchain *anv_chain,
1412                   uint32_t image_index,
1413                   const VkPresentRegionKHR *damage)
1414 {
1415    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1416    xcb_xfixes_region_t update_area = 0;
1417
1418    /* If the swapchain is in an error state, don't go any further. */
1419    if (chain->status < 0)
1420       return chain->status;
1421
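   /* VK_KHR_incremental_present: upload the damage rects into the image's
    * long-lived XFixes region so PresentPixmap can limit the update area.
    */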
1422    if (damage && damage->pRectangles && damage->rectangleCount > 0 &&
1423       damage->rectangleCount <= MAX_DAMAGE_RECTS) {
1424       xcb_rectangle_t rects[MAX_DAMAGE_RECTS];
1425
1426       update_area = chain->images[image_index].update_region;
1427       for (unsigned i = 0; i < damage->rectangleCount; i++) {
1428          const VkRectLayerKHR *rect = &damage->pRectangles[i];
1429          assert(rect->layer == 0);
1430          rects[i].x = rect->offset.x;
1431          rects[i].y = rect->offset.y;
1432          rects[i].width = rect->extent.width;
1433          rects[i].height = rect->extent.height;
1434       }
1435       xcb_xfixes_set_region(chain->conn, update_area, damage->rectangleCount, rects);
1436    }
1437    chain->images[image_index].update_area = update_area;
1438
1439    chain->images[image_index].busy = true;
1440    if (chain->has_present_queue) {
1441       wsi_queue_push(&chain->present_queue, image_index);
1442       return chain->status;
1443    } else {
1444       /* No present queue means immediate mode, so we present immediately. */
1445       return x11_present_to_x11(chain, image_index, 0);
1446    }
1447 }
1448
1449 /**
1450  * Decides if an early wait on buffer fences before buffer submission is required. That is the case for:
1451  *   - Mailbox mode, as otherwise the latest image in the queue might not be fully rendered at
1452  *     present time, which could lead to missing a frame.
1453  *   - Immediate mode under Xwayland, as it works practically the same as mailbox mode using the
1454  *     mailbox mechanism of Wayland. Sending a buffer with fences not yet signalled can make the
1455  *     compositor miss a frame when compositing the final image with this buffer.
1456  *
1457  * Note though that early waits can be disabled in general on Xwayland by setting the
1458  * 'vk_xwayland_wait_ready' DRIConf option to false.
1459  */
1460 static bool
1461 x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
1462                           struct wsi_x11_connection *wsi_conn,
1463                           VkPresentModeKHR present_mode)
1464 {
1465    if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
1466       return false;
1467    }
1468
1469    switch (present_mode) {
1470    case VK_PRESENT_MODE_MAILBOX_KHR:
1471       return true;
1472    case VK_PRESENT_MODE_IMMEDIATE_KHR:
1473       return wsi_conn->is_xwayland;
1474    default:
1475       return false;
1476    }
1477 }
1478
1479 /**
1480  * The number of images that are not owned by X11:
1481  *  (1) in the ownership of the app, or
1482  *  (2) waiting for the app to take ownership through an acquire, or
1483  *  (3) in the present queue waiting for the FIFO thread to present to X11.
1484  */
1485 static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
1486 {
1487    return chain->base.image_count - chain->sent_image_count;
1488 }
1489
1490 /**
1491  * Our queue manager. Although it is called x11_manage_fifo_queues, it only
1492  * directly manages the present-queue, and does so in both fifo and mailbox
1493  * presentation modes (there is no present-queue in immediate mode, with the exception of Xwayland).
1494  *
1495  * It runs in a separate thread, blocking on and reacting to images queued
1496  * to the present-queue.
1497  *
1498  * In mailbox mode the queue management is simplified since we only need to
1499  * pull new images from the present queue and can directly present them.
1500  *
1501  * In fifo mode images can only be presented one after the other. For that,
1502  * after sending an image to the X server we wait until it has either been
1503  * presented or released, and only then pull a new image from the present-queue.
1504  */
1505 static void *
1506 x11_manage_fifo_queues(void *state)
1507 {
1508    struct x11_swapchain *chain = state;
1509    struct wsi_x11_connection *wsi_conn =
1510       wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
1511    VkResult result = VK_SUCCESS;
1512
1513    assert(chain->has_present_queue);
1514
1515    u_thread_setname("WSI swapchain queue");
1516
1517    while (chain->status >= 0) {
1518       /* We can block here unconditionally because after an image has been sent
1519        * to the server (later on in this loop) we ensure that at least one image
1520        * is acquirable by the consumer, or we wait there for such an event.
1521        */
1522       uint32_t image_index = 0;
1523       result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
1524       assert(result != VK_TIMEOUT);
1525
1526       if (result < 0) {
1527          goto fail;
1528       } else if (chain->status < 0) {
1529          /* The status can change underneath us if the swapchain is destroyed
1530           * from another thread.
1531           */
1532          return NULL;
1533       }
1534
1535       /* Waiting for the GPU work to finish at this point in time is required in certain usage
1536        * scenarios. Otherwise we wait as usual in wsi_common_queue_present.
1537        */
1538       if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
1539                                     chain->base.present_mode)) {
1540          result = chain->base.wsi->WaitForFences(chain->base.device, 1,
1541                                         &chain->base.fences[image_index],
1542                                         true, UINT64_MAX);
1543          if (result != VK_SUCCESS) {
1544             result = VK_ERROR_OUT_OF_DATE_KHR;
1545             goto fail;
1546          }
1547       }
1548
1549       uint64_t target_msc = 0;
1550       if (chain->has_acquire_queue)
1551          target_msc = chain->last_present_msc + 1;
1552
1553       result = x11_present_to_x11(chain, image_index, target_msc);
1554       if (result < 0)
1555          goto fail;
1556
1557       if (chain->has_acquire_queue) {
1558          /* Assume this isn't a swapchain where we force 5 images, because those
1559           * don't end up with an acquire queue at the moment.
1560           */
1561          unsigned min_image_count = x11_get_min_image_count(chain->base.wsi);
1562
1563          /* With drirc overrides, some games end up with a swapchain containing
1564           * fewer than the minimum number of images. */
1565          min_image_count = MIN2(min_image_count, chain->base.image_count);
1566
1567          /* We always need to ensure that the app can have this number of images
1568           * acquired concurrently in between presents:
1569           * "VUID-vkAcquireNextImageKHR-swapchain-01802
1570           *  If the number of currently acquired images is greater than the difference
1571           *  between the number of images in swapchain and the value of
1572           *  VkSurfaceCapabilitiesKHR::minImageCount as returned by a call to
1573           *  vkGetPhysicalDeviceSurfaceCapabilities2KHR with the surface used to
1574           *  create swapchain, timeout must not be UINT64_MAX"
1575           */
1576          unsigned forward_progress_guaranteed_acquired_images =
1577             chain->base.image_count - min_image_count + 1;
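         /* A worked example with hypothetical numbers: image_count == 3 and
          * min_image_count == 2 give 3 - 2 + 1 == 2, so below we drain events
          * until at least two images are driver-owned. The app may then hold
          * one acquired image (3 - 2, per the VUID above) while one more stays
          * acquirable, which guarantees forward progress.
          */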
1578
1579          /* Wait for our presentation to occur and ensure we have at least one
1580           * image that can be acquired by the client afterwards. This ensures we
1581           * can pull on the present-queue on the next loop.
1582           */
1583          while (chain->images[image_index].present_queued ||
1584                 /* If we have images in the present queue the outer loop won't block, and a break
1585                  * here would end up at this loop again; otherwise a break here satisfies
1586                  * VUID-vkAcquireNextImageKHR-swapchain-01802. */
1587                 x11_driver_owned_images(chain) < forward_progress_guaranteed_acquired_images) {
1588
1589             xcb_generic_event_t *event =
1590                xcb_wait_for_special_event(chain->conn, chain->special_event);
1591             if (!event) {
1592                result = VK_ERROR_SURFACE_LOST_KHR;
1593                goto fail;
1594             }
1595
1596             result = x11_handle_dri3_present_event(chain, (void *)event);
1597             /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
1598             result = x11_swapchain_result(chain, result);
1599             free(event);
1600             if (result < 0)
1601                goto fail;
1602          }
1603       }
1604    }
1605
1606 fail:
1607    x11_swapchain_result(chain, result);
1608    if (chain->has_acquire_queue)
1609       wsi_queue_push(&chain->acquire_queue, UINT32_MAX);
1610
1611    return NULL;
1612 }
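/* A rough sketch of the image lifecycle in fifo mode with both queues,
 * summarizing the thread above (mailbox mode skips the acquire-queue half):
 *
 *    vkAcquireNextImageKHR  <--  acquire_queue  <--  IDLE/COMPLETE events
 *            |                                              ^
 *            v                                              |
 *    vkQueuePresentKHR  -->  present_queue  -->  this thread  -->  X server
 */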
1613
1614 static uint8_t *
1615 alloc_shm(struct wsi_image *imagew, unsigned size)
1616 {
1617 #ifdef HAVE_SYS_SHM_H
1618    struct x11_image *image = (struct x11_image *)imagew;
1619    image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
1620    if (image->shmid < 0)
1621       return NULL;
1622
1623    uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
1624    /* mark the segment immediately for deletion to avoid leaks */
1625    shmctl(image->shmid, IPC_RMID, 0);
1626
1627    if (addr == (uint8_t *) -1)
1628       return NULL;
1629
1630    image->shmaddr = addr;
1631    return addr;
1632 #else
1633    return NULL;
1634 #endif
1635 }
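/* The shmget/shmat/IPC_RMID sequence above is the usual SysV pattern for
 * leak-proof segments: IPC_RMID only marks the segment for destruction, so on
 * Linux it remains attachable by id (which the X server relies on in
 * xcb_shm_attach below) and is reclaimed automatically once the last user
 * detaches.
 */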
1636
1637 static VkResult
1638 x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
1639                const VkSwapchainCreateInfoKHR *pCreateInfo,
1640                const VkAllocationCallbacks* pAllocator,
1641                struct x11_image *image)
1642 {
1643    xcb_void_cookie_t cookie;
1644    VkResult result;
1645    uint32_t bpp = 32;
1646    int fence_fd;
1647
1648    result = wsi_create_image(&chain->base, &chain->base.image_info,
1649                              &image->base);
1650    if (result != VK_SUCCESS)
1651       return result;
1652
1653    image->update_region = xcb_generate_id(chain->conn);
1654    xcb_xfixes_create_region(chain->conn, image->update_region, 0, NULL);
1655
1656    if (chain->base.wsi->sw) {
1657       if (!chain->has_mit_shm) {
1658          image->busy = false;
1659          return VK_SUCCESS;
1660       }
1661
1662       image->shmseg = xcb_generate_id(chain->conn);
1663
1664       xcb_shm_attach(chain->conn,
1665                      image->shmseg,
1666                      image->shmid,
1667                      0);
1668       image->pixmap = xcb_generate_id(chain->conn);
1669       cookie = xcb_shm_create_pixmap_checked(chain->conn,
1670                                              image->pixmap,
1671                                              chain->window,
1672                                              image->base.row_pitches[0] / 4,
1673                                              pCreateInfo->imageExtent.height,
1674                                              chain->depth,
1675                                              image->shmseg, 0);
1676       xcb_discard_reply(chain->conn, cookie.sequence);
1677       goto out_fence;
1678    }
1679    image->pixmap = xcb_generate_id(chain->conn);
1680
1681 #ifdef HAVE_DRI3_MODIFIERS
1682    if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
1683       /* If the image has a modifier, we must have DRI3 v1.2. */
1684       assert(chain->has_dri3_modifiers);
1685
1686       /* XCB requires an array of file descriptors but we only have one */
1687       int fds[4] = { -1, -1, -1, -1 };
1688       for (int i = 0; i < image->base.num_planes; i++) {
1689          fds[i] = os_dupfd_cloexec(image->base.dma_buf_fd);
1690          if (fds[i] == -1) {
1691             for (int j = 0; j < i; j++)
1692                close(fds[j]);
1693
1694             return VK_ERROR_OUT_OF_HOST_MEMORY;
1695          }
1696       }
1697
1698       cookie =
1699          xcb_dri3_pixmap_from_buffers_checked(chain->conn,
1700                                               image->pixmap,
1701                                               chain->window,
1702                                               image->base.num_planes,
1703                                               pCreateInfo->imageExtent.width,
1704                                               pCreateInfo->imageExtent.height,
1705                                               image->base.row_pitches[0],
1706                                               image->base.offsets[0],
1707                                               image->base.row_pitches[1],
1708                                               image->base.offsets[1],
1709                                               image->base.row_pitches[2],
1710                                               image->base.offsets[2],
1711                                               image->base.row_pitches[3],
1712                                               image->base.offsets[3],
1713                                               chain->depth, bpp,
1714                                               image->base.drm_modifier,
1715                                               fds);
1716    } else
1717 #endif
1718    {
1719       /* Without passing modifiers, we can't have multi-plane RGB images. */
1720       assert(image->base.num_planes == 1);
1721
1722       /* XCB will take ownership of the FD we pass it. */
1723       int fd = os_dupfd_cloexec(image->base.dma_buf_fd);
1724       if (fd == -1)
1725          return VK_ERROR_OUT_OF_HOST_MEMORY;
1726
1727       cookie =
1728          xcb_dri3_pixmap_from_buffer_checked(chain->conn,
1729                                              image->pixmap,
1730                                              chain->window,
1731                                              image->base.sizes[0],
1732                                              pCreateInfo->imageExtent.width,
1733                                              pCreateInfo->imageExtent.height,
1734                                              image->base.row_pitches[0],
1735                                              chain->depth, bpp, fd);
1736    }
1737
1738    xcb_discard_reply(chain->conn, cookie.sequence);
1739
1740 out_fence:
1741    fence_fd = xshmfence_alloc_shm();
1742    if (fence_fd < 0)
1743       goto fail_pixmap;
1744
1745    image->shm_fence = xshmfence_map_shm(fence_fd);
1746    if (image->shm_fence == NULL)
1747       goto fail_shmfence_alloc;
1748
1749    image->sync_fence = xcb_generate_id(chain->conn);
1750    xcb_dri3_fence_from_fd(chain->conn,
1751                           image->pixmap,
1752                           image->sync_fence,
1753                           false,
1754                           fence_fd);
1755
1756    image->busy = false;
1757    xshmfence_trigger(image->shm_fence);
1758
1759    return VK_SUCCESS;
1760
1761 fail_shmfence_alloc:
1762    close(fence_fd);
1763
1764 fail_pixmap:
1765    cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1766    xcb_discard_reply(chain->conn, cookie.sequence);
1767
1768    wsi_destroy_image(&chain->base, &image->base);
1769
1770    return VK_ERROR_INITIALIZATION_FAILED;
1771 }
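/* A summary of how the synchronization objects created above are used at
 * present time elsewhere in this file: before presenting, the shm fence is
 * reset with xshmfence_reset() and image->sync_fence is passed as the idle
 * fence of xcb_present_pixmap(); the X server triggers it once it is done
 * reading the pixmap, and xshmfence_await() (or the initial trigger above for
 * a brand-new image) marks the image safe to reuse.
 */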
1772
1773 static void
1774 x11_image_finish(struct x11_swapchain *chain,
1775                  const VkAllocationCallbacks* pAllocator,
1776                  struct x11_image *image)
1777 {
1778    xcb_void_cookie_t cookie;
1779
1780    if (!chain->base.wsi->sw || chain->has_mit_shm) {
1781       cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
1782       xcb_discard_reply(chain->conn, cookie.sequence);
1783       xshmfence_unmap_shm(image->shm_fence);
1784
1785       cookie = xcb_free_pixmap(chain->conn, image->pixmap);
1786       xcb_discard_reply(chain->conn, cookie.sequence);
1787
1788       cookie = xcb_xfixes_destroy_region(chain->conn, image->update_region);
1789       xcb_discard_reply(chain->conn, cookie.sequence);
1790    }
1791
1792    wsi_destroy_image(&chain->base, &image->base);
1793 #ifdef HAVE_SYS_SHM_H
1794    if (image->shmaddr)
1795       shmdt(image->shmaddr);
1796 #endif
1797 }
1798
1799 static void
1800 wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
1801                            xcb_connection_t *conn, xcb_window_t window,
1802                            uint8_t depth, uint8_t bpp,
1803                            VkCompositeAlphaFlagsKHR vk_alpha,
1804                            uint64_t **modifiers_in, uint32_t *num_modifiers_in,
1805                            uint32_t *num_tranches_in,
1806                            const VkAllocationCallbacks *pAllocator)
1807 {
1808    if (!wsi_conn->has_dri3_modifiers)
1809       goto out;
1810
1811 #ifdef HAVE_DRI3_MODIFIERS
1812    xcb_generic_error_t *error = NULL;
1813    xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
1814       xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
1815    xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
1816       xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
1817    free(error);
1818
1819    if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
1820                       mod_reply->num_screen_modifiers == 0)) {
1821       free(mod_reply);
1822       goto out;
1823    }
1824
1825    uint32_t n = 0;
1826    uint32_t counts[2];
1827    uint64_t *modifiers[2];
1828
1829    if (mod_reply->num_window_modifiers) {
1830       counts[n] = mod_reply->num_window_modifiers;
1831       modifiers[n] = vk_alloc(pAllocator,
1832                               counts[n] * sizeof(uint64_t),
1833                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1834       if (!modifiers[n]) {
1835          free(mod_reply);
1836          goto out;
1837       }
1838
1839       memcpy(modifiers[n],
1840              xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
1841              counts[n] * sizeof(uint64_t));
1842       n++;
1843    }
1844
1845    if (mod_reply->num_screen_modifiers) {
1846       counts[n] = mod_reply->num_screen_modifiers;
1847       modifiers[n] = vk_alloc(pAllocator,
1848                               counts[n] * sizeof(uint64_t),
1849                               8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1850       if (!modifiers[n]) {
1851          if (n > 0)
1852             vk_free(pAllocator, modifiers[0]);
1853          free(mod_reply);
1854          goto out;
1855       }
1856
1857       memcpy(modifiers[n],
1858              xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
1859              counts[n] * sizeof(uint64_t));
1860       n++;
1861    }
1862
1863    for (int i = 0; i < n; i++) {
1864       modifiers_in[i] = modifiers[i];
1865       num_modifiers_in[i] = counts[i];
1866    }
1867    *num_tranches_in = n;
1868
1869    free(mod_reply);
1870    return;
1871 #endif
1872 out:
1873    *num_tranches_in = 0;
1874 }
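/* On success the out-arrays are filled with one tranche per modifier source,
 * window modifiers first. For example (hypothetical values), two window
 * modifiers plus one screen modifier would leave the caller with:
 *
 *    *num_tranches_in = 2
 *    num_modifiers_in = { 2, 1 }
 *    modifiers_in[0]  = { DRM_FORMAT_MOD_LINEAR, I915_FORMAT_MOD_X_TILED }
 *    modifiers_in[1]  = { DRM_FORMAT_MOD_LINEAR }
 *
 * The caller owns and must vk_free() the two modifier arrays.
 */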
1875
1876 static VkResult
1877 x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
1878                       const VkAllocationCallbacks *pAllocator)
1879 {
1880    struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
1881    xcb_void_cookie_t cookie;
1882
1883    if (chain->has_present_queue) {
1884       chain->status = VK_ERROR_OUT_OF_DATE_KHR;
1885       /* Push a UINT32_MAX to wake up the manager */
1886       wsi_queue_push(&chain->present_queue, UINT32_MAX);
1887       pthread_join(chain->queue_manager, NULL);
1888
1889       if (chain->has_acquire_queue)
1890          wsi_queue_destroy(&chain->acquire_queue);
1891       wsi_queue_destroy(&chain->present_queue);
1892    }
1893
1894    for (uint32_t i = 0; i < chain->base.image_count; i++)
1895       x11_image_finish(chain, pAllocator, &chain->images[i]);
1896    wsi_destroy_image_info(&chain->base, &chain->base.image_info);
1897
1898    xcb_unregister_for_special_event(chain->conn, chain->special_event);
1899    cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
1900                                              chain->window,
1901                                              XCB_PRESENT_EVENT_MASK_NO_EVENT);
1902    xcb_discard_reply(chain->conn, cookie.sequence);
1903
1904    wsi_swapchain_finish(&chain->base);
1905
1906    vk_free(pAllocator, chain);
1907
1908    return VK_SUCCESS;
1909 }
1910
1911 static void
1912 wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
1913                                    xcb_drawable_t drawable,
1914                                    uint32_t state)
1915 {
1916    static char const name[] = "_VARIABLE_REFRESH";
1917    xcb_intern_atom_cookie_t cookie;
1918    xcb_intern_atom_reply_t* reply;
1919    xcb_void_cookie_t check;
1920
1921    cookie = xcb_intern_atom(conn, 0, strlen(name), name);
1922    reply = xcb_intern_atom_reply(conn, cookie, NULL);
1923    if (reply == NULL)
1924       return;
1925
1926    if (state)
1927       check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
1928                                           drawable, reply->atom,
1929                                           XCB_ATOM_CARDINAL, 32, 1, &state);
1930    else
1931       check = xcb_delete_property_checked(conn, drawable, reply->atom);
1932
1933    xcb_discard_reply(conn, check.sequence);
1934    free(reply);
1935 }
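/* For debugging, the property set above can be inspected from a shell with
 * standard X11 tooling (the window id is hypothetical):
 *
 *    $ xprop -id 0x3c00002 _VARIABLE_REFRESH
 *    _VARIABLE_REFRESH(CARDINAL) = 1
 */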
1936
1937 /**
1938  * Create the swapchain.
1939  *
1940  * Supports immediate, fifo and mailbox presentation modes.
1941  *
1942  */
1943 static VkResult
1944 x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
1945                              VkDevice device,
1946                              struct wsi_device *wsi_device,
1947                              const VkSwapchainCreateInfoKHR *pCreateInfo,
1948                              const VkAllocationCallbacks* pAllocator,
1949                              struct wsi_swapchain **swapchain_out)
1950 {
1951    struct x11_swapchain *chain;
1952    xcb_void_cookie_t cookie;
1953    VkResult result;
1954    VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);
1955
1956    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);
1957
1958    /* Get xcb connection from the icd_surface and from that our internal struct
1959     * representing it.
1960     */
1961    xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
1962    struct wsi_x11_connection *wsi_conn =
1963       wsi_x11_get_connection(wsi_device, conn);
1964    if (!wsi_conn)
1965       return VK_ERROR_OUT_OF_HOST_MEMORY;
1966
1967    /* Get number of images in our swapchain. This count depends on:
1968     * - requested minimal image count
1969     * - device characteristics
1970     * - presentation mode.
1971     */
1972    unsigned num_images = pCreateInfo->minImageCount;
1973    if (wsi_device->x11.strict_imageCount)
1974       num_images = pCreateInfo->minImageCount;
1975    else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
1976       num_images = MAX2(num_images, 5);
1977    else if (wsi_device->x11.ensure_minImageCount)
1978       num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));
1979
1980    /* Check that we have a window up-front. It is an error to not have one. */
1981    xcb_window_t window = x11_surface_get_window(icd_surface);
1982
1983    /* Get the geometry of that window. The bit depth of the swapchain must match it, and the
1984     * chain's image extents should fit it so that presents can use performance-optimizing flips.
1985     */
1986    xcb_get_geometry_reply_t *geometry =
1987       xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
1988    if (geometry == NULL)
1989       return VK_ERROR_SURFACE_LOST_KHR;
1990    const uint32_t bit_depth = geometry->depth;
1991    const uint16_t cur_width = geometry->width;
1992    const uint16_t cur_height = geometry->height;
1993    free(geometry);
1994
1995    /* Allocate the actual swapchain. The size depends on image count. */
1996    size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
1997    chain = vk_zalloc(pAllocator, size, 8,
1998                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1999    if (chain == NULL)
2000       return VK_ERROR_OUT_OF_HOST_MEMORY;
2001
2002    /* When our local device is not compatible with the DRI3 device provided by
2003     * the X server we assume this is a PRIME system.
2004     */
2005    bool use_buffer_blit = false;
2006    if (!wsi_device->sw)
2007       if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
2008          use_buffer_blit = true;
2009
2010    result = wsi_swapchain_init(wsi_device, &chain->base, device,
2011                                pCreateInfo, pAllocator, use_buffer_blit);
2012    if (result != VK_SUCCESS)
2013       goto fail_alloc;
2014
2015    chain->base.destroy = x11_swapchain_destroy;
2016    chain->base.get_wsi_image = x11_get_wsi_image;
2017    chain->base.acquire_next_image = x11_acquire_next_image;
2018    chain->base.queue_present = x11_queue_present;
2019    chain->base.present_mode = present_mode;
2020    chain->base.image_count = num_images;
2021    chain->conn = conn;
2022    chain->window = window;
2023    chain->depth = bit_depth;
2024    chain->extent = pCreateInfo->imageExtent;
2025    chain->send_sbc = 0;
2026    chain->sent_image_count = 0;
2027    chain->last_present_msc = 0;
2028    chain->has_acquire_queue = false;
2029    chain->has_present_queue = false;
2030    chain->status = VK_SUCCESS;
2031    chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
2032    chain->has_mit_shm = wsi_conn->has_mit_shm;
2033
2034    /* When images in the swapchain don't fit the window, X can still present them, but it won't
2035     * happen by flip, only by copy. So this is a suboptimal copy: if the client changed the
2036     * chain extents to match the window, X might be able to flip again.
2037     */
2038    if (chain->extent.width != cur_width || chain->extent.height != cur_height)
2039        chain->status = VK_SUBOPTIMAL_KHR;
2040
2041    /* On a new swapchain this helper variable is set to false. It only has an effect once we have
2042     * done at least one flip and have gone back to copying afterwards. In that case it is presumed
2043     * that there is a high likelihood X could do flips again if the client reallocates a
2044     * new swapchain.
2045     *
2046     * Note that we used to inherit this property from 'pCreateInfo->oldSwapchain'. But when it
2047     * was true, and when the next present was completed with copying, we would return
2048     * VK_SUBOPTIMAL_KHR and hint the app to reallocate again for no good reason. If all following
2049     * presents on the surface were completed with copying because of some surface state change, we
2050     * would always return VK_SUBOPTIMAL_KHR no matter how many times the app had reallocated.
2051     *
2052     * Note also that it is questionable in general whether that mechanism is really useful. It is
2053     * not clear why, on a change from flipping to copying, we can assume a reallocation has a high
2054     * chance of making flips work again per se. In other words, it is not clear why there is a need
2055     * for another way to inform clients about suboptimal copies besides forwarding the
2056     * 'PresentOptionSuboptimal' complete mode.
2057     */
2058    chain->copy_is_suboptimal = false;
2059
2060    /* For our swapchain we need to listen to the following Present extension events:
2061     * - Configure: Window dimensions changed. Images in the swapchain might need
2062     *              to be reallocated.
2063     * - Complete: An image from our swapchain was presented on the output.
2064     * - Idle: An image from our swapchain is not anymore accessed by the X
2065     *         server and can be reused.
2066     */
2067    chain->event_id = xcb_generate_id(chain->conn);
2068    xcb_present_select_input(chain->conn, chain->event_id, chain->window,
2069                             XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
2070                             XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
2071                             XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);
2072
2073    /* Create an XCB event queue to hold present events outside of the usual
2074     * application event queue.
2075     */
2076    chain->special_event =
2077       xcb_register_for_special_xge(chain->conn, &xcb_present_id,
2078                                    chain->event_id, NULL);
2079
2080    /* Create the graphics context. */
2081    chain->gc = xcb_generate_id(chain->conn);
2082    if (!chain->gc) {
2083       /* FINISHME: Choose a better error. */
2084       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2085       goto fail_register;
2086    }
2087
2088    cookie = xcb_create_gc(chain->conn,
2089                           chain->gc,
2090                           chain->window,
2091                           XCB_GC_GRAPHICS_EXPOSURES,
2092                           (uint32_t []) { 0 });
2093    xcb_discard_reply(chain->conn, cookie.sequence);
2094
2095    uint64_t *modifiers[2] = {NULL, NULL};
2096    uint32_t num_modifiers[2] = {0, 0};
2097    uint32_t num_tranches = 0;
2098    if (wsi_device->supports_modifiers)
2099       wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
2100                                  pCreateInfo->compositeAlpha,
2101                                  modifiers, num_modifiers, &num_tranches,
2102                                  pAllocator);
2103
2104    if (wsi_device->sw) {
2105       result = wsi_configure_cpu_image(&chain->base, pCreateInfo,
2106                                        chain->has_mit_shm ? &alloc_shm : NULL,
2107                                        &chain->base.image_info);
2108    } else if (chain->base.use_buffer_blit) {
2109       bool use_modifier = num_tranches > 0;
2110       result = wsi_configure_prime_image(&chain->base, pCreateInfo,
2111                                          use_modifier,
2112                                          &chain->base.image_info);
2113    } else {
2114       result = wsi_configure_native_image(&chain->base, pCreateInfo,
2115                                           num_tranches, num_modifiers,
2116                                           (const uint64_t *const *)modifiers,
2117                                           &chain->base.image_info);
2118    }
2119    if (result != VK_SUCCESS)
2120       goto fail_modifiers;
2121
2122    uint32_t image = 0;
2123    for (; image < chain->base.image_count; image++) {
2124       result = x11_image_init(device, chain, pCreateInfo, pAllocator,
2125                               &chain->images[image]);
2126       if (result != VK_SUCCESS)
2127          goto fail_init_images;
2128    }
2129
2130    /* Initialize queues for images in our swapchain. Possible queues are:
2131     * - Present queue: for images sent to the X server but not yet presented.
2132     * - Acquire queue: for images already presented but not yet released by the
2133     *                  X server.
2134     *
2135     * Queues are generally not used with software drivers; otherwise, which queues
2136     * are used depends on our presentation mode:
2137     * - Fifo: present and acquire
2138     * - Mailbox: present only
2139     * - Immediate: present when we wait on fences before buffer submission (Xwayland)
2140     */
2141    if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2142         chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
2143         x11_needs_wait_for_fences(wsi_device, wsi_conn,
2144                                   chain->base.present_mode)) &&
2145        !chain->base.wsi->sw) {
2146       chain->has_present_queue = true;
2147
2148       /* The queues have a length of base.image_count + 1 because we will
2149        * occasionally use UINT32_MAX to signal the other thread that an error
2150        * has occurred and we don't want an overflow.
2151        */
2152       int ret;
2153       ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
2154       if (ret) {
2155          goto fail_init_images;
2156       }
2157
2158       if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
2159           chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
2160          chain->has_acquire_queue = true;
2161
2162          ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
2163          if (ret) {
2164             wsi_queue_destroy(&chain->present_queue);
2165             goto fail_init_images;
2166          }
2167
2168          for (unsigned i = 0; i < chain->base.image_count; i++)
2169             wsi_queue_push(&chain->acquire_queue, i);
2170       }
2171
2172       ret = pthread_create(&chain->queue_manager, NULL,
2173                            x11_manage_fifo_queues, chain);
2174       if (ret) {
2175          wsi_queue_destroy(&chain->present_queue);
2176          if (chain->has_acquire_queue)
2177             wsi_queue_destroy(&chain->acquire_queue);
2178
2179          goto fail_init_images;
2180       }
2181    }
2182
2183    assert(chain->has_present_queue || !chain->has_acquire_queue);
2184
2185    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2186       vk_free(pAllocator, modifiers[i]);
2187
2188    /* It is safe to set it here as only one swapchain can be associated with
2189     * the window, and swapchain creation does the association. At this point
2190     * we know the creation is going to succeed. */
2191    wsi_x11_set_adaptive_sync_property(conn, window,
2192                                       wsi_device->enable_adaptive_sync);
2193
2194    *swapchain_out = &chain->base;
2195
2196    return VK_SUCCESS;
2197
2198 fail_init_images:
2199    for (uint32_t j = 0; j < image; j++)
2200       x11_image_finish(chain, pAllocator, &chain->images[j]);
2201
2202    wsi_destroy_image_info(&chain->base, &chain->base.image_info);
2203
2204 fail_modifiers:
2205    for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
2206       vk_free(pAllocator, modifiers[i]);
2207
2208 fail_register:
2209    xcb_unregister_for_special_event(chain->conn, chain->special_event);
2210
2211    wsi_swapchain_finish(&chain->base);
2212
2213 fail_alloc:
2214    vk_free(pAllocator, chain);
2215
2216    return result;
2217 }
2218
2219 VkResult
2220 wsi_x11_init_wsi(struct wsi_device *wsi_device,
2221                  const VkAllocationCallbacks *alloc,
2222                  const struct driOptionCache *dri_options)
2223 {
2224    struct wsi_x11 *wsi;
2225    VkResult result;
2226
2227    wsi = vk_alloc(alloc, sizeof(*wsi), 8,
2228                    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
2229    if (!wsi) {
2230       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2231       goto fail;
2232    }
2233
2234    int ret = pthread_mutex_init(&wsi->mutex, NULL);
2235    if (ret != 0) {
2236       if (ret == ENOMEM) {
2237          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2238       } else {
2239          /* FINISHME: Choose a better error. */
2240          result = VK_ERROR_OUT_OF_HOST_MEMORY;
2241       }
2242
2243       goto fail_alloc;
2244    }
2245
2246    wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
2247                                               _mesa_key_pointer_equal);
2248    if (!wsi->connections) {
2249       result = VK_ERROR_OUT_OF_HOST_MEMORY;
2250       goto fail_mutex;
2251    }
2252
2253    if (dri_options) {
2254       if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
2255          wsi_device->x11.override_minImageCount =
2256             driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
2257       }
2258       if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
2259          wsi_device->x11.strict_imageCount =
2260             driQueryOptionb(dri_options, "vk_x11_strict_image_count");
2261       }
2262       if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
2263          wsi_device->x11.ensure_minImageCount =
2264             driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
2265       }
2266       wsi_device->x11.xwaylandWaitReady = true;
2267       if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
2268          wsi_device->x11.xwaylandWaitReady =
2269             driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
2270       }
2271    }
2272
2273    wsi->base.get_support = x11_surface_get_support;
2274    wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
2275    wsi->base.get_formats = x11_surface_get_formats;
2276    wsi->base.get_formats2 = x11_surface_get_formats2;
2277    wsi->base.get_present_modes = x11_surface_get_present_modes;
2278    wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
2279    wsi->base.create_swapchain = x11_surface_create_swapchain;
2280
2281    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
2282    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;
2283
2284    return VK_SUCCESS;
2285
2286 fail_mutex:
2287    pthread_mutex_destroy(&wsi->mutex);
2288 fail_alloc:
2289    vk_free(alloc, wsi);
2290 fail:
2291    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
2292    wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;
2293
2294    return result;
2295 }
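/* A usage sketch (not part of the driver): the options parsed above can be set
 * per application in an ~/.drirc file. The application name and values here
 * are hypothetical:
 *
 *    <driconf>
 *       <device>
 *          <application name="some-game" executable="some-game">
 *             <option name="vk_x11_override_min_image_count" value="3" />
 *             <option name="vk_x11_strict_image_count" value="true" />
 *             <option name="vk_xwayland_wait_ready" value="false" />
 *          </application>
 *       </device>
 *    </driconf>
 */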
2296
2297 void
2298 wsi_x11_finish_wsi(struct wsi_device *wsi_device,
2299                    const VkAllocationCallbacks *alloc)
2300 {
2301    struct wsi_x11 *wsi =
2302       (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];
2303
2304    if (wsi) {
2305       hash_table_foreach(wsi->connections, entry)
2306          wsi_x11_connection_destroy(wsi_device, entry->data);
2307
2308       _mesa_hash_table_destroy(wsi->connections, NULL);
2309
2310       pthread_mutex_destroy(&wsi->mutex);
2311
2312       vk_free(alloc, wsi);
2313    }
2314 }