xkb_x11_keymap_new_from_device: Fewer X11 round-trips
author Uli Schlachter <psychon@znc.in>
Sun, 7 Mar 2021 06:42:28 +0000 (07:42 +0100)
committer Ran Benita <ran@unusedvar.com>
Tue, 9 Mar 2021 09:00:13 +0000 (11:00 +0200)
On my system, calling xkb_x11_keymap_new_from_device() did 78 round trips to the
X11 server, which seems excessive. This commit brings this number down to about
9 to 10 round trips.

The existing functions adopt_atom() and adopt_atoms() guarantee that the atom
was adopted by the time they return. Thus, each call to these functions must do
a round-trip. However, none of the callers need this guarantee.
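
For reference, each adopt_atom() call boiled down to a blocking
request/reply pair, roughly like the following sketch (simplified from the
removed code in src/x11/util.c; the batching done by adopt_atoms() is left
out):

    /* Synchronous: wait for the reply before returning. */
    xcb_get_atom_name_cookie_t cookie = xcb_get_atom_name(conn, atom);
    xcb_get_atom_name_reply_t *reply =
        xcb_get_atom_name_reply(conn, cookie, NULL); /* forces a round-trip */
    if (!reply)
        return false;
    *out = xkb_atom_intern(ctx, xcb_get_atom_name_name(reply),
                           xcb_get_atom_name_name_length(reply));
    free(reply);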

This commit makes atom adoption asynchronous: the atom is only actually
adopted some time later. Until then, it sits in a pending "limbo" state.
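
With the new x11_atom_interner API, the flow in
xkb_x11_keymap_new_from_device() looks roughly like this sketch (wire_atom
and keymap_atom are placeholder names; error handling abbreviated):

    struct x11_atom_interner interner;
    x11_atom_interner_init(&interner, ctx, conn);

    /* wire_atom stands in for an atom taken out of a GetNames reply.
     * This only queues a GetAtomName request; keymap_atom is written later. */
    x11_atom_interner_adopt_atom(&interner, wire_atom, &keymap_atom);

    /* One round-trip collects all pending replies and interns the names. */
    x11_atom_interner_round_trip(&interner);
    if (interner.had_error)
        return NULL;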

This also fixes a TODO in the comments of src/x11/x11-priv.h.

Fixes: https://github.com/xkbcommon/libxkbcommon/issues/216
Signed-off-by: Uli Schlachter <psychon@znc.in>
src/x11/keymap.c
src/x11/util.c
src/x11/x11-priv.h

diff --git a/src/x11/keymap.c b/src/x11/keymap.c
index f5b368f..38be217 100644
@@ -852,7 +852,7 @@ fail:
 }
 
 static bool
-get_type_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
+get_type_names(struct xkb_keymap *keymap, struct x11_atom_interner *interner,
                xcb_xkb_get_names_reply_t *reply,
                xcb_xkb_get_names_value_list_t *list)
 {
@@ -880,13 +880,9 @@ get_type_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
 
         ALLOC_OR_FAIL(type->level_names, type->num_levels);
 
-        if (!adopt_atom(keymap->ctx, conn, wire_type_name, &type->name))
-            goto fail;
-
-        if (!adopt_atoms(keymap->ctx, conn,
-                         kt_level_names_iter, type->level_names,
-                         wire_num_levels))
-            goto fail;
+        x11_atom_interner_adopt_atom(interner, wire_type_name, &type->name);
+        x11_atom_interner_adopt_atoms(interner, kt_level_names_iter,
+                                     type->level_names, wire_num_levels);
 
         type->num_level_names = type->num_levels;
         kt_level_names_iter += wire_num_levels;
@@ -901,7 +897,8 @@ fail:
 }
 
 static bool
-get_indicator_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
+get_indicator_names(struct xkb_keymap *keymap,
+                    struct x11_atom_interner *interner,
                     xcb_xkb_get_names_reply_t *reply,
                     xcb_xkb_get_names_value_list_t *list)
 {
@@ -914,8 +911,7 @@ get_indicator_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
             xcb_atom_t wire = *iter;
             struct xkb_led *led = &keymap->leds[i];
 
-            if (!adopt_atom(keymap->ctx, conn, wire, &led->name))
-                return false;
+            x11_atom_interner_adopt_atom(interner, wire, &led->name);
 
             iter++;
         }
@@ -928,7 +924,7 @@ fail:
 }
 
 static bool
-get_vmod_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
+get_vmod_names(struct xkb_keymap *keymap, struct x11_atom_interner *interner,
                xcb_xkb_get_names_reply_t *reply,
                xcb_xkb_get_names_value_list_t *list)
 {
@@ -947,8 +943,7 @@ get_vmod_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
             xcb_atom_t wire = *iter;
             struct xkb_mod *mod = &keymap->mods.mods[NUM_REAL_MODS + i];
 
-            if (!adopt_atom(keymap->ctx, conn, wire, &mod->name))
-                return false;
+            x11_atom_interner_adopt_atom(interner, wire, &mod->name);
 
             iter++;
         }
@@ -958,7 +953,7 @@ get_vmod_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
 }
 
 static bool
-get_group_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
+get_group_names(struct xkb_keymap *keymap, struct x11_atom_interner *interner,
                 xcb_xkb_get_names_reply_t *reply,
                 xcb_xkb_get_names_value_list_t *list)
 {
@@ -968,9 +963,7 @@ get_group_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
     keymap->num_group_names = msb_pos(reply->groupNames);
     ALLOC_OR_FAIL(keymap->group_names, keymap->num_group_names);
 
-    if (!adopt_atoms(keymap->ctx, conn,
-                     iter, keymap->group_names, length))
-        goto fail;
+    x11_atom_interner_adopt_atoms(interner, iter, keymap->group_names, length);
 
     return true;
 
@@ -1051,7 +1044,7 @@ fail:
 }
 
 static bool
-get_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
+get_names(struct xkb_keymap *keymap, struct x11_atom_interner *interner,
           uint16_t device_id)
 {
     static const xcb_xkb_name_detail_t wanted =
@@ -1072,6 +1065,7 @@ get_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
          XCB_XKB_NAME_DETAIL_KEY_NAMES |
          XCB_XKB_NAME_DETAIL_VIRTUAL_MOD_NAMES);
 
+    xcb_connection_t *conn = interner->conn;
     xcb_xkb_get_names_cookie_t cookie =
         xcb_xkb_get_names(conn, device_id, wanted);
     xcb_xkb_get_names_reply_t *reply =
@@ -1097,10 +1091,10 @@ get_names(struct xkb_keymap *keymap, xcb_connection_t *conn,
         !get_atom_name(conn, list.symbolsName, &keymap->symbols_section_name) ||
         !get_atom_name(conn, list.typesName, &keymap->types_section_name) ||
         !get_atom_name(conn, list.compatName, &keymap->compat_section_name) ||
-        !get_type_names(keymap, conn, reply, &list) ||
-        !get_indicator_names(keymap, conn, reply, &list) ||
-        !get_vmod_names(keymap, conn, reply, &list) ||
-        !get_group_names(keymap, conn, reply, &list) ||
+        !get_type_names(keymap, interner, reply, &list) ||
+        !get_indicator_names(keymap, interner, reply, &list) ||
+        !get_vmod_names(keymap, interner, reply, &list) ||
+        !get_group_names(keymap, interner, reply, &list) ||
         !get_key_names(keymap, conn, reply, &list) ||
         !get_aliases(keymap, conn, reply, &list))
         goto fail;
@@ -1169,14 +1163,23 @@ xkb_x11_keymap_new_from_device(struct xkb_context *ctx,
     if (!keymap)
         return NULL;
 
+    struct x11_atom_interner interner;
+    x11_atom_interner_init(&interner, ctx, conn);
+
     if (!get_map(keymap, conn, device_id) ||
         !get_indicator_map(keymap, conn, device_id) ||
         !get_compat_map(keymap, conn, device_id) ||
-        !get_names(keymap, conn, device_id) ||
+        !get_names(keymap, &interner, device_id) ||
         !get_controls(keymap, conn, device_id)) {
         xkb_keymap_unref(keymap);
         return NULL;
     }
 
+    x11_atom_interner_round_trip(&interner);
+    if (interner.had_error) {
+        xkb_keymap_unref(keymap);
+        return NULL;
+    }
+
     return keymap;
 }
diff --git a/src/x11/util.c b/src/x11/util.c
index 660d885..766e9a0 100644
@@ -169,14 +169,9 @@ struct x11_atom_cache {
     size_t len;
 };
 
-bool
-adopt_atoms(struct xkb_context *ctx, xcb_connection_t *conn,
-            const xcb_atom_t *from, xkb_atom_t *to, const size_t count)
+static struct x11_atom_cache *
+get_cache(struct xkb_context *ctx, xcb_connection_t *conn)
 {
-    enum { SIZE = 128 };
-    xcb_get_atom_name_cookie_t cookies[SIZE];
-    const size_t num_batches = ROUNDUP(count, SIZE) / SIZE;
-
     if (!ctx->x11_atom_cache) {
         ctx->x11_atom_cache = calloc(1, sizeof(struct x11_atom_cache));
     }
@@ -186,79 +181,112 @@ adopt_atoms(struct xkb_context *ctx, xcb_connection_t *conn,
         cache->conn = conn;
         cache->len = 0;
     }
+    return cache;
+}
 
-    memset(to, 0, count * sizeof(*to));
-
-    /* Send and collect the atoms in batches of reasonable SIZE. */
-    for (size_t batch = 0; batch < num_batches; batch++) {
-        const size_t start = batch * SIZE;
-        const size_t stop = MIN((batch + 1) * SIZE, count);
-
-        /* Send. */
-        for (size_t i = start; i < stop; i++) {
-            bool cache_hit = false;
-            if (cache) {
-                for (size_t c = 0; c < cache->len; c++) {
-                    if (cache->cache[c].from == from[i]) {
-                        to[i] = cache->cache[c].to;
-                        cache_hit = true;
-                        break;
-                    }
-                }
+void
+x11_atom_interner_init(struct x11_atom_interner *interner,
+                       struct xkb_context *ctx, xcb_connection_t *conn)
+{
+    interner->had_error = false;
+    interner->ctx = ctx;
+    interner->conn = conn;
+    interner->num_pending = 0;
+    interner->num_copies = 0;
+}
+
+void
+x11_atom_interner_adopt_atom(struct x11_atom_interner *interner,
+                             const xcb_atom_t atom, xkb_atom_t *out)
+{
+    *out = 0;
+
+    /* Can be NULL in case the malloc failed. */
+    struct x11_atom_cache *cache = get_cache(interner->ctx, interner->conn);
+
+retry:
+
+    /* Already in the cache? */
+    if (cache) {
+        for (size_t c = 0; c < cache->len; c++) {
+            if (cache->cache[c].from == atom) {
+                *out = cache->cache[c].to;
+                return;
             }
-            if (!cache_hit && from[i] != XCB_ATOM_NONE)
-                cookies[i % SIZE] = xcb_get_atom_name(conn, from[i]);
         }
+    }
 
-        /* Collect. */
-        for (size_t i = start; i < stop; i++) {
-            xcb_get_atom_name_reply_t *reply;
+    /* Already pending? */
+    for (size_t i = 0; i < interner->num_pending; i++) {
+        if (interner->pending[i].from == atom) {
+            if (interner->num_copies == ARRAY_SIZE(interner->copies)) {
+                x11_atom_interner_round_trip(interner);
+                goto retry;
+            }
 
-            if (from[i] == XCB_ATOM_NONE)
-                continue;
+            size_t idx = interner->num_copies++;
+            interner->copies[idx].from = atom;
+            interner->copies[idx].out = out;
+            return;
+        }
+    }
 
-            /* Was filled from cache. */
-            if (to[i] != 0)
-                continue;
+    /* We have to send a GetAtomName request */
+    if (interner->num_pending == ARRAY_SIZE(interner->pending)) {
+        x11_atom_interner_round_trip(interner);
+        assert(interner->num_pending < ARRAY_SIZE(interner->pending));
+    }
+    size_t idx = interner->num_pending++;
+    interner->pending[idx].from = atom;
+    interner->pending[idx].out = out;
+    interner->pending[idx].cookie = xcb_get_atom_name(interner->conn, atom);
+}
 
-            reply = xcb_get_atom_name_reply(conn, cookies[i % SIZE], NULL);
-            if (!reply)
-                goto err_discard;
+void
+x11_atom_interner_adopt_atoms(struct x11_atom_interner *interner,
+                              const xcb_atom_t *from, xkb_atom_t *to,
+                              size_t count)
+{
+    for (size_t i = 0; i < count; i++) {
+        x11_atom_interner_adopt_atom(interner, from[i], &to[i]);
+    }
+}
 
-            to[i] = xkb_atom_intern(ctx,
-                                    xcb_get_atom_name_name(reply),
-                                    xcb_get_atom_name_name_length(reply));
-            free(reply);
+void x11_atom_interner_round_trip(struct x11_atom_interner *interner) {
+    struct xkb_context *ctx = interner->ctx;
+    xcb_connection_t *conn = interner->conn;
 
-            if (to[i] == XKB_ATOM_NONE)
-                goto err_discard;
+    /* Can be NULL in case the malloc failed. */
+    struct x11_atom_cache *cache = get_cache(ctx, conn);
 
-            if (cache && cache->len < ARRAY_SIZE(cache->cache)) {
-                size_t idx = cache->len++;
-                cache->cache[idx].from = from[i];
-                cache->cache[idx].to = to[i];
-            }
+    for (size_t i = 0; i < interner->num_pending; i++) {
+        xcb_get_atom_name_reply_t *reply;
 
+        reply = xcb_get_atom_name_reply(conn, interner->pending[i].cookie, NULL);
+        if (!reply) {
+            interner->had_error = true;
             continue;
+        }
+        xcb_atom_t x11_atom = interner->pending[i].from;
+        xkb_atom_t atom = xkb_atom_intern(ctx,
+                                          xcb_get_atom_name_name(reply),
+                                          xcb_get_atom_name_name_length(reply));
+        free(reply);
 
-            /*
-             * If we don't discard the uncollected replies, they just
-             * sit in the XCB queue waiting forever. Sad.
-             */
-err_discard:
-            for (size_t j = i + 1; j < stop; j++)
-                if (from[j] != XCB_ATOM_NONE)
-                    xcb_discard_reply(conn, cookies[j % SIZE].sequence);
-            return false;
+        if (cache && cache->len < ARRAY_SIZE(cache->cache)) {
+            size_t idx = cache->len++;
+            cache->cache[idx].from = x11_atom;
+            cache->cache[idx].to = atom;
         }
-    }
 
-    return true;
-}
+        *interner->pending[i].out = atom;
 
-bool
-adopt_atom(struct xkb_context *ctx, xcb_connection_t *conn, xcb_atom_t atom,
-           xkb_atom_t *out)
-{
-    return adopt_atoms(ctx, conn, &atom, out, 1);
+        for (size_t j = 0; j < interner->num_copies; j++) {
+            if (interner->copies[j].from == x11_atom)
+                *interner->copies[j].out = atom;
+        }
+    }
+
+    interner->num_pending = 0;
+    interner->num_copies = 0;
 }
diff --git a/src/x11/x11-priv.h b/src/x11/x11-priv.h
index 3a19e99..5b7f5c2 100644
 bool
 get_atom_name(xcb_connection_t *conn, xcb_atom_t atom, char **out);
 
+struct x11_atom_interner {
+    struct xkb_context *ctx;
+    xcb_connection_t *conn;
+    bool had_error;
+    /* Atoms for which we send a GetAtomName request */
+    struct {
+        xcb_atom_t from;
+        xkb_atom_t *out;
+        xcb_get_atom_name_cookie_t cookie;
+    } pending[128];
+    size_t num_pending;
+    /* Atoms which were already pending but queried again */
+    struct {
+        xcb_atom_t from;
+        xkb_atom_t *out;
+    } copies[128];
+    size_t num_copies;
+};
+
+void
+x11_atom_interner_init(struct x11_atom_interner *interner,
+                       struct xkb_context *ctx, xcb_connection_t *conn);
+
+void
+x11_atom_interner_round_trip(struct x11_atom_interner *interner);
+
 /*
- * Make a xkb_atom_t's from X atoms (prefer to send as many as possible
- * at once, to avoid many roundtrips).
- *
- * TODO: We can make this more flexible, such that @to doesn't have to
- *       be sequential. Then we can convert most adopt_atom() calls to
- *       adopt_atoms().
- *       Atom caching would also likely be useful for avoiding quite a
- *       few requests.
+ * Make a xkb_atom_t's from X atoms. The actual write is delayed until the next
+ * call to x11_atom_interner_round_trip() or when too many atoms are pending.
  */
-bool
-adopt_atoms(struct xkb_context *ctx, xcb_connection_t *conn,
-            const xcb_atom_t *from, xkb_atom_t *to, size_t count);
+void
+x11_atom_interner_adopt_atom(struct x11_atom_interner *interner,
+                             const xcb_atom_t atom, xkb_atom_t *out);
 
-bool
-adopt_atom(struct xkb_context *ctx, xcb_connection_t *conn, xcb_atom_t atom,
-           xkb_atom_t *out);
+
+void
+x11_atom_interner_adopt_atoms(struct x11_atom_interner *interner,
+                              const xcb_atom_t *from, xkb_atom_t *to,
+                              size_t count);
 
 #endif