bcache: Move insert_fixup() to btree_keys_ops
author Kent Overstreet <kmo@daterainc.com>
Tue, 12 Nov 2013 01:02:31 +0000 (17:02 -0800)
committer Kent Overstreet <kmo@daterainc.com>
Wed, 8 Jan 2014 21:05:14 +0000 (13:05 -0800)
Handling overlapping extents/keys is now a method specific to what the btree
node contains: fix_overlapping_extents() moves out of btree.c to become the
insert_fixup() hook in btree_keys_ops, implemented as bch_extent_insert_fixup()
for extent nodes and as a trivial bch_btree_ptr_insert_fixup() for btree
pointer nodes. The insert_collision flag is now set by bch_btree_insert_keys()
based on the return value of bch_btree_insert_key().
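
To illustrate the resulting dispatch, here is a minimal standalone sketch of
the pattern (simplified types and names, not the real bcache structures): a
generic insert path calls through a per-node-type method table, extent nodes
supply an insert_fixup() that resolves overlaps, and btree-pointer nodes
supply a no-op variant.

	#include <stdbool.h>
	#include <stdio.h>

	struct key {
		unsigned long long start, end;
	};

	/* Per-node-type method table, analogous to btree_keys_ops */
	struct node_ops {
		/*
		 * Returns true if the insert should be dropped (e.g. a
		 * replace that found nothing to replace), false to proceed.
		 */
		bool (*insert_fixup)(struct key *insert);
	};

	/* Extent nodes must trim/split existing overlapping keys first */
	static bool extent_insert_fixup(struct key *insert)
	{
		printf("resolving overlaps for [%llu, %llu)\n",
		       insert->start, insert->end);
		return false;
	}

	/* Interior (btree pointer) nodes never overlap; nothing to fix */
	static bool ptr_insert_fixup(struct key *insert)
	{
		(void) insert;
		return false;
	}

	static const struct node_ops extent_ops = {
		.insert_fixup	= extent_insert_fixup,
	};

	static const struct node_ops ptr_ops = {
		.insert_fixup	= ptr_insert_fixup,
	};

	/* Generic insert path, shared by both node types */
	static bool node_insert_key(const struct node_ops *ops, struct key *k)
	{
		if (ops->insert_fixup(k))
			return false;	/* no insert - collision */

		/* ...common back merge/front merge/insert logic here... */
		return true;
	}

	int main(void)
	{
		struct key k = { 0, 8 };

		node_insert_key(&extent_ops, &k);
		node_insert_key(&ptr_ops, &k);
		return 0;
	}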

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
drivers/md/bcache/bset.c
drivers/md/bcache/bset.h
drivers/md/bcache/btree.c
drivers/md/bcache/extents.c

drivers/md/bcache/bset.c
index 2ff75f3..4a71132 100644
@@ -676,6 +676,8 @@ void bch_bset_build_written_tree(struct btree_keys *b)
 }
 EXPORT_SYMBOL(bch_bset_build_written_tree);
 
+/* Insert */
+
 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
        struct bset_tree *t;
@@ -791,6 +793,54 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 }
 EXPORT_SYMBOL(bch_bset_insert);
 
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+                             struct bkey *replace_key)
+{
+       unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+       struct bset *i = bset_tree_last(b)->data;
+       struct bkey *m, *prev = NULL;
+       struct btree_iter iter;
+
+       BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+       m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+                               ? PRECEDING_KEY(&START_KEY(k))
+                               : PRECEDING_KEY(k));
+
+       if (b->ops->insert_fixup(b, k, &iter, replace_key))
+               return status;
+
+       status = BTREE_INSERT_STATUS_INSERT;
+
+       while (m != bset_bkey_last(i) &&
+              bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+               prev = m, m = bkey_next(m);
+
+       /* prev is in the tree, if we merge we're done */
+       status = BTREE_INSERT_STATUS_BACK_MERGE;
+       if (prev &&
+           bch_bkey_try_merge(b, prev, k))
+               goto merged;
+#if 0
+       status = BTREE_INSERT_STATUS_OVERWROTE;
+       if (m != bset_bkey_last(i) &&
+           KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+               goto copy;
+#endif
+       status = BTREE_INSERT_STATUS_FRONT_MERGE;
+       if (m != bset_bkey_last(i) &&
+           bch_bkey_try_merge(b, k, m))
+               goto copy;
+
+       bch_bset_insert(b, m, k);
+copy:  bkey_copy(m, k);
+merged:
+       return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
+
+/* Lookup */
+
 struct bset_search_iter {
        struct bkey *l, *r;
 };
drivers/md/bcache/bset.h
index 4aa199d..759df83 100644
@@ -189,6 +189,8 @@ struct btree_keys_ops {
        bool            (*sort_cmp)(struct btree_iter_set,
                                    struct btree_iter_set);
        struct bkey     *(*sort_fixup)(struct btree_iter *, struct bkey *);
+       bool            (*insert_fixup)(struct btree_keys *, struct bkey *,
+                                       struct btree_iter *, struct bkey *);
        bool            (*key_invalid)(struct btree_keys *,
                                       const struct bkey *);
        bool            (*key_bad)(struct btree_keys *, const struct bkey *);
@@ -286,6 +288,16 @@ void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
 void bch_bset_build_written_tree(struct btree_keys *);
 void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
 void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+                             struct bkey *);
+
+enum {
+       BTREE_INSERT_STATUS_NO_INSERT = 0,
+       BTREE_INSERT_STATUS_INSERT,
+       BTREE_INSERT_STATUS_BACK_MERGE,
+       BTREE_INSERT_STATUS_OVERWROTE,
+       BTREE_INSERT_STATUS_FRONT_MERGE,
+};
 
 /*
  * Tries to merge l and r: l should be lower than r
drivers/md/bcache/btree.c
index b14f34a..463d280 100644
@@ -24,7 +24,6 @@
 #include "btree.h"
 #include "debug.h"
 #include "extents.h"
-#include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
  * Test module load/unload
  */
 
-enum {
-       BTREE_INSERT_STATUS_INSERT,
-       BTREE_INSERT_STATUS_BACK_MERGE,
-       BTREE_INSERT_STATUS_OVERWROTE,
-       BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC            64
 #define MAX_SAVE_PRIO          72
 
@@ -1792,230 +1784,23 @@ err:
 
 /* Btree insertion */
 
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-                                   struct btree_iter *iter,
-                                   struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+                            struct bkey *replace_key)
 {
-       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-       {
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    offset, -sectors);
-       }
-
-       uint64_t old_offset;
-       unsigned old_size, sectors_found = 0;
-
-       while (1) {
-               struct bkey *k = bch_btree_iter_next(iter);
-               if (!k)
-                       break;
-
-               if (bkey_cmp(&START_KEY(k), insert) >= 0) {
-                       if (KEY_SIZE(k))
-                               break;
-                       else
-                               continue;
-               }
-
-               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-                       continue;
-
-               old_offset = KEY_START(k);
-               old_size = KEY_SIZE(k);
-
-               /*
-                * We might overlap with 0 size extents; we can't skip these
-                * because if they're in the set we're inserting to we have to
-                * adjust them so they don't overlap with the key we're
-                * inserting. But we don't want to check them for replace
-                * operations.
-                */
-
-               if (replace_key && KEY_SIZE(k)) {
-                       /*
-                        * k might have been split since we inserted/found the
-                        * key we're replacing
-                        */
-                       unsigned i;
-                       uint64_t offset = KEY_START(k) -
-                               KEY_START(replace_key);
-
-                       /* But it must be a subset of the replace key */
-                       if (KEY_START(k) < KEY_START(replace_key) ||
-                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-                               goto check_failed;
-
-                       /* We didn't find a key that we were supposed to */
-                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
-                               goto check_failed;
-
-                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-                               goto check_failed;
-
-                       /* skip past gen */
-                       offset <<= 8;
-
-                       BUG_ON(!KEY_PTRS(replace_key));
-
-                       for (i = 0; i < KEY_PTRS(replace_key); i++)
-                               if (k->ptr[i] != replace_key->ptr[i] + offset)
-                                       goto check_failed;
-
-                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-               }
-
-               if (bkey_cmp(insert, k) < 0 &&
-                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-                       /*
-                        * We overlapped in the middle of an existing key: that
-                        * means we have to split the old key. But we have to do
-                        * slightly different things depending on whether the
-                        * old key has been written out yet.
-                        */
-
-                       struct bkey *top;
-
-                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-                       if (bkey_written(&b->keys, k)) {
-                               /*
-                                * We insert a new key to cover the top of the
-                                * old key, and the old key is modified in place
-                                * to represent the bottom split.
-                                *
-                                * It's completely arbitrary whether the new key
-                                * is the top or the bottom, but it has to match
-                                * up with what btree_sort_fixup() does - it
-                                * doesn't check for this kind of overlap, it
-                                * depends on us inserting a new key for the top
-                                * here.
-                                */
-                               top = bch_bset_search(&b->keys,
-                                                     bset_tree_last(&b->keys),
-                                                     insert);
-                               bch_bset_insert(&b->keys, top, k);
-                       } else {
-                               BKEY_PADDED(key) temp;
-                               bkey_copy(&temp.key, k);
-                               bch_bset_insert(&b->keys, k, &temp.key);
-                               top = bkey_next(k);
-                       }
-
-                       bch_cut_front(insert, top);
-                       bch_cut_back(&START_KEY(insert), k);
-                       bch_bset_fix_invalidated_key(&b->keys, k);
-                       return false;
-               }
-
-               if (bkey_cmp(insert, k) < 0) {
-                       bch_cut_front(insert, k);
-               } else {
-                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-                               old_offset = KEY_START(insert);
-
-                       if (bkey_written(&b->keys, k) &&
-                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-                               /*
-                                * Completely overwrote, so we don't have to
-                                * invalidate the binary search tree
-                                */
-                               bch_cut_front(k, k);
-                       } else {
-                               __bch_cut_back(&START_KEY(insert), k);
-                               bch_bset_fix_invalidated_key(&b->keys, k);
-                       }
-               }
-
-               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-       }
-
-check_failed:
-       if (replace_key) {
-               if (!sectors_found) {
-                       return true;
-               } else if (sectors_found < KEY_SIZE(insert)) {
-                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-                                      (KEY_SIZE(insert) - sectors_found));
-                       SET_KEY_SIZE(insert, sectors_found);
-               }
-       }
-
-       return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-                            struct bkey *k, struct bkey *replace_key)
-{
-       struct bset *i = btree_bset_last(b);
-       struct bkey *m, *prev;
-       unsigned status = BTREE_INSERT_STATUS_INSERT;
+       unsigned status;
 
        BUG_ON(bkey_cmp(k, &b->key) > 0);
-       BUG_ON(b->level && !KEY_PTRS(k));
-       BUG_ON(!b->level && !KEY_OFFSET(k));
 
-       if (!b->level) {
-               struct btree_iter iter;
+       status = bch_btree_insert_key(&b->keys, k, replace_key);
+       if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+               bch_check_keys(&b->keys, "%u for %s", status,
+                              replace_key ? "replace" : "insert");
 
-               /*
-                * bset_search() returns the first key that is strictly greater
-                * than the search key - but for back merging, we want to find
-                * the previous key.
-                */
-               prev = NULL;
-               m = bch_btree_iter_init(&b->keys, &iter,
-                                       PRECEDING_KEY(&START_KEY(k)));
-
-               if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-                       op->insert_collision = true;
-                       return false;
-               }
-
-               if (KEY_DIRTY(k))
-                       bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-                                                    KEY_START(k), KEY_SIZE(k));
-
-               while (m != bset_bkey_last(i) &&
-                      bkey_cmp(k, &START_KEY(m)) > 0)
-                       prev = m, m = bkey_next(m);
-
-               if (key_merging_disabled(b->c))
-                       goto insert;
-
-               /* prev is in the tree, if we merge we're done */
-               status = BTREE_INSERT_STATUS_BACK_MERGE;
-               if (prev &&
-                   bch_bkey_try_merge(&b->keys, prev, k))
-                       goto merged;
-
-               status = BTREE_INSERT_STATUS_OVERWROTE;
-               if (m != bset_bkey_last(i) &&
-                   KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-                       goto copy;
-
-               status = BTREE_INSERT_STATUS_FRONT_MERGE;
-               if (m != bset_bkey_last(i) &&
-                   bch_bkey_try_merge(&b->keys, k, m))
-                       goto copy;
-       } else {
-               BUG_ON(replace_key);
-               m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
-       }
-
-insert:        bch_bset_insert(&b->keys, m, k);
-copy:  bkey_copy(m, k);
-merged:
-       bch_check_keys(&b->keys, "%u for %s", status,
-                      replace_key ? "replace" : "insert");
-
-       if (b->level && !KEY_OFFSET(k))
-               btree_current_write(b)->prio_blocked++;
-
-       trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-       return true;
+               trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+                                             status);
+               return true;
+       } else
+               return false;
 }
 
 static size_t insert_u64s_remaining(struct btree *b)
@@ -2048,7 +1833,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                        if (!b->level)
                                bkey_put(b->c, k);
 
-                       ret |= btree_insert_key(b, op, k, replace_key);
+                       ret |= btree_insert_key(b, k, replace_key);
                        bch_keylist_pop_front(insert_keys);
                } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
                        BKEY_PADDED(key) temp;
@@ -2057,13 +1842,16 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                        bch_cut_back(&b->key, &temp.key);
                        bch_cut_front(&b->key, insert_keys->keys);
 
-                       ret |= btree_insert_key(b, op, &temp.key, replace_key);
+                       ret |= btree_insert_key(b, &temp.key, replace_key);
                        break;
                } else {
                        break;
                }
        }
 
+       if (!ret)
+               op->insert_collision = true;
+
        BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
        BUG_ON(bch_count_data(&b->keys) < oldsize);
drivers/md/bcache/extents.c
index bc1c3ee..d6de3c7 100644
@@ -222,8 +222,22 @@ static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
        return false;
 }
 
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+                                      struct bkey *insert,
+                                      struct btree_iter *iter,
+                                      struct bkey *replace_key)
+{
+       struct btree *b = container_of(bk, struct btree, keys);
+
+       if (!KEY_OFFSET(insert))
+               btree_current_write(b)->prio_blocked++;
+
+       return false;
+}
+
 const struct btree_keys_ops bch_btree_keys_ops = {
        .sort_cmp       = bch_key_sort_cmp,
+       .insert_fixup   = bch_btree_ptr_insert_fixup,
        .key_invalid    = bch_btree_ptr_invalid,
        .key_bad        = bch_btree_ptr_bad,
        .key_to_text    = bch_extent_to_text,
@@ -294,6 +308,169 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
        return NULL;
 }
 
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+                                   struct bkey *insert,
+                                   struct btree_iter *iter,
+                                   struct bkey *replace_key)
+{
+       struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+       void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+       {
+               if (KEY_DIRTY(k))
+                       bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+                                                    offset, -sectors);
+       }
+
+       uint64_t old_offset;
+       unsigned old_size, sectors_found = 0;
+
+       BUG_ON(!KEY_OFFSET(insert));
+       BUG_ON(!KEY_SIZE(insert));
+
+       while (1) {
+               struct bkey *k = bch_btree_iter_next(iter);
+               if (!k)
+                       break;
+
+               if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+                       if (KEY_SIZE(k))
+                               break;
+                       else
+                               continue;
+               }
+
+               if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+                       continue;
+
+               old_offset = KEY_START(k);
+               old_size = KEY_SIZE(k);
+
+               /*
+                * We might overlap with 0 size extents; we can't skip these
+                * because if they're in the set we're inserting to we have to
+                * adjust them so they don't overlap with the key we're
+                * inserting. But we don't want to check them for replace
+                * operations.
+                */
+
+               if (replace_key && KEY_SIZE(k)) {
+                       /*
+                        * k might have been split since we inserted/found the
+                        * key we're replacing
+                        */
+                       unsigned i;
+                       uint64_t offset = KEY_START(k) -
+                               KEY_START(replace_key);
+
+                       /* But it must be a subset of the replace key */
+                       if (KEY_START(k) < KEY_START(replace_key) ||
+                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+                               goto check_failed;
+
+                       /* We didn't find a key that we were supposed to */
+                       if (KEY_START(k) > KEY_START(insert) + sectors_found)
+                               goto check_failed;
+
+                       if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+                           KEY_DIRTY(k) != KEY_DIRTY(replace_key))
+                               goto check_failed;
+
+                       /* skip past gen */
+                       offset <<= 8;
+
+                       BUG_ON(!KEY_PTRS(replace_key));
+
+                       for (i = 0; i < KEY_PTRS(replace_key); i++)
+                               if (k->ptr[i] != replace_key->ptr[i] + offset)
+                                       goto check_failed;
+
+                       sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+               }
+
+               if (bkey_cmp(insert, k) < 0 &&
+                   bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+                       /*
+                        * We overlapped in the middle of an existing key: that
+                        * means we have to split the old key. But we have to do
+                        * slightly different things depending on whether the
+                        * old key has been written out yet.
+                        */
+
+                       struct bkey *top;
+
+                       subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+                       if (bkey_written(b, k)) {
+                               /*
+                                * We insert a new key to cover the top of the
+                                * old key, and the old key is modified in place
+                                * to represent the bottom split.
+                                *
+                                * It's completely arbitrary whether the new key
+                                * is the top or the bottom, but it has to match
+                                * up with what btree_sort_fixup() does - it
+                                * doesn't check for this kind of overlap, it
+                                * depends on us inserting a new key for the top
+                                * here.
+                                */
+                               top = bch_bset_search(b, bset_tree_last(b),
+                                                     insert);
+                               bch_bset_insert(b, top, k);
+                       } else {
+                               BKEY_PADDED(key) temp;
+                               bkey_copy(&temp.key, k);
+                               bch_bset_insert(b, k, &temp.key);
+                               top = bkey_next(k);
+                       }
+
+                       bch_cut_front(insert, top);
+                       bch_cut_back(&START_KEY(insert), k);
+                       bch_bset_fix_invalidated_key(b, k);
+                       goto out;
+               }
+
+               if (bkey_cmp(insert, k) < 0) {
+                       bch_cut_front(insert, k);
+               } else {
+                       if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+                               old_offset = KEY_START(insert);
+
+                       if (bkey_written(b, k) &&
+                           bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+                               /*
+                                * Completely overwrote, so we don't have to
+                                * invalidate the binary search tree
+                                */
+                               bch_cut_front(k, k);
+                       } else {
+                               __bch_cut_back(&START_KEY(insert), k);
+                               bch_bset_fix_invalidated_key(b, k);
+                       }
+               }
+
+               subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+       }
+
+check_failed:
+       if (replace_key) {
+               if (!sectors_found) {
+                       return true;
+               } else if (sectors_found < KEY_SIZE(insert)) {
+                       SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+                                      (KEY_SIZE(insert) - sectors_found));
+                       SET_KEY_SIZE(insert, sectors_found);
+               }
+       }
+out:
+       if (KEY_DIRTY(insert))
+               bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+                                            KEY_START(insert),
+                                            KEY_SIZE(insert));
+
+       return false;
+}
+
 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
 {
        struct btree *b = container_of(bk, struct btree, keys);
@@ -435,6 +612,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 const struct btree_keys_ops bch_extent_keys_ops = {
        .sort_cmp       = bch_extent_sort_cmp,
        .sort_fixup     = bch_extent_sort_fixup,
+       .insert_fixup   = bch_extent_insert_fixup,
        .key_invalid    = bch_extent_invalid,
        .key_bad        = bch_extent_bad,
        .key_merge      = bch_extent_merge,