diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 93d593f..24a3a15 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -9,6 +9,7 @@
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
+#include "extents.h"
 #include "request.h"
 #include "writeback.h"
 
@@ -225,7 +226,7 @@ static void write_bdev_super_endio(struct bio *bio, int error)
        struct cached_dev *dc = bio->bi_private;
        /* XXX: error checking */
 
-       closure_put(&dc->sb_write.cl);
+       closure_put(&dc->sb_write);
 }
 
 static void __write_super(struct cache_sb *sb, struct bio *bio)
@@ -263,12 +264,20 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
        submit_bio(REQ_WRITE, bio);
 }
 
+static void bch_write_bdev_super_unlock(struct closure *cl)
+{
+       struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
+
+       up(&dc->sb_write_mutex);
+}
+
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
 {
-       struct closure *cl = &dc->sb_write.cl;
+       struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;
 
-       closure_lock(&dc->sb_write, parent);
+       down(&dc->sb_write_mutex);
+       closure_init(cl, parent);
 
        bio_reset(bio);
        bio->bi_bdev    = dc->bdev;
@@ -278,7 +287,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
        closure_get(cl);
        __write_super(&dc->sb, bio);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
 }
 
 static void write_super_endio(struct bio *bio, int error)
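The hunks above replace the old closure-based locking with a semaphore guarding a bare closure: the dropped ".cl" accesses show sb_write shrinking from a closure_with_waitlist to a plain struct closure, serialized by the new sb_write_mutex. A minimal sketch of the resulting pattern; the field pairing comes from this patch, the struct layout is an assumption for illustration:

	/* in struct cached_dev (assumed layout):
	 *	struct closure		sb_write;
	 *	struct semaphore	sb_write_mutex;
	 */
	down(&dc->sb_write_mutex);	/* serialize superblock writers */
	closure_init(cl, parent);	/* refcount = 1, parented on caller */
	closure_get(cl);		/* reference held by the in-flight bio */
	__write_super(&dc->sb, bio);
	/* drops the initial ref; the destructor, and with it the up(),
	 * runs only after the endio's closure_put() drops the last one */
	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
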
@@ -286,16 +295,24 @@ static void write_super_endio(struct bio *bio, int error)
        struct cache *ca = bio->bi_private;
 
        bch_count_io_errors(ca, error, "writing superblock");
-       closure_put(&ca->set->sb_write.cl);
+       closure_put(&ca->set->sb_write);
+}
+
+static void bcache_write_super_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, sb_write);
+
+       up(&c->sb_write_mutex);
 }
 
 void bcache_write_super(struct cache_set *c)
 {
-       struct closure *cl = &c->sb_write.cl;
+       struct closure *cl = &c->sb_write;
        struct cache *ca;
        unsigned i;
 
-       closure_lock(&c->sb_write, &c->cl);
+       down(&c->sb_write_mutex);
+       closure_init(cl, &c->cl);
 
        c->sb.seq++;
 
@@ -317,7 +334,7 @@ void bcache_write_super(struct cache_set *c)
                __write_super(&ca->sb, bio);
        }
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, bcache_write_super_unlock);
 }
 
 /* UUID io */
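bcache_write_super() follows the same pattern, but takes one closure_get() per cache device, so the destructor (and the up() inside it) cannot run until every write_super_endio() has dropped its reference. The refcount choreography, sketched with for_each_cache() as used elsewhere in bcache:

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);		/* refcount = 1 */

	for_each_cache(ca, c, i) {
		closure_get(cl);		/* +1 per superblock bio in flight */
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
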
@@ -325,23 +342,31 @@ void bcache_write_super(struct cache_set *c)
 static void uuid_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct cache_set *c = container_of(cl, struct cache_set, uuid_write.cl);
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
 
        cache_set_err_on(error, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
 }
 
+static void uuid_io_unlock(struct closure *cl)
+{
+       struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
+
+       up(&c->uuid_write_mutex);
+}
+
 static void uuid_io(struct cache_set *c, unsigned long rw,
                    struct bkey *k, struct closure *parent)
 {
-       struct closure *cl = &c->uuid_write.cl;
+       struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned i;
        char buf[80];
 
        BUG_ON(!parent);
-       closure_lock(&c->uuid_write, parent);
+       down(&c->uuid_write_mutex);
+       closure_init(cl, parent);
 
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
@@ -359,7 +384,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                        break;
        }
 
-       bch_bkey_to_text(buf, sizeof(buf), k);
+       bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
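bch_bkey_to_text() becomes bch_extent_to_text() here, and bch_btree_ptr_invalid() becomes __bch_btree_ptr_invalid() below; both renames go with the new extents.h include at the top of this diff, which presumably splits the extent/pointer key helpers into their own file. Judging only by the call sites in this patch, the declarations would be roughly:

	/* assumed declarations, inferred from the callers in this file: */
	void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
	bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);

The double-underscore variant takes the cache_set directly, for callers such as uuid_read() and run_cache_set() that must validate a key before any btree node exists.
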
@@ -368,14 +393,14 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);
 
-       closure_return(cl);
+       closure_return_with_destructor(cl, uuid_io_unlock);
 }
 
 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 {
        struct bkey *k = &j->uuid_bucket;
 
-       if (bch_btree_ptr_invalid(c, k))
+       if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
@@ -420,7 +445,7 @@ static int __uuid_write(struct cache_set *c)
 
        lockdep_assert_held(&bch_register_lock);
 
-       if (bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
+       if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
                return 1;
 
        SET_KEY_SIZE(&k.key, c->sb.bucket_size);
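WATERMARK_METADATA becomes RESERVE_BTREE: instead of comparing one shared freelist against per-purpose watermarks, the allocator now keeps a dedicated freelist per reserve (see the free[RESERVE_NR] array later in this diff). The enum shape below is an assumption; only the names themselves appear in the patch:

	enum alloc_reserve {
		RESERVE_BTREE,		/* btree node and metadata buckets */
		RESERVE_PRIO,		/* prio/gen bucket writes */
		RESERVE_MOVINGGC,	/* moving garbage collection */
		RESERVE_NONE,		/* ordinary data allocations */
		RESERVE_NR,
	};
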
@@ -538,8 +563,8 @@ void bch_prio_write(struct cache *ca)
        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);
 
-       pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
-                fifo_used(&ca->free_inc), fifo_used(&ca->unused));
+       //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
+       //       fifo_used(&ca->free_inc), fifo_used(&ca->unused));
 
        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
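The pr_debug() above is commented out rather than updated, presumably because ca->free becomes an array in this same series (see cache_alloc() below) and fifo_used(&ca->free) no longer compiles. A hypothetical replacement that would compile against the new layout, not part of this patch:

	size_t free = 0;
	unsigned j;

	/* sum the per-reserve freelists now that ca->free is an array */
	for (j = 0; j < RESERVE_NR; j++)
		free += fifo_used(&ca->free[j]);

	pr_debug("free %zu, free_inc %zu, unused %zu",
		 free, fifo_used(&ca->free_inc), fifo_used(&ca->unused));
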
@@ -558,7 +583,7 @@ void bch_prio_write(struct cache *ca)
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
 
-               bucket = bch_bucket_alloc(ca, WATERMARK_PRIO, true);
+               bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
                BUG_ON(bucket == -1);
 
                mutex_unlock(&ca->set->bucket_lock);
@@ -1098,7 +1123,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
-       closure_init_unlocked(&dc->sb_write);
+       sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
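Note the choice of primitive: assuming the usual closure semantics, the matching up() runs in the closure destructor, potentially from bio completion context and in a different task than the one that called down(). A struct mutex must be released by its owner; a semaphore carries no ownership, so the cross-context release is legal.

	/* mutex-like binary semaphore; released later from the closure
	 * destructor, possibly in another task, which a mutex would forbid */
	sema_init(&dc->sb_write_mutex, 1);
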
@@ -1110,6 +1135,12 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }
 
+       dc->disk.stripe_size = q->limits.io_opt >> 9;
+
+       if (dc->disk.stripe_size)
+               dc->partial_stripes_expensive =
+                       q->limits.raid_partial_stripes_expensive;
+
        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
        if (ret)
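The queue limit io_opt is expressed in bytes while bcache tracks stripe_size in 512-byte sectors, hence the >> 9. Worked through for a hypothetical backing device, a RAID5 array with 3 data disks and a 64KiB chunk:

	io_opt      = 3 * 65536   = 196608 bytes
	stripe_size = 196608 >> 9 = 384 sectors	(192KiB)

With raid_partial_stripes_expensive set by the RAID layer, writeback can then prefer full-stripe writes and avoid read-modify-write cycles.
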
@@ -1321,8 +1352,8 @@ static void cache_set_free(struct closure *cl)
                if (ca)
                        kobject_put(&ca->kobj);
 
+       bch_bset_sort_state_free(&c->sort);
        free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
-       free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
        if (c->bio_split)
                bioset_free(c->bio_split);
@@ -1447,21 +1478,17 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
        c->block_bits           = ilog2(sb->block_size);
        c->nr_uuids             = bucket_bytes(c) / sizeof(struct uuid_entry);
 
-       c->btree_pages          = c->sb.bucket_size / PAGE_SECTORS;
+       c->btree_pages          = bucket_pages(c);
        if (c->btree_pages > BTREE_MAX_PAGES)
                c->btree_pages = max_t(int, c->btree_pages / 4,
                                       BTREE_MAX_PAGES);
 
-       c->sort_crit_factor = int_sqrt(c->btree_pages);
-
-       closure_init_unlocked(&c->sb_write);
+       sema_init(&c->sb_write_mutex, 1);
        mutex_init(&c->bucket_lock);
        init_waitqueue_head(&c->try_wait);
        init_waitqueue_head(&c->bucket_wait);
-       closure_init_unlocked(&c->uuid_write);
-       mutex_init(&c->sort_lock);
+       sema_init(&c->uuid_write_mutex, 1);
 
-       spin_lock_init(&c->sort_time.lock);
        spin_lock_init(&c->btree_gc_time.lock);
        spin_lock_init(&c->btree_split_time.lock);
        spin_lock_init(&c->btree_read_time.lock);
@@ -1489,11 +1516,11 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
                                bucket_pages(c))) ||
            !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
            !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-           !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
            !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
            bch_journal_alloc(c) ||
            bch_btree_cache_alloc(c) ||
-           bch_open_buckets_alloc(c))
+           bch_open_buckets_alloc(c) ||
+           bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
                goto err;
 
        c->congested_read_threshold_us  = 2000;
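The bare sort buffer (c->sort, a bucket's worth of pages), its lock, the crit factor, and the sort timing stats are consolidated into a single object, set up by bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)) and torn down by bch_bset_sort_state_free(). A plausible shape for it, inferred only from what this patch initializes and frees:

	struct bset_sort_state {
		mempool_t		*pool;		/* replaces the bare page allocation */
		unsigned		page_order;	/* ilog2(c->btree_pages) */
		unsigned		crit_factor;	/* was c->sort_crit_factor */
		struct time_stats	time;		/* was c->sort_time */
	};
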
@@ -1549,7 +1576,7 @@ static void run_cache_set(struct cache_set *c)
                k = &j->btree_root;
 
                err = "bad btree root";
-               if (bch_btree_ptr_invalid(c, k))
+               if (__bch_btree_ptr_invalid(c, k))
                        goto err;
 
                err = "error reading btree root";
@@ -1743,6 +1770,7 @@ err:
 void bch_cache_release(struct kobject *kobj)
 {
        struct cache *ca = container_of(kobj, struct cache, kobj);
+       unsigned i;
 
        if (ca->set)
                ca->set->cache[ca->sb.nr_this_dev] = NULL;
@@ -1756,7 +1784,9 @@ void bch_cache_release(struct kobject *kobj)
        free_heap(&ca->heap);
        free_fifo(&ca->unused);
        free_fifo(&ca->free_inc);
-       free_fifo(&ca->free);
+
+       for (i = 0; i < RESERVE_NR; i++)
+               free_fifo(&ca->free[i]);
 
        if (ca->sb_bio.bi_inline_vecs[0].bv_page)
                put_page(ca->sb_bio.bi_io_vec[0].bv_page);
@@ -1782,10 +1812,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
        ca->journal.bio.bi_max_vecs = 8;
        ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;
 
-       free = roundup_pow_of_two(ca->sb.nbuckets) >> 9;
-       free = max_t(size_t, free, (prio_buckets(ca) + 8) * 2);
+       free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
 
-       if (!init_fifo(&ca->free,       free, GFP_KERNEL) ||
+       if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+           !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
            !init_fifo(&ca->free_inc,   free << 2, GFP_KERNEL) ||
            !init_fifo(&ca->unused,     free << 2, GFP_KERNEL) ||
            !init_heap(&ca->heap,       free << 3, GFP_KERNEL) ||
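The single freelist becomes one fifo per reserve; in struct cache this presumably reads DECLARE_FIFO(long, free)[RESERVE_NR], matching the free_fifo() loop in bch_cache_release() above. The sizing arithmetic, worked through for a hypothetical cache device:

	ca->sb.nbuckets		= 300000
	roundup_pow_of_two()	= 524288
	free = 524288 >> 10	= 512		/* was >> 9, i.e. twice this */

	free[RESERVE_BTREE]	= 8 entries
	free[RESERVE_PRIO]	= prio_buckets(ca)
	free[RESERVE_MOVINGGC]	= 512
	free[RESERVE_NONE]	= 512
	free_inc		= 512 << 2 = 2048
	unused			= 2048

The halved base size is roughly a wash: what was one fifo is now split between the movinggc and ordinary-data reserves.
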
@@ -2030,7 +2062,8 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
-       unregister_blkdev(bcache_major, "bcache");
+       if (bcache_major)
+               unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
 }