dm thin: don't use map_context
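
This diff rolls up several related changes to the thin target (it spans
two target versions, 1.4.0 to 1.6.0):

- The open-coded bio prison and deferred set implementations are deleted
  from dm-thin.c in favour of the factored-out dm-bio-prison module
  (dm_bio_prison_create(), dm_bio_detain(), dm_cell_release(),
  dm_deferred_set_create() and friends), pulled in through the new
  "dm-bio-prison.h" include.

- Per-bio state is no longer allocated from a dedicated mempool and
  passed around in map_context->ptr.  The target now declares
  ti->per_bio_data_size and fetches the hook with dm_per_bio_data(),
  which lets endio_hook_pool, _endio_hook_cache and all map_context
  usage be removed.

- Along the way, cell_release_singleton()/cell_defer_except() are
  replaced by cell_defer_no_holder(), hot-path DMERR() calls become
  DMERR_LIMIT(), and discard_granularity gains support for
  non-power-of-2 block sizes.

As a rough sketch (for illustration only, not part of the patch
itself), the per-bio data conversion follows this pattern:

    /* old style: hook allocated from a mempool, reached via map_info */
    struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;

    /* new style: declare the size once in the target constructor ... */
    ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

    /* ... then reach the preallocated per-bio area in map/endio paths */
    struct dm_thin_endio_hook *h =
            dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));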
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c29410a..e7743c6 100644
@@ -5,6 +5,7 @@
  */
 
 #include "dm-thin-metadata.h"
+#include "dm-bio-prison.h"
 #include "dm.h"
 
 #include <linux/device-mapper.h>
@@ -21,7 +22,6 @@
  * Tunable constants
  */
 #define ENDIO_HOOK_POOL_SIZE 1024
-#define DEFERRED_SET_SIZE 64
 #define MAPPING_POOL_SIZE 1024
 #define PRISON_CELLS 1024
 #define COMMIT_PERIOD HZ
@@ -58,7 +58,7 @@
  * i) plug io further to this physical block. (see bio_prison code).
  *
  * ii) quiesce any read io to that shared data block.  Obviously
- * including all devices that share this block.  (see deferred_set code)
+ * including all devices that share this block.  (see dm_deferred_set code)
  *
 * iii) copy the data block to a newly allocated block.  This step can be
  * missed out if the io covers the block. (schedule_copy).
 /*----------------------------------------------------------------*/
 
 /*
- * Sometimes we can't deal with a bio straight away.  We put them in prison
- * where they can't cause any mischief.  Bios are put in a cell identified
- * by a key, multiple bios can be in the same cell.  When the cell is
- * subsequently unlocked the bios become available.
- */
-struct bio_prison;
-
-struct cell_key {
-       int virtual;
-       dm_thin_id dev;
-       dm_block_t block;
-};
-
-struct dm_bio_prison_cell {
-       struct hlist_node list;
-       struct bio_prison *prison;
-       struct cell_key key;
-       struct bio *holder;
-       struct bio_list bios;
-};
-
-struct bio_prison {
-       spinlock_t lock;
-       mempool_t *cell_pool;
-
-       unsigned nr_buckets;
-       unsigned hash_mask;
-       struct hlist_head *cells;
-};
-
-static uint32_t calc_nr_buckets(unsigned nr_cells)
-{
-       uint32_t n = 128;
-
-       nr_cells /= 4;
-       nr_cells = min(nr_cells, 8192u);
-
-       while (n < nr_cells)
-               n <<= 1;
-
-       return n;
-}
-
-static struct kmem_cache *_cell_cache;
-
-/*
- * @nr_cells should be the number of cells you want in use _concurrently_.
- * Don't confuse it with the number of distinct keys.
- */
-static struct bio_prison *prison_create(unsigned nr_cells)
-{
-       unsigned i;
-       uint32_t nr_buckets = calc_nr_buckets(nr_cells);
-       size_t len = sizeof(struct bio_prison) +
-               (sizeof(struct hlist_head) * nr_buckets);
-       struct bio_prison *prison = kmalloc(len, GFP_KERNEL);
-
-       if (!prison)
-               return NULL;
-
-       spin_lock_init(&prison->lock);
-       prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
-       if (!prison->cell_pool) {
-               kfree(prison);
-               return NULL;
-       }
-
-       prison->nr_buckets = nr_buckets;
-       prison->hash_mask = nr_buckets - 1;
-       prison->cells = (struct hlist_head *) (prison + 1);
-       for (i = 0; i < nr_buckets; i++)
-               INIT_HLIST_HEAD(prison->cells + i);
-
-       return prison;
-}
-
-static void prison_destroy(struct bio_prison *prison)
-{
-       mempool_destroy(prison->cell_pool);
-       kfree(prison);
-}
-
-static uint32_t hash_key(struct bio_prison *prison, struct cell_key *key)
-{
-       const unsigned long BIG_PRIME = 4294967291UL;
-       uint64_t hash = key->block * BIG_PRIME;
-
-       return (uint32_t) (hash & prison->hash_mask);
-}
-
-static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
-{
-              return (lhs->virtual == rhs->virtual) &&
-                      (lhs->dev == rhs->dev) &&
-                      (lhs->block == rhs->block);
-}
-
-static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
-                                                 struct cell_key *key)
-{
-       struct dm_bio_prison_cell *cell;
-       struct hlist_node *tmp;
-
-       hlist_for_each_entry(cell, tmp, bucket, list)
-               if (keys_equal(&cell->key, key))
-                       return cell;
-
-       return NULL;
-}
-
-/*
- * This may block if a new cell needs allocating.  You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
- *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
- */
-static int bio_detain(struct bio_prison *prison, struct cell_key *key,
-                     struct bio *inmate, struct dm_bio_prison_cell **ref)
-{
-       int r = 1;
-       unsigned long flags;
-       uint32_t hash = hash_key(prison, key);
-       struct dm_bio_prison_cell *cell, *cell2;
-
-       BUG_ON(hash > prison->nr_buckets);
-
-       spin_lock_irqsave(&prison->lock, flags);
-
-       cell = __search_bucket(prison->cells + hash, key);
-       if (cell) {
-               bio_list_add(&cell->bios, inmate);
-               goto out;
-       }
-
-       /*
-        * Allocate a new cell
-        */
-       spin_unlock_irqrestore(&prison->lock, flags);
-       cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
-       spin_lock_irqsave(&prison->lock, flags);
-
-       /*
-        * We've been unlocked, so we have to double check that
-        * nobody else has inserted this cell in the meantime.
-        */
-       cell = __search_bucket(prison->cells + hash, key);
-       if (cell) {
-               mempool_free(cell2, prison->cell_pool);
-               bio_list_add(&cell->bios, inmate);
-               goto out;
-       }
-
-       /*
-        * Use new cell.
-        */
-       cell = cell2;
-
-       cell->prison = prison;
-       memcpy(&cell->key, key, sizeof(cell->key));
-       cell->holder = inmate;
-       bio_list_init(&cell->bios);
-       hlist_add_head(&cell->list, prison->cells + hash);
-
-       r = 0;
-
-out:
-       spin_unlock_irqrestore(&prison->lock, flags);
-
-       *ref = cell;
-
-       return r;
-}
-
-/*
- * @inmates must have been initialised prior to this call
- */
-static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
-{
-       struct bio_prison *prison = cell->prison;
-
-       hlist_del(&cell->list);
-
-       if (inmates) {
-               bio_list_add(inmates, cell->holder);
-               bio_list_merge(inmates, &cell->bios);
-       }
-
-       mempool_free(cell, prison->cell_pool);
-}
-
-static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
-{
-       unsigned long flags;
-       struct bio_prison *prison = cell->prison;
-
-       spin_lock_irqsave(&prison->lock, flags);
-       __cell_release(cell, bios);
-       spin_unlock_irqrestore(&prison->lock, flags);
-}
-
-/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again.  In these situations we know that no other
- * bio may be in the cell.  This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-       BUG_ON(cell->holder != bio);
-       BUG_ON(!bio_list_empty(&cell->bios));
-
-       __cell_release(cell, NULL);
-}
-
-static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-       unsigned long flags;
-       struct bio_prison *prison = cell->prison;
-
-       spin_lock_irqsave(&prison->lock, flags);
-       __cell_release_singleton(cell, bio);
-       spin_unlock_irqrestore(&prison->lock, flags);
-}
-
-/*
- * Sometimes we don't want the holder, just the additional bios.
- */
-static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
-                                    struct bio_list *inmates)
-{
-       struct bio_prison *prison = cell->prison;
-
-       hlist_del(&cell->list);
-       bio_list_merge(inmates, &cell->bios);
-
-       mempool_free(cell, prison->cell_pool);
-}
-
-static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
-                                  struct bio_list *inmates)
-{
-       unsigned long flags;
-       struct bio_prison *prison = cell->prison;
-
-       spin_lock_irqsave(&prison->lock, flags);
-       __cell_release_no_holder(cell, inmates);
-       spin_unlock_irqrestore(&prison->lock, flags);
-}
-
-static void cell_error(struct dm_bio_prison_cell *cell)
-{
-       struct bio_prison *prison = cell->prison;
-       struct bio_list bios;
-       struct bio *bio;
-       unsigned long flags;
-
-       bio_list_init(&bios);
-
-       spin_lock_irqsave(&prison->lock, flags);
-       __cell_release(cell, &bios);
-       spin_unlock_irqrestore(&prison->lock, flags);
-
-       while ((bio = bio_list_pop(&bios)))
-               bio_io_error(bio);
-}
-
-/*----------------------------------------------------------------*/
-
-/*
- * We use the deferred set to keep track of pending reads to shared blocks.
- * We do this to ensure the new mapping caused by a write isn't performed
- * until these prior reads have completed.  Otherwise the insertion of the
- * new mapping could free the old block that the read bios are mapped to.
- */
-
-struct deferred_set;
-struct deferred_entry {
-       struct deferred_set *ds;
-       unsigned count;
-       struct list_head work_items;
-};
-
-struct deferred_set {
-       spinlock_t lock;
-       unsigned current_entry;
-       unsigned sweeper;
-       struct deferred_entry entries[DEFERRED_SET_SIZE];
-};
-
-static void ds_init(struct deferred_set *ds)
-{
-       int i;
-
-       spin_lock_init(&ds->lock);
-       ds->current_entry = 0;
-       ds->sweeper = 0;
-       for (i = 0; i < DEFERRED_SET_SIZE; i++) {
-               ds->entries[i].ds = ds;
-               ds->entries[i].count = 0;
-               INIT_LIST_HEAD(&ds->entries[i].work_items);
-       }
-}
-
-static struct deferred_entry *ds_inc(struct deferred_set *ds)
-{
-       unsigned long flags;
-       struct deferred_entry *entry;
-
-       spin_lock_irqsave(&ds->lock, flags);
-       entry = ds->entries + ds->current_entry;
-       entry->count++;
-       spin_unlock_irqrestore(&ds->lock, flags);
-
-       return entry;
-}
-
-static unsigned ds_next(unsigned index)
-{
-       return (index + 1) % DEFERRED_SET_SIZE;
-}
-
-static void __sweep(struct deferred_set *ds, struct list_head *head)
-{
-       while ((ds->sweeper != ds->current_entry) &&
-              !ds->entries[ds->sweeper].count) {
-               list_splice_init(&ds->entries[ds->sweeper].work_items, head);
-               ds->sweeper = ds_next(ds->sweeper);
-       }
-
-       if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
-               list_splice_init(&ds->entries[ds->sweeper].work_items, head);
-}
-
-static void ds_dec(struct deferred_entry *entry, struct list_head *head)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&entry->ds->lock, flags);
-       BUG_ON(!entry->count);
-       --entry->count;
-       __sweep(entry->ds, head);
-       spin_unlock_irqrestore(&entry->ds->lock, flags);
-}
-
-/*
- * Returns 1 if deferred or 0 if no pending items to delay job.
- */
-static int ds_add_work(struct deferred_set *ds, struct list_head *work)
-{
-       int r = 1;
-       unsigned long flags;
-       unsigned next_entry;
-
-       spin_lock_irqsave(&ds->lock, flags);
-       if ((ds->sweeper == ds->current_entry) &&
-           !ds->entries[ds->current_entry].count)
-               r = 0;
-       else {
-               list_add(work, &ds->entries[ds->current_entry].work_items);
-               next_entry = ds_next(ds->current_entry);
-               if (!ds->entries[next_entry].count)
-                       ds->current_entry = next_entry;
-       }
-       spin_unlock_irqrestore(&ds->lock, flags);
-
-       return r;
-}
-
-/*----------------------------------------------------------------*/
-
-/*
  * Key building.
  */
 static void build_data_key(struct dm_thin_device *td,
-                          dm_block_t b, struct cell_key *key)
+                          dm_block_t b, struct dm_cell_key *key)
 {
        key->virtual = 0;
        key->dev = dm_thin_dev_id(td);
@@ -481,7 +110,7 @@ static void build_data_key(struct dm_thin_device *td,
 }
 
 static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
-                             struct cell_key *key)
+                             struct dm_cell_key *key)
 {
        key->virtual = 1;
        key->dev = dm_thin_dev_id(td);
@@ -534,7 +163,7 @@ struct pool {
        unsigned low_water_triggered:1; /* A dm event has been sent */
        unsigned no_free_space:1;       /* A -ENOSPC warning has been issued */
 
-       struct bio_prison *prison;
+       struct dm_bio_prison *prison;
        struct dm_kcopyd_client *copier;
 
        struct workqueue_struct *wq;
@@ -552,12 +181,11 @@ struct pool {
 
        struct bio_list retry_on_resume_list;
 
-       struct deferred_set shared_read_ds;
-       struct deferred_set all_io_ds;
+       struct dm_deferred_set *shared_read_ds;
+       struct dm_deferred_set *all_io_ds;
 
        struct dm_thin_new_mapping *next_mapping;
        mempool_t *mapping_pool;
-       mempool_t *endio_hook_pool;
 
        process_bio_fn process_bio;
        process_bio_fn process_discard;
@@ -660,8 +288,8 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
 
 struct dm_thin_endio_hook {
        struct thin_c *tc;
-       struct deferred_entry *shared_read_entry;
-       struct deferred_entry *all_io_entry;
+       struct dm_deferred_entry *shared_read_entry;
+       struct dm_deferred_entry *all_io_entry;
        struct dm_thin_new_mapping *overwrite_mapping;
 };
 
@@ -675,7 +303,7 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
        bio_list_init(master);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
                if (h->tc == tc)
                        bio_endio(bio, DM_ENDIO_REQUEUE);
@@ -739,6 +367,17 @@ static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
                dm_thin_changed_this_transaction(tc->td);
 }
 
+static void inc_all_io_entry(struct pool *pool, struct bio *bio)
+{
+       struct dm_thin_endio_hook *h;
+
+       if (bio->bi_rw & REQ_DISCARD)
+               return;
+
+       h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
+       h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
+}
+
 static void issue(struct thin_c *tc, struct bio *bio)
 {
        struct pool *pool = tc->pool;
@@ -845,7 +484,7 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
 static void overwrite_endio(struct bio *bio, int err)
 {
        unsigned long flags;
-       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct dm_thin_new_mapping *m = h->overwrite_mapping;
        struct pool *pool = m->tc->pool;
 
@@ -870,33 +509,28 @@ static void overwrite_endio(struct bio *bio, int err)
 /*
  * This sends the bios in the cell back to the deferred_bios list.
  */
-static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
-                      dm_block_t data_block)
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
        struct pool *pool = tc->pool;
        unsigned long flags;
 
        spin_lock_irqsave(&pool->lock, flags);
-       cell_release(cell, &pool->deferred_bios);
+       dm_cell_release(cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
 
        wake_worker(pool);
 }
 
 /*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
  */
-static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
+static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
-       struct bio_list bios;
        struct pool *pool = tc->pool;
        unsigned long flags;
 
-       bio_list_init(&bios);
-
        spin_lock_irqsave(&pool->lock, flags);
-       cell_release_no_holder(cell, &pool->deferred_bios);
+       dm_cell_release_no_holder(cell, &pool->deferred_bios);
        spin_unlock_irqrestore(&pool->lock, flags);
 
        wake_worker(pool);
@@ -906,7 +540,7 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
 {
        if (m->bio)
                m->bio->bi_end_io = m->saved_bi_end_io;
-       cell_error(m->cell);
+       dm_cell_error(m->cell);
        list_del(&m->list);
        mempool_free(m, m->tc->pool->mapping_pool);
 }
@@ -921,7 +555,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
                bio->bi_end_io = m->saved_bi_end_io;
 
        if (m->err) {
-               cell_error(m->cell);
+               dm_cell_error(m->cell);
                goto out;
        }
 
@@ -932,8 +566,8 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
         */
        r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
        if (r) {
-               DMERR("dm_thin_insert_block() failed");
-               cell_error(m->cell);
+               DMERR_LIMIT("dm_thin_insert_block() failed");
+               dm_cell_error(m->cell);
                goto out;
        }
 
@@ -944,10 +578,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
         * the bios in the cell.
         */
        if (bio) {
-               cell_defer_except(tc, m->cell);
+               cell_defer_no_holder(tc, m->cell);
                bio_endio(bio, 0);
        } else
-               cell_defer(tc, m->cell, m->data_block);
+               cell_defer(tc, m->cell);
 
 out:
        list_del(&m->list);
@@ -959,8 +593,8 @@ static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
        struct thin_c *tc = m->tc;
 
        bio_io_error(m->bio);
-       cell_defer_except(tc, m->cell);
-       cell_defer_except(tc, m->cell2);
+       cell_defer_no_holder(tc, m->cell);
+       cell_defer_no_holder(tc, m->cell2);
        mempool_free(m, tc->pool->mapping_pool);
 }
 
@@ -968,13 +602,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
 {
        struct thin_c *tc = m->tc;
 
+       inc_all_io_entry(tc->pool, m->bio);
+       cell_defer_no_holder(tc, m->cell);
+       cell_defer_no_holder(tc, m->cell2);
+
        if (m->pass_discard)
                remap_and_issue(tc, m->bio, m->data_block);
        else
                bio_endio(m->bio, 0);
 
-       cell_defer_except(tc, m->cell);
-       cell_defer_except(tc, m->cell2);
        mempool_free(m, tc->pool->mapping_pool);
 }
 
@@ -985,7 +621,7 @@ static void process_prepared_discard(struct dm_thin_new_mapping *m)
 
        r = dm_thin_remove_block(tc->td, m->virt_block);
        if (r)
-               DMERR("dm_thin_remove_block() failed");
+               DMERR_LIMIT("dm_thin_remove_block() failed");
 
        process_prepared_discard_passdown(m);
 }
@@ -1067,7 +703,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
        m->err = 0;
        m->bio = NULL;
 
-       if (!ds_add_work(&pool->shared_read_ds, &m->list))
+       if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
                m->quiesced = 1;
 
        /*
@@ -1077,11 +713,12 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
         * bio immediately. Otherwise we use kcopyd to clone the data first.
         */
        if (io_overwrites_block(pool, bio)) {
-               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+               inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_dest);
        } else {
                struct dm_io_region from, to;
@@ -1098,8 +735,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
                                   0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
-                       DMERR("dm_kcopyd_copy() failed");
-                       cell_error(cell);
+                       DMERR_LIMIT("dm_kcopyd_copy() failed");
+                       dm_cell_error(cell);
                }
        }
 }
@@ -1146,11 +783,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                process_prepared_mapping(m);
 
        else if (io_overwrites_block(pool, bio)) {
-               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
                h->overwrite_mapping = m;
                m->bio = bio;
                save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
+               inc_all_io_entry(pool, bio);
                remap_and_issue(tc, bio, data_block);
        } else {
                int r;
@@ -1163,8 +801,8 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
                r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
                if (r < 0) {
                        mempool_free(m, pool->mapping_pool);
-                       DMERR("dm_kcopyd_zero() failed");
-                       cell_error(cell);
+                       DMERR_LIMIT("dm_kcopyd_zero() failed");
+                       dm_cell_error(cell);
                }
        }
 }
@@ -1175,7 +813,7 @@ static int commit(struct pool *pool)
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
-               DMERR("commit failed, error = %d", r);
+               DMERR_LIMIT("commit failed: error = %d", r);
 
        return r;
 }
@@ -1260,7 +898,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
  */
 static void retry_on_resume(struct bio *bio)
 {
-       struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+       struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct thin_c *tc = h->tc;
        struct pool *pool = tc->pool;
        unsigned long flags;
@@ -1276,7 +914,7 @@ static void no_space(struct dm_bio_prison_cell *cell)
        struct bio_list bios;
 
        bio_list_init(&bios);
-       cell_release(cell, &bios);
+       dm_cell_release(cell, &bios);
 
        while ((bio = bio_list_pop(&bios)))
                retry_on_resume(bio);
@@ -1288,13 +926,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
        unsigned long flags;
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *cell2;
-       struct cell_key key, key2;
+       struct dm_cell_key key, key2;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_lookup_result lookup_result;
        struct dm_thin_new_mapping *m;
 
        build_virtual_key(tc->td, block, &key);
-       if (bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1306,8 +944,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                 * on this block.
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
-               if (bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-                       cell_release_singleton(cell, bio);
+               if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+                       cell_defer_no_holder(tc, cell);
                        break;
                }
 
@@ -1326,20 +964,22 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                        m->err = 0;
                        m->bio = bio;
 
-                       if (!ds_add_work(&pool->all_io_ds, &m->list)) {
+                       if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
                                spin_lock_irqsave(&pool->lock, flags);
                                list_add(&m->list, &pool->prepared_discards);
                                spin_unlock_irqrestore(&pool->lock, flags);
                                wake_worker(pool);
                        }
                } else {
+                       inc_all_io_entry(pool, bio);
+                       cell_defer_no_holder(tc, cell);
+                       cell_defer_no_holder(tc, cell2);
+
                        /*
                         * The DM core makes sure that the discard doesn't span
                         * a block boundary.  So we submit the discard of a
                         * partial block appropriately.
                         */
-                       cell_release_singleton(cell, bio);
-                       cell_release_singleton(cell2, bio);
                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);
                        else
@@ -1351,20 +991,21 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                /*
                 * It isn't provisioned, just forget it.
                 */
-               cell_release_singleton(cell, bio);
+               cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                break;
 
        default:
-               DMERR("discard: find block unexpectedly returned %d", r);
-               cell_release_singleton(cell, bio);
+               DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+                           __func__, r);
+               cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
 }
 
 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
-                         struct cell_key *key,
+                         struct dm_cell_key *key,
                          struct dm_thin_lookup_result *lookup_result,
                          struct dm_bio_prison_cell *cell)
 {
@@ -1383,8 +1024,9 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
                break;
 
        default:
-               DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
-               cell_error(cell);
+               DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+                           __func__, r);
+               dm_cell_error(cell);
                break;
        }
 }
@@ -1395,24 +1037,25 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 {
        struct dm_bio_prison_cell *cell;
        struct pool *pool = tc->pool;
-       struct cell_key key;
+       struct dm_cell_key key;
 
        /*
         * If cell is already occupied, then sharing is already in the process
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);
-       if (bio_detain(pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(pool->prison, &key, bio, &cell))
                return;
 
        if (bio_data_dir(bio) == WRITE && bio->bi_size)
                break_sharing(tc, bio, block, &key, lookup_result, cell);
        else {
-               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
-               h->shared_read_entry = ds_inc(&pool->shared_read_ds);
+               h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
+               inc_all_io_entry(pool, bio);
+               cell_defer_no_holder(tc, cell);
 
-               cell_release_singleton(cell, bio);
                remap_and_issue(tc, bio, lookup_result->block);
        }
 }
@@ -1427,7 +1070,9 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_size) {
-               cell_release_singleton(cell, bio);
+               inc_all_io_entry(tc->pool, bio);
+               cell_defer_no_holder(tc, cell);
+
                remap_and_issue(tc, bio, 0);
                return;
        }
@@ -1437,7 +1082,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
         */
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
-               cell_release_singleton(cell, bio);
+               cell_defer_no_holder(tc, cell);
                bio_endio(bio, 0);
                return;
        }
@@ -1456,9 +1101,10 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
                break;
 
        default:
-               DMERR("%s: alloc_data_block() failed, error = %d", __func__, r);
+               DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
+                           __func__, r);
                set_pool_mode(tc->pool, PM_READ_ONLY);
-               cell_error(cell);
+               dm_cell_error(cell);
                break;
        }
 }
@@ -1468,7 +1114,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
        int r;
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_bio_prison_cell *cell;
-       struct cell_key key;
+       struct dm_cell_key key;
        struct dm_thin_lookup_result lookup_result;
 
        /*
@@ -1476,40 +1122,37 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
-       if (bio_detain(tc->pool->prison, &key, bio, &cell))
+       if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
                return;
 
        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
        switch (r) {
        case 0:
-               /*
-                * We can release this cell now.  This thread is the only
-                * one that puts bios into a cell, and we know there were
-                * no preceding bios.
-                */
-               /*
-                * TODO: this will probably have to change when discard goes
-                * back in.
-                */
-               cell_release_singleton(cell, bio);
-
-               if (lookup_result.shared)
+               if (lookup_result.shared) {
                        process_shared_bio(tc, bio, block, &lookup_result);
-               else
+                       cell_defer_no_holder(tc, cell);
+               } else {
+                       inc_all_io_entry(tc->pool, bio);
+                       cell_defer_no_holder(tc, cell);
+
                        remap_and_issue(tc, bio, lookup_result.block);
+               }
                break;
 
        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
-                       cell_release_singleton(cell, bio);
+                       inc_all_io_entry(tc->pool, bio);
+                       cell_defer_no_holder(tc, cell);
+
                        remap_to_origin_and_issue(tc, bio);
                } else
                        provision_block(tc, bio, block, cell);
                break;
 
        default:
-               DMERR("dm_thin_find_block() failed, error = %d", r);
-               cell_release_singleton(cell, bio);
+               DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+                           __func__, r);
+               cell_defer_no_holder(tc, cell);
                bio_io_error(bio);
                break;
        }
@@ -1527,8 +1170,10 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
        case 0:
                if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
                        bio_io_error(bio);
-               else
+               else {
+                       inc_all_io_entry(tc->pool, bio);
                        remap_and_issue(tc, bio, lookup_result.block);
+               }
                break;
 
        case -ENODATA:
@@ -1538,6 +1183,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
                }
 
                if (tc->origin_dev) {
+                       inc_all_io_entry(tc->pool, bio);
                        remap_to_origin_and_issue(tc, bio);
                        break;
                }
@@ -1547,7 +1193,8 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
                break;
 
        default:
-               DMERR("dm_thin_find_block() failed, error = %d", r);
+               DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
+                           __func__, r);
                bio_io_error(bio);
                break;
        }
@@ -1578,7 +1225,7 @@ static void process_deferred_bios(struct pool *pool)
        spin_unlock_irqrestore(&pool->lock, flags);
 
        while ((bio = bio_list_pop(&bios))) {
-               struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+               struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
                struct thin_c *tc = h->tc;
 
                /*
@@ -1711,17 +1358,14 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
        wake_worker(pool);
 }
 
-static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 {
-       struct pool *pool = tc->pool;
-       struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+       struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 
        h->tc = tc;
        h->shared_read_entry = NULL;
-       h->all_io_entry = bio->bi_rw & REQ_DISCARD ? NULL : ds_inc(&pool->all_io_ds);
+       h->all_io_entry = NULL;
        h->overwrite_mapping = NULL;
-
-       return h;
 }
 
 /*
@@ -1735,8 +1379,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
        dm_block_t block = get_bio_block(tc, bio);
        struct dm_thin_device *td = tc->td;
        struct dm_thin_lookup_result result;
+       struct dm_bio_prison_cell *cell1, *cell2;
+       struct dm_cell_key key;
 
-       map_context->ptr = thin_hook_bio(tc, bio);
+       thin_hook_bio(tc, bio);
 
        if (get_pool_mode(tc->pool) == PM_FAIL) {
                bio_io_error(bio);
@@ -1771,12 +1417,25 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
                         * shared flag will be set in their case.
                         */
                        thin_defer_bio(tc, bio);
-                       r = DM_MAPIO_SUBMITTED;
-               } else {
-                       remap(tc, bio, result.block);
-                       r = DM_MAPIO_REMAPPED;
+                       return DM_MAPIO_SUBMITTED;
+               }
+
+               build_virtual_key(tc->td, block, &key);
+               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+                       return DM_MAPIO_SUBMITTED;
+
+               build_data_key(tc->td, result.block, &key);
+               if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
+                       cell_defer_no_holder(tc, cell1);
+                       return DM_MAPIO_SUBMITTED;
                }
-               break;
+
+               inc_all_io_entry(tc->pool, bio);
+               cell_defer_no_holder(tc, cell2);
+               cell_defer_no_holder(tc, cell1);
+
+               remap(tc, bio, result.block);
+               return DM_MAPIO_REMAPPED;
 
        case -ENODATA:
                if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
@@ -1785,8 +1444,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
                         * of doing so.  Just error it.
                         */
                        bio_io_error(bio);
-                       r = DM_MAPIO_SUBMITTED;
-                       break;
+                       return DM_MAPIO_SUBMITTED;
                }
                /* fall through */
 
@@ -1796,8 +1454,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
                 * provide the hint to load the metadata into cache.
                 */
                thin_defer_bio(tc, bio);
-               r = DM_MAPIO_SUBMITTED;
-               break;
+               return DM_MAPIO_SUBMITTED;
 
        default:
                /*
@@ -1806,11 +1463,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio,
                 * pool is switched to fail-io mode.
                 */
                bio_io_error(bio);
-               r = DM_MAPIO_SUBMITTED;
-               break;
+               return DM_MAPIO_SUBMITTED;
        }
-
-       return r;
 }
 
 static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
@@ -1928,7 +1582,7 @@ static void __pool_destroy(struct pool *pool)
        if (dm_pool_metadata_close(pool->pmd) < 0)
                DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
 
-       prison_destroy(pool->prison);
+       dm_bio_prison_destroy(pool->prison);
        dm_kcopyd_client_destroy(pool->copier);
 
        if (pool->wq)
@@ -1937,12 +1591,12 @@ static void __pool_destroy(struct pool *pool)
        if (pool->next_mapping)
                mempool_free(pool->next_mapping, pool->mapping_pool);
        mempool_destroy(pool->mapping_pool);
-       mempool_destroy(pool->endio_hook_pool);
+       dm_deferred_set_destroy(pool->shared_read_ds);
+       dm_deferred_set_destroy(pool->all_io_ds);
        kfree(pool);
 }
 
 static struct kmem_cache *_new_mapping_cache;
-static struct kmem_cache *_endio_hook_cache;
 
 static struct pool *pool_create(struct mapped_device *pool_md,
                                struct block_device *metadata_dev,
@@ -1976,7 +1630,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                pool->sectors_per_block_shift = __ffs(block_size);
        pool->low_water_blocks = 0;
        pool_features_init(&pool->pf);
-       pool->prison = prison_create(PRISON_CELLS);
+       pool->prison = dm_bio_prison_create(PRISON_CELLS);
        if (!pool->prison) {
                *error = "Error creating pool's bio prison";
                err_p = ERR_PTR(-ENOMEM);
@@ -2012,8 +1666,20 @@ static struct pool *pool_create(struct mapped_device *pool_md,
        pool->low_water_triggered = 0;
        pool->no_free_space = 0;
        bio_list_init(&pool->retry_on_resume_list);
-       ds_init(&pool->shared_read_ds);
-       ds_init(&pool->all_io_ds);
+
+       pool->shared_read_ds = dm_deferred_set_create();
+       if (!pool->shared_read_ds) {
+               *error = "Error creating pool's shared read deferred set";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_shared_read_ds;
+       }
+
+       pool->all_io_ds = dm_deferred_set_create();
+       if (!pool->all_io_ds) {
+               *error = "Error creating pool's all io deferred set";
+               err_p = ERR_PTR(-ENOMEM);
+               goto bad_all_io_ds;
+       }
 
        pool->next_mapping = NULL;
        pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
@@ -2024,13 +1690,6 @@ static struct pool *pool_create(struct mapped_device *pool_md,
                goto bad_mapping_pool;
        }
 
-       pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
-                                                        _endio_hook_cache);
-       if (!pool->endio_hook_pool) {
-               *error = "Error creating pool's endio_hook mempool";
-               err_p = ERR_PTR(-ENOMEM);
-               goto bad_endio_hook_pool;
-       }
        pool->ref_count = 1;
        pool->last_commit_jiffies = jiffies;
        pool->pool_md = pool_md;
@@ -2039,14 +1698,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
 
        return pool;
 
-bad_endio_hook_pool:
-       mempool_destroy(pool->mapping_pool);
 bad_mapping_pool:
+       dm_deferred_set_destroy(pool->all_io_ds);
+bad_all_io_ds:
+       dm_deferred_set_destroy(pool->shared_read_ds);
+bad_shared_read_ds:
        destroy_workqueue(pool->wq);
 bad_wq:
        dm_kcopyd_client_destroy(pool->copier);
 bad_kcopyd_client:
-       prison_destroy(pool->prison);
+       dm_bio_prison_destroy(pool->prison);
 bad_prison:
        kfree(pool);
 bad_pool:
@@ -2272,15 +1933,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto out_flags_changed;
        }
 
-       /*
-        * The block layer requires discard_granularity to be a power of 2.
-        */
-       if (pf.discard_enabled && !is_power_of_2(block_size)) {
-               ti->error = "Discard support must be disabled when the block size is not a power of 2";
-               r = -EINVAL;
-               goto out_flags_changed;
-       }
-
        pt->pool = pool;
        pt->ti = ti;
        pt->metadata_dev = metadata_dev;
@@ -2720,7 +2372,9 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("rw ");
 
-               if (pool->pf.discard_enabled && pool->pf.discard_passdown)
+               if (!pool->pf.discard_enabled)
+                       DMEMIT("ignore_discard");
+               else if (pool->pf.discard_passdown)
                        DMEMIT("discard_passdown");
                else
                        DMEMIT("no_discard_passdown");
@@ -2762,6 +2416,11 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+       return pool->sectors_per_block_shift >= 0;
+}
+
 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
        struct pool *pool = pt->pool;
@@ -2775,8 +2434,15 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
        if (pt->adjusted_pf.discard_passdown) {
                data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
                limits->discard_granularity = data_limits->discard_granularity;
-       } else
+       } else if (block_size_is_power_of_two(pool))
                limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
+       else
+               /*
+                * Use largest power of 2 that is a factor of sectors_per_block
+                * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
+                */
+               limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
+                                                 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2804,7 +2470,7 @@ static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 4, 0},
+       .version = {1, 6, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -2926,6 +2592,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        ti->num_flush_requests = 1;
        ti->flush_supported = true;
+       ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
        if (tc->pool->pf.discard_enabled) {
@@ -2972,14 +2639,14 @@ static int thin_endio(struct dm_target *ti,
                      union map_info *map_context)
 {
        unsigned long flags;
-       struct dm_thin_endio_hook *h = map_context->ptr;
+       struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct list_head work;
        struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;
 
        if (h->shared_read_entry) {
                INIT_LIST_HEAD(&work);
-               ds_dec(h->shared_read_entry, &work);
+               dm_deferred_entry_dec(h->shared_read_entry, &work);
 
                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
@@ -2992,15 +2659,16 @@ static int thin_endio(struct dm_target *ti,
 
        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
-               ds_dec(h->all_io_entry, &work);
-               spin_lock_irqsave(&pool->lock, flags);
-               list_for_each_entry_safe(m, tmp, &work, list)
-                       list_add(&m->list, &pool->prepared_discards);
-               spin_unlock_irqrestore(&pool->lock, flags);
+               dm_deferred_entry_dec(h->all_io_entry, &work);
+               if (!list_empty(&work)) {
+                       spin_lock_irqsave(&pool->lock, flags);
+                       list_for_each_entry_safe(m, tmp, &work, list)
+                               list_add(&m->list, &pool->prepared_discards);
+                       spin_unlock_irqrestore(&pool->lock, flags);
+                       wake_worker(pool);
+               }
        }
 
-       mempool_free(h, pool->endio_hook_pool);
-
        return 0;
 }
 
@@ -3095,7 +2763,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 4, 0},
+       .version = {1, 6, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
@@ -3125,25 +2793,13 @@ static int __init dm_thin_init(void)
 
        r = -ENOMEM;
 
-       _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
-       if (!_cell_cache)
-               goto bad_cell_cache;
-
        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
        if (!_new_mapping_cache)
                goto bad_new_mapping_cache;
 
-       _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
-       if (!_endio_hook_cache)
-               goto bad_endio_hook_cache;
-
        return 0;
 
-bad_endio_hook_cache:
-       kmem_cache_destroy(_new_mapping_cache);
 bad_new_mapping_cache:
-       kmem_cache_destroy(_cell_cache);
-bad_cell_cache:
        dm_unregister_target(&pool_target);
 bad_pool_target:
        dm_unregister_target(&thin_target);
@@ -3156,9 +2812,7 @@ static void dm_thin_exit(void)
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);
 
-       kmem_cache_destroy(_cell_cache);
        kmem_cache_destroy(_new_mapping_cache);
-       kmem_cache_destroy(_endio_hook_cache);
 }
 
 module_init(dm_thin_init);