bcache: check CACHE_SET_IO_DISABLE in allocator code
author Coly Li <colyli@suse.de>
Fri, 28 Jun 2019 11:59:35 +0000 (19:59 +0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 26 Jul 2019 07:14:13 +0000 (09:14 +0200)
[ Upstream commit e775339e1ae1205b47d94881db124c11385e597c ]

If the CACHE_SET_IO_DISABLE flag of a cache set is set because of too
many I/O errors, the allocator routines can currently still continue to
allocate space, which may introduce an inconsistent metadata state.
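
For illustration only, here is a standalone C sketch of how an
error-accounting path can flip such a disable bit once an error limit is
crossed; in bcache this happens in the error handling code (e.g.
bch_cache_set_error()), and every name below (mock_cache_set,
mock_count_io_error, IO_DISABLE_BIT, io_error_limit) is invented for the
example, not a kernel symbol.

#include <stdio.h>
#include <stdbool.h>

/* Invented stand-in for a cache-set-wide "I/O disabled" flag bit. */
#define IO_DISABLE_BIT	0

struct mock_cache_set {
	unsigned long flags;
	unsigned int io_error_count;
	unsigned int io_error_limit;
};

/* Userspace stand-in for the kernel's test_and_set_bit(). */
static bool mock_test_and_set_bit(int nr, unsigned long *addr)
{
	bool old = (*addr >> nr) & 1UL;

	*addr |= 1UL << nr;
	return old;
}

/* Called for each I/O error; once the limit is crossed, set the disable
 * bit so that allocators can start rejecting requests. */
static void mock_count_io_error(struct mock_cache_set *c)
{
	if (++c->io_error_count < c->io_error_limit)
		return;

	if (!mock_test_and_set_bit(IO_DISABLE_BIT, &c->flags))
		fprintf(stderr, "too many I/O errors, disabling I/O\n");
}

int main(void)
{
	struct mock_cache_set c = {
		.flags = 0,
		.io_error_count = 0,
		.io_error_limit = 3,
	};

	for (int i = 0; i < 5; i++)
		mock_count_io_error(&c);

	printf("flags: %#lx\n", c.flags);	/* bit 0 is now set */
	return 0;
}

The allocator-side check added by this patch keys off exactly that kind
of shared flags word.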

This patch checks the CACHE_SET_IO_DISABLE bit in the following
allocator routines:
- bch_bucket_alloc()
- __bch_bucket_alloc_set()
Once CACHE_SET_IO_DISABLE is set on the cache set, the allocator
routines can reject allocation requests earlier, avoiding potentially
inconsistent metadata.
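
To make the effect concrete, here is a minimal standalone C sketch of
the same early-reject pattern. It is an illustrative model, not the
patched kernel code; mock_cache_set, mock_test_bit and
mock_bucket_alloc() are hypothetical names.

#include <stdio.h>
#include <stdbool.h>

#define IO_DISABLE_BIT	0	/* invented stand-in for CACHE_SET_IO_DISABLE */

struct mock_cache_set {
	unsigned long flags;
};

/* Userspace stand-in for the kernel's test_bit(). */
static bool mock_test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/* Models the patched bch_bucket_alloc(): refuse before touching free lists. */
static long mock_bucket_alloc(struct mock_cache_set *c)
{
	/* No allocation if the I/O-disable bit is set */
	if (mock_test_bit(IO_DISABLE_BIT, &c->flags))
		return -1;

	/* ... the normal fast path / free-list pop would go here ... */
	return 42;		/* pretend this is a bucket index */
}

int main(void)
{
	struct mock_cache_set c = { .flags = 0 };

	printf("before disable: %ld\n", mock_bucket_alloc(&c));	/* 42 */

	c.flags |= 1UL << IO_DISABLE_BIT;	/* too many I/O errors occurred */
	printf("after disable:  %ld\n", mock_bucket_alloc(&c));	/* -1 */
	return 0;
}

Once the bit is set, the mock allocator returns -1 immediately, just as
the patched routines below return -1 before touching the free lists.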

Signed-off-by: Coly Li <colyli@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
drivers/md/bcache/alloc.c

diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index de85b3a..9c3beb1 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
        struct bucket *b;
        long r;
 
+
+       /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+       if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
+               return -1;
+
        /* fastpath */
        if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
            fifo_pop(&ca->free[reserve], r))
@@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
 {
        int i;
 
+       /* No allocation if CACHE_SET_IO_DISABLE bit is set */
+       if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
+               return -1;
+
        lockdep_assert_held(&c->bucket_lock);
        BUG_ON(!n || n > c->caches_loaded || n > 8);