block: use atomic bitops for ->queue_flags
Author:     Christoph Hellwig <hch@lst.de>
AuthorDate: Wed, 14 Nov 2018 16:02:07 +0000 (17:02 +0100)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 15 Nov 2018 19:13:19 +0000 (12:13 -0700)
->queue_flags is generally not set or cleared in the fast path, and also
generally set or cleared one flag at a time.  Make use of the normal
atomic bitops for it so that we don't need to take the queue_lock,
which is otherwise mostly unused in the core block layer now.
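As an illustration (not part of the patch itself), a caller that previously
had to wrap the unlocked helpers in the queue_lock can now use a single
atomic helper; this mirrors the conversions below:

	/* before: non-atomic __set_bit() protected by the queue_lock */
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	spin_unlock_irq(q->queue_lock);

	/* after: blk_queue_flag_set() does an atomic set_bit() internally */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);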

Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
block/blk-settings.c
block/blk-sysfs.c
block/blk.h
include/linux/blkdev.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 1c9b697..5c8e66a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -74,11 +74,7 @@ static struct workqueue_struct *kblockd_workqueue;
  */
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_set(flag, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       set_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL(blk_queue_flag_set);
 
@@ -89,11 +85,7 @@ EXPORT_SYMBOL(blk_queue_flag_set);
  */
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       queue_flag_clear(flag, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
+       clear_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL(blk_queue_flag_clear);
 
@@ -107,38 +99,10 @@ EXPORT_SYMBOL(blk_queue_flag_clear);
  */
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
 {
-       unsigned long flags;
-       bool res;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       res = queue_flag_test_and_set(flag, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       return res;
+       return test_and_set_bit(flag, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
 
-/**
- * blk_queue_flag_test_and_clear - atomically test and clear a queue flag
- * @flag: flag to be cleared
- * @q: request queue
- *
- * Returns the previous value of @flag - 0 if the flag was not set and 1 if
- * the flag was set.
- */
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q)
-{
-       unsigned long flags;
-       bool res;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-       res = queue_flag_test_and_clear(flag, q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-
-       return res;
-}
-EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_clear);
-
 void blk_rq_init(struct request_queue *q, struct request *rq)
 {
        memset(rq, 0, sizeof(*rq));
@@ -368,12 +332,10 @@ void blk_cleanup_queue(struct request_queue *q)
        /* mark @q DYING, no new request or merges will be allowed afterwards */
        mutex_lock(&q->sysfs_lock);
        blk_set_queue_dying(q);
-       spin_lock_irq(lock);
 
-       queue_flag_set(QUEUE_FLAG_NOMERGES, q);
-       queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-       queue_flag_set(QUEUE_FLAG_DYING, q);
-       spin_unlock_irq(lock);
+       blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+       blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+       blk_queue_flag_set(QUEUE_FLAG_DYING, q);
        mutex_unlock(&q->sysfs_lock);
 
        /*
@@ -384,9 +346,7 @@ void blk_cleanup_queue(struct request_queue *q)
 
        rq_qos_exit(q);
 
-       spin_lock_irq(lock);
-       queue_flag_set(QUEUE_FLAG_DEAD, q);
-       spin_unlock_irq(lock);
+       blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
 
        /*
         * make sure all in-progress dispatch are completed because
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c82b4b..e2717e8 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2756,7 +2756,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
 
        if (!(set->flags & BLK_MQ_F_SG_MERGE))
-               queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+               blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
        q->sg_reserved_size = INT_MAX;
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index cca8359..3abe831 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -834,16 +834,14 @@ EXPORT_SYMBOL(blk_set_queue_depth);
  */
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
-       spin_lock_irq(q->queue_lock);
        if (wc)
-               queue_flag_set(QUEUE_FLAG_WC, q);
+               blk_queue_flag_set(QUEUE_FLAG_WC, q);
        else
-               queue_flag_clear(QUEUE_FLAG_WC, q);
+               blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        if (fua)
-               queue_flag_set(QUEUE_FLAG_FUA, q);
+               blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
-               queue_flag_clear(QUEUE_FLAG_FUA, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
 
        wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
 }
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index d4b1b84..22fd086 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -316,14 +316,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
        if (ret < 0)
                return ret;
 
-       spin_lock_irq(q->queue_lock);
-       queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
-       queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+       blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+       blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
-               queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+               blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
-               queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
-       spin_unlock_irq(q->queue_lock);
+               blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 
        return ret;
 }
@@ -347,18 +345,16 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        if (ret < 0)
                return ret;
 
-       spin_lock_irq(q->queue_lock);
        if (val == 2) {
-               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-               queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+               blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+               blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
-               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+               blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+               blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
-               queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
-               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+               blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+               blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
-       spin_unlock_irq(q->queue_lock);
 #endif
        return ret;
 }
@@ -889,7 +885,7 @@ int blk_register_queue(struct gendisk *disk)
        WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
                  "%s is registering an already registered queue\n",
                  kobject_name(&dev->kobj));
-       queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);
+       blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
 
        /*
         * SCSI probing may synchronously create and destroy a lot of
@@ -901,7 +897,7 @@ int blk_register_queue(struct gendisk *disk)
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
-               queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
+               blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
        }
 
diff --git a/block/blk.h b/block/blk.h
index 08a5845..f2ddc71 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -48,62 +48,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
                lockdep_assert_held(q->queue_lock);
 }
 
-static inline void queue_flag_set_unlocked(unsigned int flag,
-                                          struct request_queue *q)
-{
-       if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-           kref_read(&q->kobj.kref))
-               lockdep_assert_held(q->queue_lock);
-       __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear_unlocked(unsigned int flag,
-                                            struct request_queue *q)
-{
-       if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
-           kref_read(&q->kobj.kref))
-               lockdep_assert_held(q->queue_lock);
-       __clear_bit(flag, &q->queue_flags);
-}
-
-static inline int queue_flag_test_and_clear(unsigned int flag,
-                                           struct request_queue *q)
-{
-       queue_lockdep_assert_held(q);
-
-       if (test_bit(flag, &q->queue_flags)) {
-               __clear_bit(flag, &q->queue_flags);
-               return 1;
-       }
-
-       return 0;
-}
-
-static inline int queue_flag_test_and_set(unsigned int flag,
-                                         struct request_queue *q)
-{
-       queue_lockdep_assert_held(q);
-
-       if (!test_bit(flag, &q->queue_flags)) {
-               __set_bit(flag, &q->queue_flags);
-               return 0;
-       }
-
-       return 1;
-}
-
-static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
-{
-       queue_lockdep_assert_held(q);
-       __set_bit(flag, &q->queue_flags);
-}
-
-static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
-{
-       queue_lockdep_assert_held(q);
-       __clear_bit(flag, &q->queue_flags);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 60507ab..30d8e0f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -621,7 +621,6 @@ struct request_queue {
 void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
 void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)     test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)