Merge branch 'for-linus' into for-4.12/block
author Jens Axboe <axboe@fb.com>
Fri, 7 Apr 2017 18:45:20 +0000 (12:45 -0600)
committer Jens Axboe <axboe@fb.com>
Fri, 7 Apr 2017 18:45:20 +0000 (12:45 -0600)
We've added a considerable amount of fixes for stalls and issues
with the blk-mq scheduling in the 4.11 series since forking
off the for-4.12/block branch. We need to do improvements on
top of that for 4.12, so pull in the previous fixes to make
our lives easier going forward.

Signed-off-by: Jens Axboe <axboe@fb.com>
1  2 
block/blk-mq.c
block/blk-mq.h
block/blk-sysfs.c
drivers/block/nbd.c
drivers/md/dm-rq.c
drivers/nvme/host/core.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/io-cmd.c
drivers/scsi/scsi_lib.c
include/linux/blk-mq.h
include/linux/blkdev.h

diff --cc block/blk-mq.c
@@@ -980,13 -960,24 +978,16 @@@ static bool blk_mq_dispatch_wait_add(st
        return true;
  }
  
- bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
  {
-       struct request_queue *q = hctx->queue;
+       struct blk_mq_hw_ctx *hctx;
        struct request *rq;
 -      LIST_HEAD(driver_list);
 -      struct list_head *dptr;
        int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
  
+       if (list_empty(list))
+               return false;
        /*
 -       * Start off with dptr being NULL, so we start the first request
 -       * immediately, even if we have more pending.
 -       */
 -      dptr = NULL;
 -
 -      /*
         * Now process all the entries, sending them to the driver.
         */
        errors = queued = 0;
  
                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
                        break;
-       }
 -
 -              /*
 -               * We've done the first request. If we have more than 1
 -               * left in the list, set dptr to defer issue.
 -               */
 -              if (!dptr && list->next != list->prev)
 -                      dptr = &driver_list;
+       } while (!list_empty(list));
  
        hctx->dispatched[queued_to_index(queued)]++;
  
@@@ -2610,8 -2752,19 +2649,9 @@@ void blk_mq_update_nr_hw_queues(struct 
                blk_mq_freeze_queue(q);
  
        set->nr_hw_queues = nr_hw_queues;
+       blk_mq_update_queue_map(set);
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);
 -
 -              /*
 -               * Manually set the make_request_fn as blk_queue_make_request
 -               * resets a lot of the queue settings.
 -               */
 -              if (q->nr_hw_queues > 1)
 -                      q->make_request_fn = blk_mq_make_request;
 -              else
 -                      q->make_request_fn = blk_sq_make_request;
 -
                blk_mq_queue_reinit(q, cpu_online_mask);
        }
  
diff --cc block/blk-mq.h
Simple merge
@@@ -803,11 -816,9 +803,11 @@@ static void blk_release_queue(struct ko
  
        if (q->elevator) {
                ioc_clear_queue(q);
-               elevator_exit(q->elevator);
+               elevator_exit(q, q->elevator);
        }
  
 +      blk_free_queue_stats(q->stats);
 +
        blk_exit_rl(&q->root_rl);
  
        if (q->queue_tags)
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -615,9 -610,6 +615,8 @@@ struct request_queue 
  #define QUEUE_FLAG_FLUSH_NQ    25     /* flush not queueuable */
  #define QUEUE_FLAG_DAX         26     /* device supports DAX */
  #define QUEUE_FLAG_STATS       27     /* track rq completion times */
- #define QUEUE_FLAG_RESTART     28     /* queue needs restart at completion */
- #define QUEUE_FLAG_POLL_STATS  29     /* collecting stats for hybrid polling */
- #define QUEUE_FLAG_REGISTERED  30     /* queue has been registered to a disk */
++#define QUEUE_FLAG_POLL_STATS  28     /* collecting stats for hybrid polling */
++#define QUEUE_FLAG_REGISTERED  29     /* queue has been registered to a disk */
  
  #define QUEUE_FLAG_DEFAULT    ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \