Merge branch 'for-4.14/block-postmerge' of git://git.kernel.dk/linux-block
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 9 Sep 2017 19:49:01 +0000 (12:49 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 9 Sep 2017 19:49:01 +0000 (12:49 -0700)
Pull followup block layer updates from Jens Axboe:
 "I ended up splitting the main pull request for this series into two,
  mainly because of clashes between NVMe fixes that went into 4.13 after
  the for-4.14 branches were split off. This pull request is mostly
  NVMe, but not exclusively. In detail, it contains:

   - Two pull requests for NVMe changes from Christoph. Nothing new on
     the feature front, basically just fixes all over the map for the
     core bits, transport, rdma, etc.

   - Series from Bart, cleaning up various bits in the BFQ scheduler.

   - Series of bcache fixes, which has been lingering for a release or
     two. Coly sent this in, but it includes patches from various
     people in this area.

   - Set of patches for BFQ from Paolo himself, updating documentation
     and fixing some corner cases in performance.

   - Series from Omar, now attempting to get the 4k loop support
     correct. Our confidence level is higher this time.

   - Series from Shaohua for loop as well, improving O_DIRECT
     performance and fixing a use-after-free"
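
As an aside from this merge log: a minimal user-space sketch of how the two
loop settings touched by the series above are typically driven, assuming the
LOOP_SET_BLOCK_SIZE ioctl introduced by Omar's series and the pre-existing
LOOP_SET_DIRECT_IO ioctl; the device path and the 4096-byte size are
illustrative only.

/* set_loop_opts.c - illustrative sketch, not part of this merge */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>	/* assumes a uapi header that already defines LOOP_SET_BLOCK_SIZE */

int main(int argc, char **argv)
{
	/* argv[1] is a loop device already bound to a backing file, e.g. /dev/loop0 */
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s /dev/loopN\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Ask the loop driver for 4096-byte logical (and physical) blocks. */
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
		perror("LOOP_SET_BLOCK_SIZE");
	/* Route loop I/O through O_DIRECT on the backing file. */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
		perror("LOOP_SET_DIRECT_IO");
	close(fd);
	return 0;
}

Recent losetup (util-linux) exposes the same knobs as --sector-size and
--direct-io when setting up the device.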

* 'for-4.14/block-postmerge' of git://git.kernel.dk/linux-block: (74 commits)
  bcache: initialize dirty stripes in flash_dev_run()
  loop: set physical block size to logical block size
  bcache: fix bch_hprint crash and improve output
  bcache: Update continue_at() documentation
  bcache: silence static checker warning
  bcache: fix for gc and write-back race
  bcache: increase the number of open buckets
  bcache: Correct return value for sysfs attach errors
  bcache: correct cache_dirty_target in __update_writeback_rate()
  bcache: gc does not work when triggering by manual command
  bcache: Don't reinvent the wheel but use existing llist API
  bcache: do not subtract sectors_to_gc for bypassed IO
  bcache: fix sequential large write IO bypass
  bcache: Fix leak of bdev reference
  block/loop: remove unused field
  block/loop: fix use after free
  bfq: Use icq_to_bic() consistently
  bfq: Suppress compiler warnings about comparisons
  bfq: Check kstrtoul() return value
  bfq: Declare local functions static
  ...

block/bfq-iosched.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
include/linux/nvme.h
include/linux/string.h

block/bfq-iosched.c: Simple merge
drivers/nvme/host/pci.c: Simple merge

drivers/nvme/host/rdma.c:
@@@ -605,10 -629,9 +626,10 @@@ out_stop_queues
        return ret;
  }
  
- static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
+ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
  {
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
 +      struct ib_device *ibdev = ctrl->device->dev;
        unsigned int nr_io_queues;
        int i, ret;
  
@@@ -656,10 -734,144 +741,144 @@@ static void nvme_rdma_destroy_admin_que
  {
        nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
-       nvme_rdma_stop_and_free_queue(&ctrl->queues[0]);
-       blk_cleanup_queue(ctrl->ctrl.admin_q);
-       blk_mq_free_tag_set(&ctrl->admin_tag_set);
-       nvme_rdma_dev_put(ctrl->device);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
+       if (remove) {
+               blk_cleanup_queue(ctrl->ctrl.admin_q);
+               nvme_rdma_free_tagset(&ctrl->ctrl, true);
+       }
+       nvme_rdma_free_queue(&ctrl->queues[0]);
+ }
+ 
+ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
+               bool new)
+ {
+       int error;
+       error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
+       if (error)
+               return error;
+       ctrl->device = ctrl->queues[0].device;
+       ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+               ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+       if (new) {
+               ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
+               if (IS_ERR(ctrl->ctrl.admin_tagset))
+                       goto out_free_queue;
+               ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
+               if (IS_ERR(ctrl->ctrl.admin_q)) {
+                       error = PTR_ERR(ctrl->ctrl.admin_q);
+                       goto out_free_tagset;
+               }
+       } else {
+               error = blk_mq_reinit_tagset(&ctrl->admin_tag_set,
+                                            nvme_rdma_reinit_request);
+               if (error)
+                       goto out_free_queue;
+       }
+       error = nvme_rdma_start_queue(ctrl, 0);
+       if (error)
+               goto out_cleanup_queue;
+       error = ctrl->ctrl.ops->reg_read64(&ctrl->ctrl, NVME_REG_CAP,
+                       &ctrl->ctrl.cap);
+       if (error) {
+               dev_err(ctrl->ctrl.device,
+                       "prop_get NVME_REG_CAP failed\n");
+               goto out_cleanup_queue;
+       }
+       ctrl->ctrl.sqsize =
+               min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);
+       error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
+       if (error)
+               goto out_cleanup_queue;
+       ctrl->ctrl.max_hw_sectors =
 -              (ctrl->max_fr_pages - 1) << (PAGE_SHIFT - 9);
++              (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+       error = nvme_init_identify(&ctrl->ctrl);
+       if (error)
+               goto out_cleanup_queue;
+       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
+                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
+                       DMA_TO_DEVICE);
+       if (error)
+               goto out_cleanup_queue;
+       return 0;
+ out_cleanup_queue:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.admin_q);
+ out_free_tagset:
+       if (new)
+               nvme_rdma_free_tagset(&ctrl->ctrl, true);
+ out_free_queue:
+       nvme_rdma_free_queue(&ctrl->queues[0]);
+       return error;
+ }
+ 
+ static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
+               bool remove)
+ {
+       nvme_rdma_stop_io_queues(ctrl);
+       if (remove) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               nvme_rdma_free_tagset(&ctrl->ctrl, false);
+       }
+       nvme_rdma_free_io_queues(ctrl);
+ }
+ 
+ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
+ {
+       int ret;
+       ret = nvme_rdma_alloc_io_queues(ctrl);
+       if (ret)
+               return ret;
+       if (new) {
+               ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
+               if (IS_ERR(ctrl->ctrl.tagset))
+                       goto out_free_io_queues;
+               ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
+               if (IS_ERR(ctrl->ctrl.connect_q)) {
+                       ret = PTR_ERR(ctrl->ctrl.connect_q);
+                       goto out_free_tag_set;
+               }
+       } else {
+               ret = blk_mq_reinit_tagset(&ctrl->tag_set,
+                                          nvme_rdma_reinit_request);
+               if (ret)
+                       goto out_free_io_queues;
+               blk_mq_update_nr_hw_queues(&ctrl->tag_set,
+                       ctrl->ctrl.queue_count - 1);
+       }
+       ret = nvme_rdma_start_io_queues(ctrl);
+       if (ret)
+               goto out_cleanup_connect_q;
+       return 0;
+ out_cleanup_connect_q:
+       if (new)
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+ out_free_tag_set:
+       if (new)
+               nvme_rdma_free_tagset(&ctrl->ctrl, false);
+ out_free_io_queues:
+       nvme_rdma_free_io_queues(ctrl);
+       return ret;
  }
  
  static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@@ -929,12 -1111,8 +1118,12 @@@ static int nvme_rdma_map_sg_fr(struct n
        struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
        int nr;
  
 -      nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, PAGE_SIZE);
 +      /*
 +       * Align the MR to a 4K page size to match the ctrl page size and
 +       * the block virtual boundary.
 +       */
 +      nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
-       if (nr < count) {
+       if (unlikely(nr < count)) {
                if (nr < 0)
                        return nr;
                return -EINVAL;
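
(Aside, not part of the patch: the max_hw_sectors line resolved above is the
cap this expression puts on each request. A purely illustrative calculation,
assuming max_fr_pages comes out at 256, the NVME_RDMA_MAX_SEGMENTS-limited
value, shows why basing it on ilog2(SZ_4K) rather than PAGE_SHIFT matters on
kernels with page sizes larger than 4K.)

/* illustrative only - mirrors the expression from the resolved hunk above */
#include <stdio.h>

int main(void)
{
	unsigned int max_fr_pages = 256;	/* assumed NVME_RDMA_MAX_SEGMENTS-limited value */
	unsigned int max_hw_sectors = (max_fr_pages - 1) << (12 - 9);	/* ilog2(SZ_4K) == 12 */

	/* 255 << 3 == 2040 512-byte sectors, ~1020 KiB per request, independent
	 * of the CPU page size, because the MR is mapped with 4K pages to match
	 * the NVMe controller page size (see the SZ_4K comment above).
	 */
	printf("%u sectors (%u KiB)\n", max_hw_sectors, max_hw_sectors / 2);
	return 0;
}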
include/linux/nvme.h: Simple merge
include/linux/string.h: Simple merge