Merge tag 'block-5.13-2021-05-28' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 29 May 2021 00:42:37 +0000 (14:42 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 29 May 2021 00:42:37 +0000 (14:42 -1000)
Pull block fixes from Jens Axboe:

 - NVMe pull request (Christoph):
      - fix a memory leak in nvme_cdev_add (Guoqing Jiang)
      - fix inline data size comparison in nvmet_tcp_queue_response (Hou
        Pu)
      - fix false keep-alive timeout when a controller is torn down
        (Sagi Grimberg)
      - fix an nvme-tcp Kconfig dependency (Sagi Grimberg)
      - short-circuit reconnect retries for FC (Hannes Reinecke)
      - decode host pathing error for connect (Hannes Reinecke)

 - MD pull request (Song):
      - Fix incorrect chunk boundary assert (Christoph)

 - Fix s390/dasd verification panic (Stefan)

* tag 'block-5.13-2021-05-28' of git://git.kernel.dk/linux-block:
  nvmet: fix false keep-alive timeout when a controller is torn down
  nvmet-tcp: fix inline data size comparison in nvmet_tcp_queue_response
  nvme-tcp: remove incorrect Kconfig dep in BLK_DEV_NVME
  md/raid5: remove an incorrect assert in in_chunk_boundary
  s390/dasd: add missing discipline function
  nvme-fabrics: decode host pathing error for connect
  nvme-fc: short-circuit reconnect retries
  nvme: fix potential memory leaks in nvme_cdev_add

drivers/md/raid5.c
drivers/nvme/host/Kconfig
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.c
drivers/nvme/host/fc.c
drivers/nvme/target/core.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/tcp.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 841e1c1..7d4ff8a 100644
@@ -5311,8 +5311,6 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
        unsigned int chunk_sectors;
        unsigned int bio_sectors = bio_sectors(bio);
 
-       WARN_ON_ONCE(bio->bi_bdev->bd_partno);
-
        chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
        return  chunk_sectors >=
                ((sector & (chunk_sectors - 1)) + bio_sectors);
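
The remaining boundary test relies on chunk_sectors being a power of two, so that (sector & (chunk_sectors - 1)) gives the offset inside the current chunk. A small worked example of that arithmetic, a sketch with made-up values rather than kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Boundary check sketch: chunk_sectors must be a power of two for the
     * mask to yield the offset of 'sector' within its chunk. */
    static bool fits_in_chunk(unsigned long sector, unsigned int bio_sectors,
                              unsigned int chunk_sectors)
    {
            return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors);
    }

    int main(void)
    {
            /* 200 & 127 = 72; 72 + 64 = 136 > 128, so the bio crosses a chunk */
            printf("%d\n", fits_in_chunk(200, 64, 128));    /* prints 0 */
            /* 72 + 32 = 104 <= 128, so it stays inside one chunk */
            printf("%d\n", fits_in_chunk(200, 32, 128));    /* prints 1 */
            return 0;
    }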
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index a44d49d..494675a 100644
@@ -71,7 +71,8 @@ config NVME_FC
 config NVME_TCP
        tristate "NVM Express over Fabrics TCP host driver"
        depends on INET
-       depends on BLK_DEV_NVME
+       depends on BLOCK
+       select NVME_CORE
        select NVME_FABRICS
        select CRYPTO
        select CRYPTO_CRC32C
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 762125f..66973bb 100644
@@ -3485,8 +3485,10 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
        cdev_init(cdev, fops);
        cdev->owner = owner;
        ret = cdev_device_add(cdev, cdev_device);
-       if (ret)
+       if (ret) {
+               put_device(cdev_device);
                ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
+       }
        return ret;
 }
 
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index a2bb7fc..34a84d2 100644
@@ -336,6 +336,11 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl,
                        cmd->connect.recfmt);
                break;
 
+       case NVME_SC_HOST_PATH_ERROR:
+               dev_err(ctrl->device,
+                       "Connect command failed: host path error\n");
+               break;
+
        default:
                dev_err(ctrl->device,
                        "Connect command failed, error wo/DNR bit: %d\n",
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 256e877..f183f9f 100644
@@ -3107,6 +3107,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3114,6 +3115,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
        if (!(ctrl->ctrl.sgls & ((1 << 0) | (1 << 1)))) {
                dev_err(ctrl->ctrl.device,
                        "Mandatory sgls are not supported!\n");
+               ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
                goto out_disconnect_admin_queue;
        }
 
@@ -3280,11 +3282,13 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;
 
-       if (portptr->port_state == FC_OBJSTATE_ONLINE)
+       if (portptr->port_state == FC_OBJSTATE_ONLINE) {
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
                        ctrl->cnum, status);
-       else if (time_after_eq(jiffies, rport->dev_loss_end))
+               if (status > 0 && (status & NVME_SC_DNR))
+                       recon = false;
+       } else if (time_after_eq(jiffies, rport->dev_loss_end))
                recon = false;
 
        if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
@@ -3298,12 +3302,17 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 
                queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
        } else {
-               if (portptr->port_state == FC_OBJSTATE_ONLINE)
-                       dev_warn(ctrl->ctrl.device,
-                               "NVME-FC{%d}: Max reconnect attempts (%d) "
-                               "reached.\n",
-                               ctrl->cnum, ctrl->ctrl.nr_reconnects);
-               else
+               if (portptr->port_state == FC_OBJSTATE_ONLINE) {
+                       if (status > 0 && (status & NVME_SC_DNR))
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: reconnect failure\n",
+                                        ctrl->cnum);
+                       else
+                               dev_warn(ctrl->ctrl.device,
+                                        "NVME-FC{%d}: Max reconnect attempts "
+                                        "(%d) reached.\n",
+                                        ctrl->cnum, ctrl->ctrl.nr_reconnects);
+               } else
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: dev_loss_tmo (%d) expired "
                                "while waiting for remoteport connectivity.\n",
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1853db3..4b29a5b 100644
@@ -388,10 +388,10 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 {
        struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
                        struct nvmet_ctrl, ka_work);
-       bool cmd_seen = ctrl->cmd_seen;
+       bool reset_tbkas = ctrl->reset_tbkas;
 
-       ctrl->cmd_seen = false;
-       if (cmd_seen) {
+       ctrl->reset_tbkas = false;
+       if (reset_tbkas) {
                pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
                        ctrl->cntlid);
                schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
@@ -804,6 +804,13 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
        percpu_ref_exit(&sq->ref);
 
        if (ctrl) {
+               /*
+                * The teardown flow may take some time, and the host may not
+                * send us keep-alive during this period, hence reset the
+                * traffic based keep-alive timer so we don't trigger a
+                * controller teardown as a result of a keep-alive expiration.
+                */
+               ctrl->reset_tbkas = true;
                nvmet_ctrl_put(ctrl);
                sq->ctrl = NULL; /* allows reusing the queue later */
        }
@@ -952,7 +959,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
        }
 
        if (sq->ctrl)
-               sq->ctrl->cmd_seen = true;
+               sq->ctrl->reset_tbkas = true;
 
        return true;
 
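The rename spells out what the flag really means: any command seen, and now also a queue teardown, just rearms the traffic based keep-alive (TBKAS) timer on its next expiry instead of being treated as a keep-alive timeout. A simplified sketch of that timer decision, illustration only and not the kernel implementation:

    #include <stdbool.h>
    #include <stdio.h>

    struct ctrl {
            bool reset_tbkas;    /* set on traffic or on queue teardown */
            unsigned int kato;   /* keep-alive timeout, in seconds */
    };

    /* Called when the keep-alive timer fires: rearm if there was a reason
     * to reset it, otherwise report a real keep-alive timeout. */
    static void keep_alive_timer_fired(struct ctrl *ctrl)
    {
            if (ctrl->reset_tbkas) {
                    ctrl->reset_tbkas = false;
                    printf("rearm keep-alive timer for %u s\n", ctrl->kato);
                    return;
            }
            printf("keep-alive timeout: tear down the controller\n");
    }

    int main(void)
    {
            struct ctrl c = { .reset_tbkas = true, .kato = 5 };
            keep_alive_timer_fired(&c);   /* rearms */
            keep_alive_timer_fired(&c);   /* times out */
            return 0;
    }
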
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index d69a409..53aea9a 100644
@@ -167,7 +167,7 @@ struct nvmet_ctrl {
        struct nvmet_subsys     *subsys;
        struct nvmet_sq         **sqs;
 
-       bool                    cmd_seen;
+       bool                    reset_tbkas;
 
        struct mutex            lock;
        u64                     cap;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index f9f34f6..d8aceef 100644
@@ -550,7 +550,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
                 * nvmet_req_init is completed.
                 */
                if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
-                   len && len < cmd->req.port->inline_data_size &&
+                   len && len <= cmd->req.port->inline_data_size &&
                    nvme_is_write(cmd->req.cmd))
                        return;
        }
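
The one-character change covers the boundary case: a write whose payload is exactly inline_data_size bytes is still carried in-capsule, so the response has to wait until that data has been read from the socket. A minimal illustration of the corrected comparison, with example sizes only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Defer the response only while in-capsule (inline) write data is
     * still expected on the connection. */
    static bool defer_response(size_t len, size_t inline_data_size)
    {
            return len && len <= inline_data_size;  /* was: len < inline_data_size */
    }

    int main(void)
    {
            printf("%d\n", defer_response(16384, 16384)); /* now 1, previously 0 */
            printf("%d\n", defer_response(16385, 16384)); /* 0 */
            return 0;
    }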
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 1b9e144..fd42a5f 100644
@@ -642,12 +642,18 @@ static void dasd_diag_setup_blk_queue(struct dasd_block *block)
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
 }
 
+static int dasd_diag_pe_handler(struct dasd_device *device,
+                               __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_diag_discipline = {
        .owner = THIS_MODULE,
        .name = "DIAG",
        .ebcname = "DIAG",
        .check_device = dasd_diag_check_device,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_diag_pe_handler,
        .fill_geometry = dasd_diag_fill_geometry,
        .setup_blk_queue = dasd_diag_setup_blk_queue,
        .start_IO = dasd_start_diag,
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 4789410..3ad319a 100644
@@ -794,13 +794,19 @@ static void dasd_fba_setup_blk_queue(struct dasd_block *block)
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
+static int dasd_fba_pe_handler(struct dasd_device *device,
+                              __u8 tbvpm, __u8 fcsecpm)
+{
+       return dasd_generic_verify_path(device, tbvpm);
+}
+
 static struct dasd_discipline dasd_fba_discipline = {
        .owner = THIS_MODULE,
        .name = "FBA ",
        .ebcname = "FBA ",
        .check_device = dasd_fba_check_characteristics,
        .do_analysis = dasd_fba_do_analysis,
-       .verify_path = dasd_generic_verify_path,
+       .pe_handler = dasd_fba_pe_handler,
        .setup_blk_queue = dasd_fba_setup_blk_queue,
        .fill_geometry = dasd_fba_fill_geometry,
        .start_IO = dasd_start_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1c59b0e..155428b 100644
@@ -297,7 +297,6 @@ struct dasd_discipline {
         * e.g. verify that new path is compatible with the current
         * configuration.
         */
-       int (*verify_path)(struct dasd_device *, __u8);
        int (*pe_handler)(struct dasd_device *, __u8, __u8);
 
        /*