Merge tag 'block-6.0-2022-09-09' of git://git.kernel.dk/linux-block
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Sep 2022 19:03:08 +0000 (15:03 -0400)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Sep 2022 19:03:08 +0000 (15:03 -0400)
Pull block fixes from Jens Axboe:

 - NVMe pull via Christoph:
     - fix a use-after-free in nvmet (Bart Van Assche)
     - fix a use-after-free when detecting digest errors
       (Sagi Grimberg)
      - fix regression that causes sporadic TCP requests to time out
        (Sagi Grimberg)
     - fix two off-by-one errors in the nvmet ZNS support
       (Dennis Maisenbacher)
      - requeue aen after firmware activation (Keith Busch)

 - Fix missing request flags in debugfs code (me)

 - Partition scan fix (Ming)

* tag 'block-6.0-2022-09-09' of git://git.kernel.dk/linux-block:
  block: add missing request flags to debugfs code
  nvme: requeue aen after firmware activation
  nvmet: fix mar and mor off-by-one errors
  nvme-tcp: fix regression that causes sporadic requests to time out
  nvme-tcp: fix UAF when detecting digest errors
  nvmet: fix a use-after-free
  block: don't add partitions if GD_SUPPRESS_PART_SCAN is set

1  2 
drivers/nvme/host/core.c

diff --combined drivers/nvme/host/core.c
@@@ -4198,8 -4198,7 +4198,8 @@@ static void nvme_alloc_ns(struct nvme_c
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue);
  
        blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
 -      if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
 +      if (ctrl->ops->supports_pci_p2pdma &&
 +          ctrl->ops->supports_pci_p2pdma(ctrl))
                blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
  
        ns->ctrl = ctrl;
@@@ -4703,6 -4702,8 +4703,8 @@@ static void nvme_fw_act_work(struct wor
        nvme_start_queues(ctrl);
        /* read FW slot information to clear the AER */
        nvme_get_fw_slot_info(ctrl);
+       queue_work(nvme_wq, &ctrl->async_event_work);
  }
  
  static u32 nvme_aer_type(u32 result)
@@@ -4715,9 -4716,10 +4717,10 @@@ static u32 nvme_aer_subtype(u32 result
        return (result & 0xff00) >> 8;
  }
  
- static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
  {
        u32 aer_notice_type = nvme_aer_subtype(result);
+       bool requeue = true;
  
        trace_nvme_async_event(ctrl, aer_notice_type);
  
                 */
                if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
                        nvme_auth_stop(ctrl);
+                       requeue = false;
                        queue_work(nvme_wq, &ctrl->fw_act_work);
                }
                break;
        default:
                dev_warn(ctrl->device, "async event result %08x\n", result);
        }
+       return requeue;
  }
  
  static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@@ -4765,13 -4769,14 +4770,14 @@@ void nvme_complete_async_event(struct n
        u32 result = le32_to_cpu(res->u32);
        u32 aer_type = nvme_aer_type(result);
        u32 aer_subtype = nvme_aer_subtype(result);
+       bool requeue = true;
  
        if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
                return;
  
        switch (aer_type) {
        case NVME_AER_NOTICE:
-               nvme_handle_aen_notice(ctrl, result);
+               requeue = nvme_handle_aen_notice(ctrl, result);
                break;
        case NVME_AER_ERROR:
                /*
        default:
                break;
        }
-       queue_work(nvme_wq, &ctrl->async_event_work);
+       if (requeue)
+               queue_work(nvme_wq, &ctrl->async_event_work);
  }
  EXPORT_SYMBOL_GPL(nvme_complete_async_event);