RDMA/core: introduce ib_dma_pci_p2p_dma_supported()
authorLogan Gunthorpe <logang@deltatee.com>
Fri, 8 Jul 2022 16:51:02 +0000 (10:51 -0600)
committerChristoph Hellwig <hch@lst.de>
Tue, 26 Jul 2022 11:28:07 +0000 (07:28 -0400)
Introduce the helper function ib_dma_pci_p2p_dma_supported() to check
whether a given ib_device can be used in P2PDMA transfers. The helper
checks both that the ib_device is not using virt_dma and that the
underlying dma_device supports P2PDMA.

Use the new helper in the nvmet-rdma target driver to replace the
existing ib_uses_virt_dma() check. Adding the dma_pci_p2pdma_supported()
check allows switching away from pci_p2pdma_[un]map_sg().
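For illustration only (not part of this patch): a minimal sketch of how an
RDMA consumer might use the new helper to decide whether to advertise a
P2PDMA client device, mirroring the nvmet-rdma hunk below. The function
name here is hypothetical.

	/*
	 * Hypothetical consumer: only expose a P2PDMA client device when
	 * the ib_device's DMA layer can actually map P2PDMA pages;
	 * otherwise fall back to plain host memory.
	 */
	static void example_set_p2p_client(struct ib_device *ibdev,
					   struct device **p2p_client)
	{
		if (ib_dma_pci_p2p_dma_supported(ibdev))
			*p2p_client = &ibdev->dev;	/* P2PDMA allowed */
		else
			*p2p_client = NULL;		/* host memory only */
	}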

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
drivers/nvme/target/rdma.c
include/rdma/ib_verbs.h

diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 09fdcac..4597bca 100644
@@ -415,7 +415,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
        if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
                goto out_free_rsp;
 
-       if (!ib_uses_virt_dma(ndev->device))
+       if (ib_dma_pci_p2p_dma_supported(ndev->device))
                r->req.p2p_client = &ndev->device->dev;
        r->send_sge.length = sizeof(*r->req.cqe);
        r->send_sge.lkey = ndev->pd->local_dma_lkey;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9c6317c..523843d 100644
@@ -4013,6 +4013,17 @@ static inline bool ib_uses_virt_dma(struct ib_device *dev)
        return IS_ENABLED(CONFIG_INFINIBAND_VIRT_DMA) && !dev->dma_device;
 }
 
+/*
+ * Check if an IB device's underlying DMA mapping supports P2PDMA transfers.
+ */
+static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
+{
+       if (ib_uses_virt_dma(dev))
+               return false;
+
+       return dma_pci_p2pdma_supported(dev->dma_device);
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created