Merge tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Sep 2023 17:49:42 +0000 (10:49 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 3 Sep 2023 17:49:42 +0000 (10:49 -0700)
Pull dmaengine updates from Vinod Koul:
 "New controller support and updates to drivers.

  New support:
   - Qualcomm SM6115 and QCM2290 dmaengine support
   - at_xdma support for microchip,sam9x7 controller

  Updates:
   - idxd wq simplification and ats knob updates
   - fsl edma updates for v3 support
   - Xilinx AXI4-Stream control support
   - YAML conversion for the bcm dma binding"

* tag 'dmaengine-6.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (53 commits)
  dmaengine: fsl-edma: integrate v3 support
  dt-bindings: fsl-dma: fsl-edma: add edma3 compatible string
  dmaengine: fsl-edma: move tcd into struct fsl_dma_chan
  dmaengine: fsl-edma: refactor chan_name setup and safety
  dmaengine: fsl-edma: move clearing of register interrupt into setup_irq function
  dmaengine: fsl-edma: refactor using devm_clk_get_enabled
  dmaengine: fsl-edma: simply ATTR_DSIZE and ATTR_SSIZE by using ffs()
  dmaengine: fsl-edma: move common IRQ handler to common.c
  dmaengine: fsl-edma: Remove enum edma_version
  dmaengine: fsl-edma: transition from bool fields to bitmask flags in drvdata
  dmaengine: fsl-edma: clean up EXPORT_SYMBOL_GPL in fsl-edma-common.c
  dmaengine: fsl-edma: fix build error when arch is s390
  dmaengine: idxd: Fix issues with PRS disable sysfs knob
  dmaengine: idxd: Allow ATS disable update only for configurable devices
  dmaengine: xilinx_dma: Program interrupt delay timeout
  dmaengine: xilinx_dma: Use tasklet_hi_schedule for timing critical usecase
  dmaengine: xilinx_dma: Freeup active list based on descriptor completion bit
  dmaengine: xilinx_dma: Increase AXI DMA transaction segment count
  dmaengine: xilinx_dma: Pass AXI4-Stream control words to dma client
  dt-bindings: dmaengine: xilinx_dma: Add xlnx,irq-delay property
  ...

Documentation/devicetree/bindings/dma/qcom,bam-dma.yaml
drivers/dma/Kconfig
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/sysfs.c
drivers/dma/mcf-edma-main.c
drivers/dma/owl-dma.c

@@@ -15,13 -15,19 +15,19 @@@ allOf
  
  properties:
    compatible:
-     enum:
-         # APQ8064, IPQ8064 and MSM8960
-       - qcom,bam-v1.3.0
-         # MSM8974, APQ8074 and APQ8084
-       - qcom,bam-v1.4.0
-         # MSM8916 and SDM845
-       - qcom,bam-v1.7.0
+     oneOf:
+       - enum:
+           # APQ8064, IPQ8064 and MSM8960
+           - qcom,bam-v1.3.0
+           # MSM8974, APQ8074 and APQ8084
+           - qcom,bam-v1.4.0
+           # MSM8916, SDM630
+           - qcom,bam-v1.7.0
+       - items:
+           - enum:
+               # SDM845, SM6115, SM8150, SM8250 and QCM2290
+               - qcom,bam-v1.7.4
+           - const: qcom,bam-v1.7.0
  
    clocks:
      maxItems: 1
@@@ -38,7 -44,7 +44,7 @@@
  
    iommus:
      minItems: 1
-     maxItems: 4
+     maxItems: 6
  
    num-channels:
      $ref: /schemas/types.yaml#/definitions/uint32
@@@ -48,7 -54,7 +54,7 @@@
    qcom,controlled-remotely:
      type: boolean
      description:
 -      Indicates that the bam is controlled by remote proccessor i.e. execution
 +      Indicates that the bam is controlled by remote processor i.e. execution
        environment.
  
    qcom,ee:
@@@ -81,6 -87,15 +87,15 @@@ required
    - qcom,ee
    - reg
  
+ anyOf:
+   - required:
+       - qcom,powered-remotely
+   - required:
+       - qcom,controlled-remotely
+   - required:
+       - clocks
+       - clock-names
  additionalProperties: false
  
  examples:
diff --combined drivers/dma/Kconfig
@@@ -281,7 -281,6 +281,7 @@@ config IMX_SDM
  
  config INTEL_IDMA64
        tristate "Intel integrated DMA 64-bit support"
 +      depends on HAS_IOMEM
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
        help
@@@ -474,25 -473,6 +474,6 @@@ config MXS_DM
          Support the MXS DMA engine. This engine including APBH-DMA
          and APBX-DMA is integrated into some Freescale chips.
  
- config MX3_IPU
-       bool "MX3x Image Processing Unit support"
-       depends on ARCH_MXC
-       select DMA_ENGINE
-       default y
-       help
-         If you plan to use the Image Processing unit in the i.MX3x, say
-         Y here. If unsure, select Y.
- config MX3_IPU_IRQS
-       int "Number of dynamically mapped interrupts for IPU"
-       depends on MX3_IPU
-       range 2 137
-       default 4
-       help
-         Out of 137 interrupt sources on i.MX31 IPU only very few are used.
-         To avoid bloating the irq_desc[] array we allocate a sufficient
-         number of IRQ slots and map them dynamically to specific sources.
  config NBPFAXI_DMA
        tristate "Renesas Type-AXI NBPF DMA support"
        select DMA_ENGINE
@@@ -699,7 -679,7 +680,7 @@@ config XGENE_DM
  
  config XILINX_DMA
        tristate "Xilinx AXI DMAS Engine"
-       depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
+       depends on HAS_IOMEM
        select DMA_ENGINE
        help
          Enable support for Xilinx AXI VDMA Soft IP.
@@@ -299,6 -299,21 +299,6 @@@ void idxd_wqs_unmap_portal(struct idxd_
        }
  }
  
 -static void __idxd_wq_set_priv_locked(struct idxd_wq *wq, int priv)
 -{
 -      struct idxd_device *idxd = wq->idxd;
 -      union wqcfg wqcfg;
 -      unsigned int offset;
 -
 -      offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PRIVL_IDX);
 -      spin_lock(&idxd->dev_lock);
 -      wqcfg.bits[WQCFG_PRIVL_IDX] = ioread32(idxd->reg_base + offset);
 -      wqcfg.priv = priv;
 -      wq->wqcfg->bits[WQCFG_PRIVL_IDX] = wqcfg.bits[WQCFG_PRIVL_IDX];
 -      iowrite32(wqcfg.bits[WQCFG_PRIVL_IDX], idxd->reg_base + offset);
 -      spin_unlock(&idxd->dev_lock);
 -}
 -
  static void __idxd_wq_set_pasid_locked(struct idxd_wq *wq, int pasid)
  {
        struct idxd_device *idxd = wq->idxd;
@@@ -369,7 -384,9 +369,7 @@@ static void idxd_wq_disable_cleanup(str
        wq->threshold = 0;
        wq->priority = 0;
        wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
 -      clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
 -      clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
 -      clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 +      wq->flags = 0;
        memset(wq->name, 0, WQ_NAME_SIZE);
        wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
@@@ -769,8 -786,6 +769,6 @@@ static int idxd_device_evl_setup(struc
                goto err_alloc;
        }
  
-       memset(addr, 0, size);
        spin_lock(&evl->lock);
        evl->log = addr;
        evl->dma = dma_addr;
@@@ -1406,14 -1421,15 +1404,14 @@@ int drv_enable_wq(struct idxd_wq *wq
        }
  
        /*
 -       * In the event that the WQ is configurable for pasid and priv bits.
 -       * For kernel wq, the driver should setup the pasid, pasid_en, and priv bit.
 -       * However, for non-kernel wq, the driver should only set the pasid_en bit for
 -       * shared wq. A dedicated wq that is not 'kernel' type will configure pasid and
 +       * In the event that the WQ is configurable for pasid, the driver
 +       * should setup the pasid, pasid_en bit. This is true for both kernel
 +       * and user shared workqueues. There is no need to setup priv bit in
 +       * that in-kernel DMA will also do user privileged requests.
 +       * A dedicated wq that is not 'kernel' type will configure pasid and
         * pasid_en later on so there is no need to setup.
         */
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
 -              int priv = 0;
 -
                if (wq_pasid_enabled(wq)) {
                        if (is_idxd_wq_kernel(wq) || wq_shared(wq)) {
                                u32 pasid = wq_dedicated(wq) ? idxd->pasid : 0;
                                __idxd_wq_set_pasid_locked(wq, pasid);
                        }
                }
 -
 -              if (is_idxd_wq_kernel(wq))
 -                      priv = 1;
 -              __idxd_wq_set_priv_locked(wq, priv);
        }
  
        rc = 0;
@@@ -1528,15 -1548,6 +1526,15 @@@ int idxd_device_drv_probe(struct idxd_d
        if (rc < 0)
                return -ENXIO;
  
 +      /*
 +       * System PASID is preserved across device disable/enable cycle, but
 +       * genconfig register content gets cleared during device reset. We
 +       * need to re-enable user interrupts for kernel work queue completion
 +       * IRQ to function.
 +       */
 +      if (idxd->pasid != IOMMU_PASID_INVALID)
 +              idxd_set_user_intr(idxd, 1);
 +
        rc = idxd_device_evl_setup(idxd);
        if (rc < 0) {
                idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
diff --combined drivers/dma/idxd/idxd.h
@@@ -473,15 -473,6 +473,15 @@@ static inline struct idxd_device *ie_to
        return container_of(ie, struct idxd_device, ie);
  }
  
 +static inline void idxd_set_user_intr(struct idxd_device *idxd, bool enable)
 +{
 +      union gencfg_reg reg;
 +
 +      reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
 +      reg.user_int_en = enable;
 +      iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
 +}
 +
  extern struct bus_type dsa_bus_type;
  
  extern bool support_enqcmd;
@@@ -660,8 -651,6 +660,6 @@@ int idxd_register_bus_type(void)
  void idxd_unregister_bus_type(void);
  int idxd_register_devices(struct idxd_device *idxd);
  void idxd_unregister_devices(struct idxd_device *idxd);
- int idxd_register_driver(void);
- void idxd_unregister_driver(void);
  void idxd_wqs_quiesce(struct idxd_device *idxd);
  bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
  void multi_u64_to_bmap(unsigned long *bmap, u64 *val, int count);
@@@ -673,8 -662,6 +671,6 @@@ void idxd_mask_error_interrupts(struct 
  void idxd_unmask_error_interrupts(struct idxd_device *idxd);
  
  /* device control */
- int idxd_register_idxd_drv(void);
- void idxd_unregister_idxd_drv(void);
  int idxd_device_drv_probe(struct idxd_dev *idxd_dev);
  void idxd_device_drv_remove(struct idxd_dev *idxd_dev);
  int drv_enable_wq(struct idxd_wq *wq);
@@@ -719,7 -706,6 +715,6 @@@ int idxd_enqcmds(struct idxd_wq *wq, vo
  /* dmaengine */
  int idxd_register_dma_device(struct idxd_device *idxd);
  void idxd_unregister_dma_device(struct idxd_device *idxd);
- void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
  void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type, bool free_desc);
  
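The new idxd_set_user_intr() helper above is a plain read-modify-write of the GENCFG register through a union that overlays named bitfields on the raw 32-bit value. A standalone userspace sketch of that pattern follows; the field layout and the ioread32()/iowrite32() stand-ins are simplified assumptions, not the real idxd register definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed, simplified register layout -- not the real union gencfg_reg. */
union gencfg_reg {
	struct {
		unsigned int global_err_int_en:1;
		unsigned int user_int_en:1;
		unsigned int rsvd:30;
	};
	uint32_t bits;
};

/* Fake MMIO backing store standing in for ioread32()/iowrite32(). */
static uint32_t fake_gencfg;
static uint32_t ioread32_sim(void) { return fake_gencfg; }
static void iowrite32_sim(uint32_t v) { fake_gencfg = v; }

static void set_user_intr(bool enable)
{
	union gencfg_reg reg;

	reg.bits = ioread32_sim();	/* read the whole register */
	reg.user_int_en = enable;	/* touch only the one field */
	iowrite32_sim(reg.bits);	/* write the full value back */
}

int main(void)
{
	set_user_intr(true);
	printf("GENCFG = 0x%08x\n", (unsigned int)fake_gencfg);
	return 0;
}
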
diff --combined drivers/dma/idxd/sysfs.c
@@@ -948,6 -948,13 +948,6 @@@ static ssize_t wq_name_store(struct dev
        if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
                return -EINVAL;
  
 -      /*
 -       * This is temporarily placed here until we have SVM support for
 -       * dmaengine.
 -       */
 -      if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
 -              return -EOPNOTSUPP;
 -
        input = kstrndup(buf, count, GFP_KERNEL);
        if (!input)
                return -ENOMEM;
@@@ -1088,8 -1095,8 +1088,8 @@@ static ssize_t wq_ats_disable_store(str
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
  
-       if (!idxd->hw.wq_cap.wq_ats_support)
-               return -EOPNOTSUPP;
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
  
        rc = kstrtobool(buf, &ats_dis);
        if (rc < 0)
@@@ -1124,8 -1131,8 +1124,8 @@@ static ssize_t wq_prs_disable_store(str
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
  
-       if (!idxd->hw.wq_cap.wq_prs_support)
-               return -EOPNOTSUPP;
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
  
        rc = kstrtobool(buf, &prs_dis);
        if (rc < 0)
@@@ -1281,12 -1288,9 +1281,9 @@@ static struct attribute *idxd_wq_attrib
        NULL,
  };
  
- static bool idxd_wq_attr_op_config_invisible(struct attribute *attr,
-                                            struct idxd_device *idxd)
- {
-       return attr == &dev_attr_wq_op_config.attr &&
-              !idxd->hw.wq_cap.op_config;
- }
+ /*  A WQ attr is invisible if the feature is not supported in WQCAP. */
+ #define idxd_wq_attr_invisible(name, cap_field, a, idxd)              \
+       ((a) == &dev_attr_wq_##name.attr && !(idxd)->hw.wq_cap.cap_field)
  
  static bool idxd_wq_attr_max_batch_size_invisible(struct attribute *attr,
                                                  struct idxd_device *idxd)
               idxd->data->type == IDXD_TYPE_IAX;
  }
  
- static bool idxd_wq_attr_wq_prs_disable_invisible(struct attribute *attr,
-                                                 struct idxd_device *idxd)
- {
-       return attr == &dev_attr_wq_prs_disable.attr &&
-              !idxd->hw.wq_cap.wq_prs_support;
- }
  static umode_t idxd_wq_attr_visible(struct kobject *kobj,
                                    struct attribute *attr, int n)
  {
        struct idxd_wq *wq = confdev_to_wq(dev);
        struct idxd_device *idxd = wq->idxd;
  
-       if (idxd_wq_attr_op_config_invisible(attr, idxd))
+       if (idxd_wq_attr_invisible(op_config, op_config, attr, idxd))
                return 0;
  
        if (idxd_wq_attr_max_batch_size_invisible(attr, idxd))
                return 0;
  
-       if (idxd_wq_attr_wq_prs_disable_invisible(attr, idxd))
+       if (idxd_wq_attr_invisible(prs_disable, wq_prs_support, attr, idxd))
+               return 0;
+       if (idxd_wq_attr_invisible(ats_disable, wq_ats_support, attr, idxd))
                return 0;
  
        return attr->mode;
@@@ -1473,7 -1473,7 +1466,7 @@@ static ssize_t pasid_enabled_show(struc
  {
        struct idxd_device *idxd = confdev_to_idxd(dev);
  
-       return sysfs_emit(buf, "%u\n", device_pasid_enabled(idxd));
+       return sysfs_emit(buf, "%u\n", device_user_pasid_enabled(idxd));
  }
  static DEVICE_ATTR_RO(pasid_enabled);
  
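The sysfs.c hunk above folds three near-identical "is this attribute invisible?" helpers into one idxd_wq_attr_invisible() macro, which pastes the attribute name into dev_attr_wq_##name and tests the matching WQCAP bit. Below is a standalone sketch of that token-pasting pattern with simplified stand-in types, not the real idxd structures or sysfs API.

#include <stdbool.h>
#include <stdio.h>

struct attribute { const char *name; };

/* Hypothetical stand-ins for the dev_attr_wq_* attributes. */
static struct attribute dev_attr_wq_op_config   = { "op_config" };
static struct attribute dev_attr_wq_prs_disable = { "prs_disable" };
static struct attribute dev_attr_wq_ats_disable = { "ats_disable" };

/* Assumed, simplified capability bits standing in for WQCAP. */
struct wq_cap {
	unsigned int op_config:1;
	unsigned int wq_prs_support:1;
	unsigned int wq_ats_support:1;
};

/* Same shape as the kernel macro: attribute identity + capability bit. */
#define wq_attr_invisible(name, cap_field, a, cap) \
	((a) == &dev_attr_wq_##name && !(cap)->cap_field)

static bool attr_visible(struct attribute *a, struct wq_cap *cap)
{
	if (wq_attr_invisible(op_config, op_config, a, cap))
		return false;
	if (wq_attr_invisible(prs_disable, wq_prs_support, a, cap))
		return false;
	if (wq_attr_invisible(ats_disable, wq_ats_support, a, cap))
		return false;
	return true;
}

int main(void)
{
	struct wq_cap cap = { .op_config = 1 };	/* no PRS/ATS support */

	printf("op_config visible:   %d\n", attr_visible(&dev_attr_wq_op_config, &cap));
	printf("prs_disable visible: %d\n", attr_visible(&dev_attr_wq_prs_disable, &cap));
	return 0;
}
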
@@@ -19,7 -19,6 +19,6 @@@ static irqreturn_t mcf_edma_tx_handler(
        struct fsl_edma_engine *mcf_edma = dev_id;
        struct edma_regs *regs = &mcf_edma->regs;
        unsigned int ch;
-       struct fsl_edma_chan *mcf_chan;
        u64 intmap;
  
        intmap = ioread32(regs->inth);
        for (ch = 0; ch < mcf_edma->n_chans; ch++) {
                if (intmap & BIT(ch)) {
                        iowrite8(EDMA_MASK_CH(ch), regs->cint);
-                       mcf_chan = &mcf_edma->chans[ch];
-                       spin_lock(&mcf_chan->vchan.lock);
-                       if (!mcf_chan->edesc) {
-                               /* terminate_all called before */
-                               spin_unlock(&mcf_chan->vchan.lock);
-                               continue;
-                       }
-                       if (!mcf_chan->edesc->iscyclic) {
-                               list_del(&mcf_chan->edesc->vdesc.node);
-                               vchan_cookie_complete(&mcf_chan->edesc->vdesc);
-                               mcf_chan->edesc = NULL;
-                               mcf_chan->status = DMA_COMPLETE;
-                               mcf_chan->idle = true;
-                       } else {
-                               vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
-                       }
-                       if (!mcf_chan->edesc)
-                               fsl_edma_xfer_desc(mcf_chan);
-                       spin_unlock(&mcf_chan->vchan.lock);
+                       fsl_edma_tx_chan_handler(&mcf_edma->chans[ch]);
                }
        }
  
@@@ -76,8 -51,7 +51,7 @@@ static irqreturn_t mcf_edma_err_handler
                if (err & BIT(ch)) {
                        fsl_edma_disable_request(&mcf_edma->chans[ch]);
                        iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
-                       mcf_edma->chans[ch].status = DMA_ERROR;
-                       mcf_edma->chans[ch].idle = true;
+                       fsl_edma_err_chan_handler(&mcf_edma->chans[ch]);
                }
        }
  
@@@ -172,7 -146,7 +146,7 @@@ static void mcf_edma_irq_free(struct pl
  }
  
  static struct fsl_edma_drvdata mcf_data = {
-       .version = v2,
+       .flags = FSL_EDMA_DRV_EDMA64,
        .setup_irq = mcf_edma_irq_init,
  };
  
@@@ -180,9 -154,8 +154,8 @@@ static int mcf_edma_probe(struct platfo
  {
        struct mcf_edma_platform_data *pdata;
        struct fsl_edma_engine *mcf_edma;
-       struct fsl_edma_chan *mcf_chan;
        struct edma_regs *regs;
-       int ret, i, len, chans;
+       int ret, i, chans;
  
        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                return -EINVAL;
        }
  
 -      chans = pdata->dma_channels;
 +      if (!pdata->dma_channels) {
 +              dev_info(&pdev->dev, "setting default channel number to 64");
 +              chans = 64;
 +      } else {
 +              chans = pdata->dma_channels;
 +      }
 +
-       len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
-       mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+       mcf_edma = devm_kzalloc(&pdev->dev, struct_size(mcf_edma, chans, chans),
+                               GFP_KERNEL);
        if (!mcf_edma)
                return -ENOMEM;
  
        mcf_edma->drvdata = &mcf_data;
        mcf_edma->big_endian = 1;
  
 -      if (!mcf_edma->n_chans) {
 -              dev_info(&pdev->dev, "setting default channel number to 64");
 -              mcf_edma->n_chans = 64;
 -      }
 -
        mutex_init(&mcf_edma->fsl_edma_mutex);
  
        mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
                mcf_chan->dma_dir = DMA_NONE;
                mcf_chan->vchan.desc_free = fsl_edma_free_desc;
                vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
-               iowrite32(0x0, &regs->tcd[i].csr);
+               mcf_chan->tcd = mcf_edma->membase + EDMA_TCD
+                               + i * sizeof(struct fsl_edma_hw_tcd);
+               iowrite32(0x0, &mcf_chan->tcd->csr);
        }
  
        iowrite32(~0, regs->inth);
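
The mcf_edma_probe() hunk above replaces the hand-rolled sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans with struct_size() for the flexible chans[] array. A minimal userspace sketch of the same idiom, using simplified stand-in types; the kernel's struct_size() from <linux/overflow.h> additionally saturates on overflow, which this local stand-in does not.

#include <stdio.h>
#include <stdlib.h>

struct chan { int id; };

struct engine {
	int n_chans;
	struct chan chans[];	/* flexible array member */
};

/* Local stand-in for the kernel's struct_size() (no overflow checking). */
#define struct_size(p, member, count) \
	(sizeof(*(p)) + sizeof((p)->member[0]) * (count))

int main(void)
{
	int chans = 64;
	/* sizeof() does not evaluate e, so using it in its own initializer is fine. */
	struct engine *e = calloc(1, struct_size(e, chans, chans));

	if (!e)
		return 1;
	e->n_chans = chans;
	printf("allocated %zu bytes for %d channels\n",
	       struct_size(e, chans, chans), e->n_chans);
	free(e);
	return 0;
}
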
diff --combined drivers/dma/owl-dma.c
@@@ -20,8 -20,9 +20,9 @@@
  #include <linux/io.h>
  #include <linux/mm.h>
  #include <linux/module.h>
- #include <linux/of_device.h>
+ #include <linux/of.h>
  #include <linux/of_dma.h>
+ #include <linux/platform_device.h>
  #include <linux/slab.h>
  #include "virt-dma.h"
  
@@@ -192,7 -193,7 +193,7 @@@ struct owl_dma_pchan 
  };
  
  /**
 - * struct owl_dma_pchan - Wrapper for DMA ENGINE channel
 + * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
   * @vc: wrapped virtual channel
   * @pchan: the physical channel utilized by this channel
   * @txd: active transaction on this channel
@@@ -1116,7 -1117,7 +1117,7 @@@ static int owl_dma_probe(struct platfor
        dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n",
                 nr_channels, nr_requests);
  
-       od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev);
+       od->devid = (uintptr_t)of_device_get_match_data(&pdev->dev);
  
        od->nr_pchans = nr_channels;
        od->nr_vchans = nr_requests;
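
The final owl-dma hunk changes the of_device_get_match_data() result cast from (enum owl_dma_id) to (uintptr_t): the match data travels as a const void *, and round-tripping through uintptr_t avoids pointer-to-enum cast warnings on 64-bit builds. A standalone sketch of the pattern, with made-up values standing in for the driver's match table rather than the real OF API:

#include <stdint.h>
#include <stdio.h>

enum owl_dma_id { S900_DMA, S700_DMA };	/* illustrative IDs */

/* Stand-in for the const void *data carried by struct of_device_id. */
static const void *match_data = (void *)(uintptr_t)S700_DMA;

int main(void)
{
	/* Casting the pointer straight to the enum can warn; go via uintptr_t. */
	enum owl_dma_id devid = (uintptr_t)match_data;

	printf("devid = %d\n", (int)devid);
	return 0;
}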