Merge tag 'dmaengine-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul...
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 6fca8fa..5abbcc6 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -752,6 +752,101 @@ void idxd_device_clear_state(struct idxd_device *idxd)
        spin_unlock(&idxd->dev_lock);
 }
 
+static int idxd_device_evl_setup(struct idxd_device *idxd)
+{
+       union gencfg_reg gencfg;
+       union evlcfg_reg evlcfg;
+       union genctrl_reg genctrl;
+       struct device *dev = &idxd->pdev->dev;
+       void *addr;
+       dma_addr_t dma_addr;
+       int size;
+       struct idxd_evl *evl = idxd->evl;
+       unsigned long *bmap;
+       int rc;
+
+       if (!evl)
+               return 0;
+
+       size = evl_size(idxd);
+
+       bmap = bitmap_zalloc(size, GFP_KERNEL);
+       if (!bmap) {
+               rc = -ENOMEM;
+               goto err_bmap;
+       }
+
+       /*
+        * The address needs to be page aligned. dma_alloc_coherent() returns
+        * memory that is at least page aligned, so no manual alignment is
+        * required.
+        */
+       addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+       if (!addr) {
+               rc = -ENOMEM;
+               goto err_alloc;
+       }
+
+       memset(addr, 0, size);
+
+       spin_lock(&evl->lock);
+       evl->log = addr;
+       evl->dma = dma_addr;
+       evl->log_size = size;
+       evl->bmap = bmap;
+
+       memset(&evlcfg, 0, sizeof(evlcfg));
+       evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
+       evlcfg.size = evl->size;
+
+       iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
+       iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.evl_int_en = 1;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       gencfg.evl_en = 1;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+       spin_unlock(&evl->lock);
+       return 0;
+
+err_alloc:
+       bitmap_free(bmap);
+err_bmap:
+       return rc;
+}
+
+static void idxd_device_evl_free(struct idxd_device *idxd)
+{
+       union gencfg_reg gencfg;
+       union genctrl_reg genctrl;
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl = idxd->evl;
+
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       if (!gencfg.evl_en)
+               return;
+
+       spin_lock(&evl->lock);
+       gencfg.evl_en = 0;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.evl_int_en = 0;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+       iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+       iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+       dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+       bitmap_free(evl->bmap);
+       evl->log = NULL;
+       evl->size = IDXD_EVL_SIZE_MIN;
+       spin_unlock(&evl->lock);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
        struct idxd_device *idxd = group->idxd;
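idxd_device_evl_setup() above composes the two EVLCFG words from a union of bitfields, programs the page-aligned log base and entry count, and only then sets evl_int_en in GENCTRL and evl_en in GENCFG. The fragment below is a minimal sketch of that union-of-bitfields register pattern; the field names, widths and register offsets are illustrative assumptions, not the EVLCFG layout from the hardware spec (which lives in registers.h and is not part of this diff). It assumes <linux/types.h>, <linux/bits.h> and <linux/io.h>.

/*
 * Minimal sketch of the union-of-bitfields register pattern used for
 * evlcfg_reg above.  Field names, widths and the register offset are
 * illustrative assumptions, not the real EVLCFG layout.
 */
union example_evlcfg_reg {
	struct {
		u64 rsvd:12;		/* low 12 bits reserved: base is 4KB aligned */
		u64 addr_hi:52;		/* bits 63:12 of the log base address */
		u64 size:16;		/* number of event log entries */
		u64 rsvd2:48;
	};
	u64 bits[2];			/* raw words actually written to MMIO */
};

static void example_evlcfg_write(void __iomem *reg_base, dma_addr_t log_base,
				 unsigned int nr_entries)
{
	union example_evlcfg_reg cfg = {};

	cfg.bits[0] = log_base & GENMASK_ULL(63, 12);	/* aligned base address */
	cfg.size = nr_entries;
	iowrite64(cfg.bits[0], reg_base + 0x0);		/* placeholder offsets */
	iowrite64(cfg.bits[1], reg_base + 0x8);
}

Writing through bits[] keeps the MMIO access width explicit, while the bitfield view keeps the field packing readable when composing the value.
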
@@ -872,12 +967,16 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
        wq->wqcfg->priority = wq->priority;
 
        if (idxd->hw.gen_cap.block_on_fault &&
-           test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
+           test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags) &&
+           !test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
                wq->wqcfg->bof = 1;
 
        if (idxd->hw.wq_cap.wq_ats_support)
                wq->wqcfg->wq_ats_disable = test_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
 
+       if (idxd->hw.wq_cap.wq_prs_support)
+               wq->wqcfg->wq_prs_disable = test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags);
+
        /* bytes 12-15 */
        wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
        idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
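The wqcfg hunk above makes block-on-fault conditional on PRS: the bof bit is programmed only when WQ_FLAG_BLOCK_ON_FAULT is requested and WQ_FLAG_PRS_DISABLE is not set, and a new wq_prs_disable field mirrors the per-WQ flag when the device reports wq_prs_support. Below is a sketch of how that dependency might be enforced at the point the flags are changed; example_wq_set_block_on_fault() is a hypothetical helper, not a driver API.

/*
 * Sketch only (names hypothetical): block-on-fault relies on PRS, so a
 * configuration path might reject enabling it while PRS is disabled for
 * the workqueue, matching the wqcfg logic above.
 */
static int example_wq_set_block_on_fault(struct idxd_wq *wq, bool enable)
{
	if (enable && test_bit(WQ_FLAG_PRS_DISABLE, &wq->flags))
		return -EOPNOTSUPP;	/* cannot block on faults without PRS */

	if (enable)
		set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
	else
		clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);

	return 0;
}
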
@@ -1451,15 +1550,24 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
        if (rc < 0)
                return -ENXIO;
 
+       rc = idxd_device_evl_setup(idxd);
+       if (rc < 0) {
+               idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
+               return rc;
+       }
+
        /* Start device */
        rc = idxd_device_enable(idxd);
-       if (rc < 0)
+       if (rc < 0) {
+               idxd_device_evl_free(idxd);
                return rc;
+       }
 
        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
+               idxd_device_evl_free(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }
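In the probe hunk above, the event log is set up before the device is enabled, and every later failure now also tears it down. Sketched below is the resulting ordering with the unwinding collected under goto labels; the real function unwinds inline at each failure site and additionally records idxd->cmd_status.

/*
 * Illustrative sketch of the probe ordering produced by the hunk above;
 * error paths unwind in reverse order of setup.
 */
static int example_probe_order(struct idxd_device *idxd)
{
	int rc;

	rc = idxd_device_evl_setup(idxd);	/* 1. event log first */
	if (rc < 0)
		return rc;

	rc = idxd_device_enable(idxd);		/* 2. then enable the device */
	if (rc < 0)
		goto err_enable;

	rc = idxd_register_dma_device(idxd);	/* 3. finally register DMA */
	if (rc < 0)
		goto err_dma;

	return 0;

err_dma:
	idxd_device_disable(idxd);
err_enable:
	idxd_device_evl_free(idxd);
	return rc;
}
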
@@ -1488,6 +1596,7 @@ void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
+       idxd_device_evl_free(idxd);
 }
 
 static enum idxd_dev_type dev_types[] = {
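
On the remove side, idxd_device_evl_free() is called only after the device has been disabled (and reset when configurable), so the hardware can no longer append to the log buffer that is about to be freed. A condensed sketch of that ordering:

/*
 * Condensed sketch of the remove-side ordering added above: quiesce the
 * device first, then release the event log buffer and bitmap.
 */
static void example_remove_order(struct idxd_device *idxd)
{
	idxd_device_disable(idxd);
	if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		idxd_device_reset(idxd);
	idxd_device_evl_free(idxd);	/* safe: device no longer logs events */
}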