From: Marek Szyprowski Date: Thu, 6 Aug 2015 08:03:46 +0000 (+0200) Subject: fimc-is: Adapt to latest changes DMA-mapping/IOMMU frameworks X-Git-Tag: submit/tizen/20190329.020226~260 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=1936ca970a1587af214bceb0f87d50fa2b277931;p=platform%2Fkernel%2Flinux-exynos.git fimc-is: Adapt to latest changes DMA-mapping/IOMMU frameworks This patch is a temporary workaround. 31-bit DMA mask is used to prevent overlapping with the Cortex-A5 reserved range. FIMC-IS CPU uses 0xe0000000-0xffffffff address range for mapping peripheral devices, so those addresses must not be used for memory buffers. Change-Id: I74599b8037644bf7582043ac17a098d49b11193c Signed-off-by: Marek Szyprowski --- diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9d1cebe7f6cb..bff7445ed1b7 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -841,6 +841,22 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) return dma_addr == IOMMU_MAPPING_ERROR; } +int iommu_dma_reserve(struct device *dev, dma_addr_t addr, size_t size) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + unsigned long lo, hi; + + size = iova_align(iovad, size); + lo = iova_pfn(iovad, addr); + hi = iova_pfn(iovad, addr + size - 1); + if (!reserve_iova(iovad, lo, hi)) + return -EBUSY; + + return 0; +} + static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, phys_addr_t msi_addr, struct iommu_domain *domain) { diff --git a/drivers/media/platform/exynos/fimc-is/fimc-is-core.c b/drivers/media/platform/exynos/fimc-is/fimc-is-core.c index c6a829c110bf..b242f1c1a5fa 100644 --- a/drivers/media/platform/exynos/fimc-is/fimc-is-core.c +++ b/drivers/media/platform/exynos/fimc-is/fimc-is-core.c @@ -29,6 +29,8 @@ #include #include #include +#include +#include #include #include @@ -88,10 +90,48 @@ 
extern int fimc_is_3a1c_video_probe(void *data); /* sysfs global variable for debug */ struct fimc_is_sysfs_debug sysfs_debug; +int dma_alloc_coherent_at(struct device *dev, unsigned int size, void **kaddr, dma_addr_t dma_addr, gfp_t flags) +{ + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct sg_table sgt; + dma_addr_t alloc_dma_addr; + int ret; + + *kaddr = dma_alloc_coherent(dev, size, &alloc_dma_addr, flags); + + dev_info(dev, "Allocated %d buffer at %pad\n", size, &alloc_dma_addr); + + /* + * HW requires firmware to be mapped at the beginning of its address + * space. IOMMU_DMA glue layer uses allocator, which assigns + * virtual addresses from the end of defined address space. + * There is no direct way to enforce different IOVA address for the + * allocated buffer, so as a workaround, the firmware buffer will + * be mapped second time at the beginning of the address space. + */ + + if (iommu_dma_reserve(dev, dma_addr, size)) + return -EBUSY; + dev_info(dev, "Reserved %d bytes at %pad\n", size, &dma_addr); + + ret = dma_get_sgtable(dev, &sgt, *kaddr, alloc_dma_addr, size); + + if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.nents, + IOMMU_READ | IOMMU_WRITE) != size) { + ret = -ENOMEM; + } + + dev_info(dev, "Remapped buffer to %pad address\n", &dma_addr); + + sg_free_table(&sgt); + + return ret; +} + static int fimc_is_ischain_allocmem(struct fimc_is_core *this) { struct device *dev = &this->pdev->dev; - /* int ret = 0; */ + int ret = 0; /* void *fw_cookie; */ size_t fw_size = #ifdef ENABLE_ODC @@ -147,10 +187,12 @@ exit: info("[COR] Device virtual for internal: %08x\n", this->minfo.kvaddr); this->minfo.fw_cookie = fw_cookie; #endif - this->minfo.kvaddr = dma_alloc_coherent(dev, fw_size, - &this->minfo.dvaddr, - GFP_KERNEL); - if (this->minfo.kvaddr == NULL) + + this->minfo.dvaddr = 0x10000000; + ret = dma_alloc_coherent_at(dev, fw_size, + &this->minfo.kvaddr, this->minfo.dvaddr, + GFP_KERNEL); + if (ret) return -ENOMEM; /* memset((void
*)this->minfo.kvaddr, 0, fw_size); */ @@ -876,6 +918,9 @@ static int fimc_is_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, true); + /* TEMPORARY HACK */ + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(31); + /* init mutex for spi read */ mutex_init(&core->spi_lock); diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index e8ca5e654277..0791922940a4 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -71,6 +71,8 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir, unsigned long attrs); int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr); +int iommu_dma_reserve(struct device *dev, dma_addr_t addr, size_t size); + /* The DMA API isn't _quite_ the whole story, though... */ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);