return dma_addr == IOMMU_MAPPING_ERROR;
}
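+/*
+ * iommu_dma_reserve - mark [addr, addr + size) as in use so that the
+ * IOVA allocator will never hand out addresses from this range.
+ * Returns -EBUSY if the range cannot be reserved.
+ */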
+int iommu_dma_reserve(struct device *dev, dma_addr_t addr, size_t size)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_dma_cookie *cookie;
+ struct iova_domain *iovad;
+ unsigned long lo, hi;
+
+ /* Refuse devices that are not attached to an IOMMU-DMA domain */
+ if (!domain || !domain->iova_cookie)
+ return -ENODEV;
+
+ cookie = domain->iova_cookie;
+ iovad = &cookie->iovad;
+
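+ /* Round the size up to IOVA granularity and convert to a PFN range */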
+ size = iova_align(iovad, size);
+ lo = iova_pfn(iovad, addr);
+ hi = iova_pfn(iovad, addr + size - 1);
+ if (!reserve_iova(iovad, lo, hi))
+ return -EBUSY;
+
+ return 0;
+}
+
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
phys_addr_t msi_addr, struct iommu_domain *domain)
{
#include <linux/bug.h>
#include <linux/v4l2-mediabus.h>
#include <linux/gpio.h>
+#include <linux/dma-iommu.h>
+#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
/* sysfs global variable for debug */
struct fimc_is_sysfs_debug sysfs_debug;
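+/*
+ * dma_alloc_coherent_at - allocate a coherent buffer and additionally map
+ * it at the fixed device address @dma_addr, on top of the address chosen
+ * by the DMA API. The caller must ensure @dma_addr is otherwise unused.
+ */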
+int dma_alloc_coherent_at(struct device *dev, unsigned int size, void **kaddr,
+ dma_addr_t dma_addr, gfp_t flags)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct sg_table sgt;
+ dma_addr_t alloc_dma_addr;
+ int ret;
+
+ *kaddr = dma_alloc_coherent(dev, size, &alloc_dma_addr, flags);
+ if (!*kaddr)
+ return -ENOMEM;
+
+ dev_info(dev, "Allocated %u byte buffer at %pad\n", size, &alloc_dma_addr);
+
+ /*
+ * HW requires the firmware to be mapped at the beginning of its
+ * address space. The IOMMU_DMA glue layer uses an allocator which
+ * assigns virtual addresses from the end of the defined address
+ * space. There is no direct way to enforce a different IOVA address
+ * for the allocated buffer, so as a workaround the firmware buffer
+ * is mapped a second time at the beginning of the address space.
+ */
+
+ if (iommu_dma_reserve(dev, dma_addr, size)) {
+ dma_free_coherent(dev, size, *kaddr, alloc_dma_addr);
+ return -EBUSY;
+ }
+ dev_info(dev, "Reserved %u bytes at %pad\n", size, &dma_addr);
+
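+ /* Build a scatterlist for the buffer and map it again at dma_addr */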
+ ret = dma_get_sgtable(dev, &sgt, *kaddr, alloc_dma_addr, size);
+ if (ret)
+ return ret;
+
+ if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.nents,
+ IOMMU_READ | IOMMU_WRITE) != size)
+ ret = -ENOMEM;
+ else
+ dev_info(dev, "Remapped buffer to %pad\n", &dma_addr);
+
+ sg_free_table(&sgt);
+
+ return ret;
+}
+
static int fimc_is_ischain_allocmem(struct fimc_is_core *this)
{
struct device *dev = &this->pdev->dev;
- /* int ret = 0; */
+ int ret = 0;
/* void *fw_cookie; */
size_t fw_size =
#ifdef ENABLE_ODC
info("[COR] Device virtual for internal: %08x\n", this->minfo.kvaddr);
this->minfo.fw_cookie = fw_cookie;
#endif
- this->minfo.kvaddr = dma_alloc_coherent(dev, fw_size,
- &this->minfo.dvaddr,
- GFP_KERNEL);
- if (this->minfo.kvaddr == NULL)
+
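+ /*
+ * Fixed device address for the firmware; assumed to be the base of
+ * the address window the FIMC-IS firmware expects to run from.
+ */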
+ this->minfo.dvaddr = 0x10000000;
+ ret = dma_alloc_coherent_at(dev, fw_size,
+ &this->minfo.kvaddr, this->minfo.dvaddr,
+ GFP_KERNEL);
+ if (ret)
return -ENOMEM;
/* memset((void *)this->minfo.kvaddr, 0, fw_size); */
device_init_wakeup(&pdev->dev, true);
+ /*
+ * TEMPORARY HACK: restrict coherent allocations to the low 2 GiB;
+ * the IOMMU-DMA IOVA allocator works downwards from this mask, which
+ * presumably keeps its allocations within the range the FIMC-IS
+ * firmware can address.
+ */
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(31);
+
/* init mutex for spi read */
mutex_init(&core->spi_lock);