}
}
-#ifdef CONFIG_MMU
-bool platform_vaddr_cached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_CACHED_VADDR &&
- addr - XCHAL_KSEG_CACHED_VADDR < XCHAL_KSEG_SIZE;
-}
-
-bool platform_vaddr_uncached(const void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- return addr >= XCHAL_KSEG_BYPASS_VADDR &&
- addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE;
-}
-
-void *platform_vaddr_to_uncached(void *p)
-{
- return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-}
-
-void *platform_vaddr_to_cached(void *p)
-{
- return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-}
-#else
-bool __attribute__((weak)) platform_vaddr_cached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return true;
-}
-
-bool __attribute__((weak)) platform_vaddr_uncached(const void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return false;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_uncached(void *p)
+void arch_dma_prep_coherent(struct page *page, size_t size)
{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
-}
-
-void __attribute__((weak)) *platform_vaddr_to_cached(void *p)
-{
- WARN_ONCE(1, "Default %s implementation is used\n", __func__);
- return p;
+ __invalidate_dcache_range((unsigned long)page_address(page), size);
}
-#endif
/*
- * Note: We assume that the full memory space is always mapped to 'kseg'
- * Otherwise we have to use page attributes (not implemented).
+ * Memory caching is platform-dependent in noMMU xtensa configurations.
+ * The following two functions should be implemented in platform code
+ * in order to enable coherent DMA memory operations when CONFIG_MMU is not
+ * enabled.
*/
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t flag, unsigned long attrs)
-{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page = NULL;
-
- /* ignore region speicifiers */
-
- flag &= ~(__GFP_DMA | __GFP_HIGHMEM);
-
- if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
- flag |= GFP_DMA;
-
- if (gfpflags_allow_blocking(flag))
- page = dma_alloc_from_contiguous(dev, count, get_order(size),
- flag & __GFP_NOWARN);
-
- if (!page)
- page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-
- if (!page)
- return NULL;
-
- *handle = phys_to_dma(dev, page_to_phys(page));
-
#ifdef CONFIG_MMU
- if (PageHighMem(page)) {
- void *p;
-
- p = dma_common_contiguous_remap(page, size,
- pgprot_noncached(PAGE_KERNEL),
- __builtin_return_address(0));
- if (!p) {
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
- }
- return p;
- }
-#endif
- BUG_ON(!platform_vaddr_cached(page_address(page)));
- __invalidate_dcache_range((unsigned long)page_address(page), size);
- return platform_vaddr_to_uncached(page_address(page));
+void *uncached_kernel_address(void *p)
+{
+ return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
+void *cached_kernel_address(void *p)
{
- unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct page *page;
-
- if (platform_vaddr_uncached(vaddr)) {
- page = virt_to_page(platform_vaddr_to_cached(vaddr));
- } else {
-#ifdef CONFIG_MMU
- dma_common_free_remap(vaddr, size);
-#endif
- page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
- }
-
- if (!dma_release_from_contiguous(dev, page, count))
- __free_pages(page, get_order(size));
+ return p + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
}
+#endif /* CONFIG_MMU */
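
For reference, a minimal sketch of what the noMMU platform override mentioned
in the new comment could look like. This is not part of the patch: the
PLATFORM_UNCACHED_OFFSET constant is made up for illustration, and the sketch
assumes a board where an uncached alias of system RAM sits at a fixed offset
from the cached mapping.

/* Hypothetical platform code (not in this patch): provide the hooks that
 * the generic uncached-segment support expects on a noMMU configuration.
 * PLATFORM_UNCACHED_OFFSET is an assumed, board-specific constant.
 */
#define PLATFORM_UNCACHED_OFFSET	0x10000000UL

void *uncached_kernel_address(void *p)
{
	/* Translate a cached kernel address to its uncached alias. */
	return (void *)((unsigned long)p + PLATFORM_UNCACHED_OFFSET);
}

void *cached_kernel_address(void *p)
{
	/* Inverse translation: uncached alias back to the cached address. */
	return (void *)((unsigned long)p - PLATFORM_UNCACHED_OFFSET);
}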
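
From a driver's point of view nothing changes with this conversion: coherent
buffers are still obtained through the usual DMA API, and the generic
direct-mapping code is expected to call arch_dma_prep_coherent() and
uncached_kernel_address() on non-coherent xtensa systems. A short usage
sketch follows; "dev" is assumed to be a valid struct device with its DMA
mask set, and the helper names are illustrative only.

#include <linux/dma-mapping.h>

/* Allocate a coherent buffer; on xtensa this now goes through the generic
 * allocator rather than the arch_dma_alloc() removed above.
 */
static void *alloc_example(struct device *dev, size_t size,
			   dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}

/* Release a buffer obtained from alloc_example(). */
static void free_example(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t dma_handle)
{
	dma_free_coherent(dev, size, cpu_addr, dma_handle);
}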