min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
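+/*
+ * Mark the pages backing a DMA allocation as decrypted if the device
+ * requires unencrypted memory (see force_dma_unencrypted()); otherwise
+ * this is a no-op.
+ */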
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
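+/*
+ * Return the pages backing a DMA allocation to the encrypted state
+ * before they are released; a no-op unless force_dma_unencrypted()
+ * is true for the device.
+ */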
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+	if (!force_dma_unencrypted(dev))
+		return 0;
+	return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
static void __dma_direct_free_pages(struct device *dev, struct page *page,
size_t size)
{
{
struct page *page;
void *ret;
- int err;
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
- if (force_dma_unencrypted(dev)) {
- err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
- if (err)
- goto out_free_pages;
- }
+ if (dma_set_decrypted(dev, ret, size))
+ goto out_free_pages;
memset(ret, 0, size);
goto done;
}
}
ret = page_address(page);
- if (force_dma_unencrypted(dev)) {
- err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
- if (err)
- goto out_free_pages;
- }
-
+ if (dma_set_decrypted(dev, ret, size))
+ goto out_free_pages;
memset(ret, 0, size);
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
return ret;
out_encrypt_pages:
- if (force_dma_unencrypted(dev)) {
- err = set_memory_encrypted((unsigned long)page_address(page),
- 1 << get_order(size));
- /* If memory cannot be re-encrypted, it must be leaked */
- if (err)
- return NULL;
- }
+ /* If memory cannot be re-encrypted, it must be leaked */
+ if (dma_set_encrypted(dev, page_address(page), size))
+ return NULL;
out_free_pages:
__dma_direct_free_pages(dev, page, size);
return NULL;
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
return;
- if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+ dma_set_encrypted(dev, cpu_addr, size);
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
vunmap(cpu_addr);
return NULL;
ret = page_address(page);
- if (force_dma_unencrypted(dev)) {
- if (set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size)))
- goto out_free_pages;
- }
+ if (dma_set_decrypted(dev, ret, size))
+ goto out_free_pages;
memset(ret, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
dma_free_from_pool(dev, vaddr, size))
return;
- if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+ dma_set_encrypted(dev, vaddr, size);
__dma_direct_free_pages(dev, page, size);
}