void cpu_cache_init(void);
+static inline void *sh_cacheop_vaddr(void *vaddr)
+{
+        if (__in_29bit_mode())
+                vaddr = (void *)CAC_ADDR((unsigned long)vaddr);
+        return vaddr;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_SH_CACHEFLUSH_H */
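What the new helper captures: when the CPU runs in 29-bit mode, cache maintenance has to be performed on the cached (P1) alias of a buffer, which is what CAC_ADDR() computes; otherwise the virtual address can be used as-is. A minimal usage sketch, assuming a hypothetical driver buffer buf of len bytes that is about to be handed to a device:

        /* Write back and invalidate the buffer through the address
         * sh_cacheop_vaddr() returns; buf and len are placeholders, and
         * __flush_purge_region() is the same primitive the maple bus
         * conversion further down uses.
         */
        __flush_purge_region(sh_cacheop_vaddr(buf), len);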
void sh_sync_dma_for_device(void *vaddr, size_t size,
                            enum dma_data_direction direction)
{
-        void *addr;
-
-        addr = __in_29bit_mode() ?
-               (void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
+        void *addr = sh_cacheop_vaddr(vaddr);
        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                __flush_invalidate_region(addr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                __flush_wback_region(addr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                __flush_purge_region(addr, size);
                break;
        default:
                BUG();
        }
}
-EXPORT_SYMBOL(sh_sync_dma_for_device);
static int __init memchunk_setup(char *str)
{
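The hunk above folds the open-coded 29-bit fixup into the new helper and also drops the EXPORT_SYMBOL, presumably because the remaining modular user, the maple bus driver below, now calls the SH cache primitives directly. In effect, a driver that used to pass a dma_data_direction picks the matching primitive itself; a rough sketch of that mapping, with buf and len as hypothetical placeholders:

        /* DMA_TO_DEVICE: write dirty lines back before the device reads them. */
        __flush_wback_region(sh_cacheop_vaddr(buf), len);

        /* DMA_FROM_DEVICE: invalidate stale lines before the CPU reads
         * what the device wrote.
         */
        __flush_invalidate_region(sh_cacheop_vaddr(buf), len);

        /* DMA_BIDIRECTIONAL: write back and invalidate. */
        __flush_purge_region(sh_cacheop_vaddr(buf), len);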
        mutex_unlock(&maple_wlist_lock);
        if (maple_packets > 0) {
                for (i = 0; i < (1 << MAPLE_DMA_PAGES); i++)
-                       sh_sync_dma_for_device(maple_sendbuf + i * PAGE_SIZE,
-                                              PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       __flush_purge_region(maple_sendbuf + i * PAGE_SIZE,
+                                            PAGE_SIZE);
        }
finish:
        list_for_each_entry_safe(mq, nmq, &maple_sentq, list) {
                mdev = mq->dev;
                recvbuf = mq->recvbuf->buf;
-               sh_sync_dma_for_device(recvbuf, 0x400, DMA_FROM_DEVICE);
+               __flush_invalidate_region(sh_cacheop_vaddr(recvbuf),
+                                         0x400);
                code = recvbuf[0];
                kfree(mq->sendbuf);
                list_del_init(&mq->list);