#endif
}
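+/*
+ * vmap/vunmap can create or remove extra cached views of a page, so
+ * conservatively writeback and invalidate the whole D-cache.
+ */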
+static void r4k__flush_cache_vmap(void)
+{
+ r4k_blast_dcache();
+}
+
+static void r4k__flush_cache_vunmap(void)
+{
+ r4k_blast_dcache();
+}
+
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
PAGE_SIZE - 1);
else
shm_align_mask = PAGE_SIZE-1;
+
+ __flush_cache_vmap = r4k__flush_cache_vmap;
+ __flush_cache_vunmap = r4k__flush_cache_vunmap;
+
flush_cache_all = cache_noop;
__flush_cache_all = r4k___flush_cache_all;
flush_cache_mm = r4k_flush_cache_mm;
local_irq_restore(flags);
}
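+/* TX39: blast the whole D-cache so no stale vmalloc aliases survive. */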
+static void tx39__flush_cache_vmap(void)
+{
+ tx39_blast_dcache();
+}
+
+static void tx39__flush_cache_vunmap(void)
+{
+ tx39_blast_dcache();
+}
+
static inline void tx39_flush_cache_all(void)
{
if (!cpu_has_dc_aliases)
switch (current_cpu_type()) {
case CPU_TX3912:
/* TX39/H core (writethru direct-map cache) */
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
flush_cache_all = tx39h_flush_icache_all;
__flush_cache_all = tx39h_flush_icache_all;
flush_cache_mm = (void *) tx39h_flush_icache_all;
write_c0_wired(0); /* set 8 on reset... */
/* board-dependent init code may set WBON */
+ __flush_cache_vmap = tx39__flush_cache_vmap;
+ __flush_cache_vunmap = tx39__flush_cache_vunmap;
+
flush_cache_all = tx39_flush_cache_all;
__flush_cache_all = tx39___flush_cache_all;
flush_cache_mm = tx39_flush_cache_mm;
unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
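+/* vmap/vunmap flush hooks, filled in by the per-CPU cache init code. */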
+void (*__flush_cache_vmap)(void);
+void (*__flush_cache_vunmap)(void);
+
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
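+/*
+ * The old macros expanded to flush_cache_all(), which the R4k init
+ * code points at cache_noop, so vmalloc aliases were never flushed.
+ */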
+
+extern void (*__flush_cache_vmap)(void);
+
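+/*
+ * Invoked after a new vmalloc mapping is installed; only CPUs whose
+ * D-cache can alias have any work to do.
+ */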
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vmap();
+}
+
+extern void (*__flush_cache_vunmap)(void);
+
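+/* Invoked before a vmalloc mapping is torn down. */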
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ if (cpu_has_dc_aliases)
+ __flush_cache_vunmap();
+}
extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src,