        page = pfn_to_page(pfn);
        mapping = page_mapping(page);
        if (mapping) {
+#ifndef CONFIG_SMP
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty)
                        __flush_dcache_page(mapping, page);
+#endif
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
+                else if (vma->vm_flags & VM_EXEC)
+                        __flush_icache_all();
        }
}
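The hunk above is from update_mmu_cache() in arch/arm/mm/fault-armv.c. On SMP the lazy PG_dcache_dirty flush is compiled out: deferring the D-cache flush until the first fault is only safe when the CPU taking the fault is the one that dirtied the cache, so flush_dcache_page() must flush eagerly instead (next hunk). For non-VIVT (VIPT/PIPT) caches there are no virtual D-cache aliases to repair, but the I-cache can still hold stale lines for a page that is now being mapped executable, so it is invalidated wholesale.

The next hunk is the tail of flush_dcache_page() in arch/arm/mm/flush.c. For orientation, a sketch of how the function opens in kernels of this era (reconstructed, not part of the patch); the #ifndef here is the UP-only counterpart of the block above:

        void flush_dcache_page(struct page *page)
        {
                struct address_space *mapping = page_mapping(page);

        #ifndef CONFIG_SMP
                /* UP only: defer the flush to update_mmu_cache() while the
                 * page is not mapped into any user address space */
                if (mapping && !mapping_mapped(mapping))
                        set_bit(PG_dcache_dirty, &page->flags);
                else
        #endif
                {
                        /* ...the hunk below continues here... */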
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
+                else if (mapping)
+                        __flush_icache_all();
        }
}
EXPORT_SYMBOL(flush_dcache_page);
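With SMP builds now always taking the flush path, the logic is: VIVT caches flush the user-space aliases as before; for any other cache type, a page that belongs to a mapping gets the whole I-cache invalidated, since there is no cheap way to know whether some task has it mapped executable. For context, a typical caller looks roughly like this (hypothetical driver-style sequence; offset, src and len are placeholders):

        /* write into a page-cache page through a kernel mapping, then
         * let the arch code make user mappings coherent */
        void *kaddr = kmap(page);
        memcpy(kaddr + offset, src, len);
        kunmap(page);
        flush_dcache_page(page);

The __flush_icache_all() helper itself is added to arch/arm/include/asm/cacheflush.h: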
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+static inline void __flush_icache_all(void)
+{
+        asm("mcr        p15, 0, %0, c7, c5, 0   @ invalidate I-cache\n"
+            :
+            : "r" (0));
+}
+
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
                        struct page *page, unsigned long vmaddr)
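The MCR writes CP15 c7, c5, 0, the "invalidate entire I-cache" operation (ICIALLU in ARMv7 terminology). The register operand is architecturally ignored and should be zero, hence the single "r" (0) input; with no outputs the asm is implicitly volatile, so the compiler cannot drop it. On cores where the new instruction stream must be visible before the next instruction fetch, barriers are also needed; a sketch of a stricter variant using the classic CP15 barrier encodings (hypothetical name, not part of this patch):

        static inline void __flush_icache_all_sync(void)       /* hypothetical */
        {
                asm volatile("mcr       p15, 0, %0, c7, c5, 0   @ ICIALLU\n\t"
                             "mcr       p15, 0, %0, c7, c10, 4  @ DSB\n\t"
                             "mcr       p15, 0, %0, c7, c5, 4   @ ISB\n"
                             :
                             : "r" (0)
                             : "memory");
        }

The final hunk, from switch_mm() in arch/arm/include/asm/mmu_context.h, handles the thread-migration case: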
#ifdef CONFIG_MMU
        unsigned int cpu = smp_processor_id();
+#ifdef CONFIG_SMP
+        /* check for possible thread migration */
+        if (!cpus_empty(next->cpu_vm_mask) && !cpu_isset(cpu, next->cpu_vm_mask))
+                __flush_icache_all();
+#endif
        if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
                check_context(next);
                cpu_switch_mm(next->pgd, next);
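If next->cpu_vm_mask is non-empty but does not include this CPU, the mm is already live on another core while this core has never run it, i.e. a thread has just migrated here; any lazy I-cache maintenance happened on the old CPU only, so this CPU's I-cache is invalidated before the task resumes. With the later accessor-style cpumask API the same check would read roughly (a sketch, not this patch's code):

        if (!cpumask_empty(mm_cpumask(next)) &&
            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();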