sh: Kill off now redundant local irq disabling.
authorPaul Mundt <lethal@linux-sh.org>
Fri, 21 Aug 2009 09:21:07 +0000 (18:21 +0900)
committerPaul Mundt <lethal@linux-sh.org>
Fri, 21 Aug 2009 09:21:07 +0000 (18:21 +0900)
on_each_cpu() takes care of IRQ and preempt handling, so the localized
handling in each of the called functions can be killed off.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/mm/cache-sh2a.c
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-sh5.c
arch/sh/mm/cache-sh7705.c

index 975899d..d783361 100644 (file)
@@ -102,12 +102,10 @@ static void sh2a_flush_icache_range(void *args)
        struct flusher_data *data = args;
        unsigned long start, end;
        unsigned long v;
-       unsigned long flags;
 
        start = data->addr1 & ~(L1_CACHE_BYTES-1);
        end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
 
-       local_irq_save(flags);
        jump_to_uncached();
 
        for (v = start; v < end; v+=L1_CACHE_BYTES) {
@@ -122,12 +120,10 @@ static void sh2a_flush_icache_range(void *args)
                        }
                }
                /* I-Cache invalidate */
-               ctrl_outl(addr,
-                         CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
+               ctrl_outl(addr, CACHE_IC_ADDRESS_ARRAY | addr | 0x00000008);
        }
 
        back_to_cached();
-       local_irq_restore(flags);
 }
 
 void __init sh2a_cache_init(void)
index 9201b37..e3b77f0 100644 (file)
@@ -48,48 +48,44 @@ static void sh4_flush_icache_range(void *args)
        struct flusher_data *data = args;
        int icacheaddr;
        unsigned long start, end;
-       unsigned long flags, v;
+       unsigned long v;
        int i;
 
        start = data->addr1;
        end = data->addr2;
 
-       /* If there are too many pages then just blow the caches */
-        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-                local_flush_cache_all(args);
-       } else {
-               /* selectively flush d-cache then invalidate the i-cache */
-               /* this is inefficient, so only use for small ranges */
-               start &= ~(L1_CACHE_BYTES-1);
-               end += L1_CACHE_BYTES-1;
-               end &= ~(L1_CACHE_BYTES-1);
-
-               local_irq_save(flags);
-               jump_to_uncached();
-
-               for (v = start; v < end; v+=L1_CACHE_BYTES) {
-                       asm volatile("ocbwb     %0"
-                                    : /* no output */
-                                    : "m" (__m(v)));
-
-                       icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
-                                       v & cpu_data->icache.entry_mask);
-
-                       for (i = 0; i < cpu_data->icache.ways;
-                               i++, icacheaddr += cpu_data->icache.way_incr)
-                                       /* Clear i-cache line valid-bit */
-                                       ctrl_outl(0, icacheaddr);
-               }
+       /* If there are too many pages then just blow the caches */
+       if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+               local_flush_cache_all(args);
+       } else {
+               /* selectively flush d-cache then invalidate the i-cache */
+               /* this is inefficient, so only use for small ranges */
+               start &= ~(L1_CACHE_BYTES-1);
+               end += L1_CACHE_BYTES-1;
+               end &= ~(L1_CACHE_BYTES-1);
+
+               jump_to_uncached();
+
+               for (v = start; v < end; v+=L1_CACHE_BYTES) {
+                       __ocbwb(v);
+
+                       icacheaddr = CACHE_IC_ADDRESS_ARRAY |
+                               (v & cpu_data->icache.entry_mask);
+
+                       for (i = 0; i < cpu_data->icache.ways;
+                               i++, icacheaddr += cpu_data->icache.way_incr)
+                               /* Clear i-cache line valid-bit */
+                               ctrl_outl(0, icacheaddr);
+               }
 
                back_to_cached();
-               local_irq_restore(flags);
        }
 }
 
 static inline void flush_cache_4096(unsigned long start,
                                    unsigned long phys)
 {
-       unsigned long flags, exec_offset = 0;
+       unsigned long exec_offset = 0;
 
        /*
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
@@ -99,10 +95,8 @@ static inline void flush_cache_4096(unsigned long start,
            (start < CACHE_OC_ADDRESS_ARRAY))
                exec_offset = 0x20000000;
 
-       local_irq_save(flags);
        __flush_cache_4096(start | SH_CACHE_ASSOC,
                           P1SEGADDR(phys), exec_offset);
-       local_irq_restore(flags);
 }
 
 /*
@@ -135,9 +129,8 @@ static void sh4_flush_dcache_page(void *page)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-       unsigned long flags, ccr;
+       unsigned long ccr;
 
-       local_irq_save(flags);
        jump_to_uncached();
 
        /* Flush I-cache */
@@ -149,9 +142,7 @@ static void __uses_jump_to_uncached flush_icache_all(void)
         * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */
-
        back_to_cached();
-       local_irq_restore(flags);
 }
 
 static inline void flush_dcache_all(void)
index 467ff8e..2f9dd6d 100644 (file)
@@ -34,28 +34,22 @@ static inline void
 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
                           unsigned long paddr)
 {
-       local_irq_disable();
        sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
 }
 
 static inline void sh64_teardown_dtlb_cache_slot(void)
 {
        sh64_teardown_tlb_slot(dtlb_cache_slot);
-       local_irq_enable();
 }
 
 static inline void sh64_icache_inv_all(void)
 {
        unsigned long long addr, flag, data;
-       unsigned long flags;
 
        addr = ICCR0;
        flag = ICCR0_ICI;
        data = 0;
 
-       /* Make this a critical section for safety (probably not strictly necessary.) */
-       local_irq_save(flags);
-
        /* Without %1 it gets unexplicably wrong */
        __asm__ __volatile__ (
                "getcfg %3, 0, %0\n\t"
@@ -64,8 +58,6 @@ static inline void sh64_icache_inv_all(void)
                "synci"
                : "=&r" (data)
                : "0" (data), "r" (flag), "r" (addr));
-
-       local_irq_restore(flags);
 }
 
 static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
@@ -90,7 +82,6 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
           Also, eaddr is page-aligned. */
        unsigned int cpu = smp_processor_id();
        unsigned long long addr, end_addr;
-       unsigned long flags = 0;
        unsigned long running_asid, vma_asid;
        addr = eaddr;
        end_addr = addr + PAGE_SIZE;
@@ -111,10 +102,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
 
        running_asid = get_asid();
        vma_asid = cpu_asid(cpu, vma->vm_mm);
-       if (running_asid != vma_asid) {
-               local_irq_save(flags);
+       if (running_asid != vma_asid)
                switch_and_save_asid(vma_asid);
-       }
+
        while (addr < end_addr) {
                /* Worth unrolling a little */
                __asm__ __volatile__("icbi %0,  0" : : "r" (addr));
@@ -123,10 +113,9 @@ static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long
                __asm__ __volatile__("icbi %0, 96" : : "r" (addr));
                addr += 128;
        }
-       if (running_asid != vma_asid) {
+
+       if (running_asid != vma_asid)
                switch_and_save_asid(running_asid);
-               local_irq_restore(flags);
-       }
 }
 
 static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
@@ -159,16 +148,12 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
                unsigned long eaddr;
                unsigned long after_last_page_start;
                unsigned long mm_asid, current_asid;
-               unsigned long flags = 0;
 
                mm_asid = cpu_asid(smp_processor_id(), mm);
                current_asid = get_asid();
 
-               if (mm_asid != current_asid) {
-                       /* Switch ASID and run the invalidate loop under cli */
-                       local_irq_save(flags);
+               if (mm_asid != current_asid)
                        switch_and_save_asid(mm_asid);
-               }
 
                aligned_start = start & PAGE_MASK;
                after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
@@ -194,10 +179,8 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
                        aligned_start = vma->vm_end; /* Skip to start of next region */
                }
 
-               if (mm_asid != current_asid) {
+               if (mm_asid != current_asid)
                        switch_and_save_asid(current_asid);
-                       local_irq_restore(flags);
-               }
        }
 }
 
index 6293f57..9dc3866 100644 (file)
@@ -81,7 +81,6 @@ static void sh7705_flush_icache_range(void *args)
 static void __flush_dcache_page(unsigned long phys)
 {
        unsigned long ways, waysize, addrstart;
-       unsigned long flags;
 
        phys |= SH_CACHE_VALID;
 
@@ -98,7 +97,6 @@ static void __flush_dcache_page(unsigned long phys)
         * potential cache aliasing, therefore the optimisation is probably not
         * possible.
         */
-       local_irq_save(flags);
        jump_to_uncached();
 
        ways = current_cpu_data.dcache.ways;
@@ -126,7 +124,6 @@ static void __flush_dcache_page(unsigned long phys)
        } while (--ways);
 
        back_to_cached();
-       local_irq_restore(flags);
 }
 
 /*
@@ -145,14 +142,9 @@ static void sh7705_flush_dcache_page(void *page)
 
 static void sh7705_flush_cache_all(void *args)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
        jump_to_uncached();
-
        cache_wback_all();
        back_to_cached();
-       local_irq_restore(flags);
 }
 
 /*