Merge branch 'sh/stable-updates'
author Paul Mundt <lethal@linux-sh.org>
Fri, 16 Oct 2009 06:14:50 +0000 (15:14 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Fri, 16 Oct 2009 06:14:50 +0000 (15:14 +0900)
Conflicts:
	arch/sh/mm/cache-sh4.c

arch/sh/mm/cache-sh4.c
arch/sh/mm/cache.c

   * flushing. Anything exceeding this will simply flush the dcache in its
   * entirety.
   */
 -#define MAX_DCACHE_PAGES      64      /* XXX: Tune for ways */
  #define MAX_ICACHE_PAGES      32
  
- static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+ static void __flush_cache_one(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset);
  
  /*
@@@ -93,11 -99,10 +92,11 @@@ static inline void flush_cache_one(unsi
         */
        if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
 -              exec_offset = 0x20000000;
 +              exec_offset = cached_to_uncached;
  
        local_irq_save(flags);
-       __flush_cache_4096(start | SH_CACHE_ASSOC,
-                          virt_to_phys(phys), exec_offset);
 -      __flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
++      __flush_cache_one(start | SH_CACHE_ASSOC,
++                        virt_to_phys(phys), exec_offset);
        local_irq_restore(flags);
  }
  
@@@ -121,9 -126,9 +120,9 @@@ static void sh4_flush_dcache_page(void 
                int i, n;
  
                /* Loop all the D-cache */
-               n = boot_cpu_data.dcache.way_incr >> 12;
-               for (i = 0; i < n; i++, addr += 4096)
-                       flush_cache_4096(addr, phys);
+               n = boot_cpu_data.dcache.n_aliases;
 -              for (i = 0; i < n; i++, addr += PAGE_SIZE)
++              for (i = 0; i <= n; i++, addr += PAGE_SIZE)
+                       flush_cache_one(addr, phys);
        }
  
        wmb();
@@@ -210,64 -300,44 +209,63 @@@ static void sh4_flush_cache_page(void *
  {
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
 +      struct page *page;
        unsigned long address, pfn, phys;
 -      unsigned int alias_mask;
 +      int map_coherent = 0;
 +      pgd_t *pgd;
 +      pud_t *pud;
 +      pmd_t *pmd;
 +      pte_t *pte;
 +      void *vaddr;
  
        vma = data->vma;
--      address = data->addr1;
++      address = data->addr1 & PAGE_MASK;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;
 +      page = pfn_to_page(pfn);
  
        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;
  
-       address &= PAGE_MASK;
 -      alias_mask = boot_cpu_data.dcache.alias_mask;
 -
 -      /* We only need to flush D-cache when we have alias */
 -      if ((address^phys) & alias_mask) {
 -              /* Loop 4K of the D-cache */
 -              flush_cache_one(
 -                      CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
 -                      phys);
 -              /* Loop another 4K of the D-cache */
 -              flush_cache_one(
 -                      CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
 -                      phys);
 -      }
 +      pgd = pgd_offset(vma->vm_mm, address);
 +      pud = pud_offset(pgd, address);
 +      pmd = pmd_offset(pud, address);
 +      pte = pte_offset_kernel(pmd, address);
 +
 +      /* If the page isn't present, there is nothing to do here. */
 +      if (!(pte_val(*pte) & _PAGE_PRESENT))
 +              return;
  
 -      alias_mask = boot_cpu_data.icache.alias_mask;
 -      if (vma->vm_flags & VM_EXEC) {
 +      if ((vma->vm_mm == current->active_mm))
 +              vaddr = NULL;
 +      else {
                /*
 -               * Evict entries from the portion of the cache from which code
 -               * may have been executed at this address (virtual).  There's
 -               * no need to evict from the portion corresponding to the
 -               * physical address as for the D-cache, because we know the
 -               * kernel has never executed the code through its identity
 -               * translation.
 +               * Use kmap_coherent or kmap_atomic to do flushes for
 +               * another ASID than the current one.
                 */
 -              flush_cache_one(
 -                      CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
 -                      phys);
 +              map_coherent = (current_cpu_data.dcache.n_aliases &&
 +                      !test_bit(PG_dcache_dirty, &page->flags) &&
 +                      page_mapped(page));
 +              if (map_coherent)
 +                      vaddr = kmap_coherent(page, address);
 +              else
 +                      vaddr = kmap_atomic(page, KM_USER0);
 +
 +              address = (unsigned long)vaddr;
 +      }
 +
 +      if (pages_do_alias(address, phys))
-               flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
++              flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
 +                      (address & shm_align_mask), phys);
 +
 +      if (vma->vm_flags & VM_EXEC)
 +              flush_icache_all();
 +
 +      if (vaddr) {
 +              if (map_coherent)
 +                      kunmap_coherent(vaddr);
 +              else
 +                      kunmap_atomic(vaddr, KM_USER0);
        }
  }
  
arch/sh/mm/cache.c: Simple merge
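
Note on the resolved sh4_flush_cache_page() above: the flush_cache_one() call on the
operand-cache address array is only issued when pages_do_alias() reports that the user
virtual address and the physical address land on different cache colours. The following
standalone sketch illustrates that colour check. PAGE_SIZE and SHM_ALIGN_MASK here are
assumed example values (the kernel derives shm_align_mask from the probed D-cache
geometry), so treat this as an illustration, not the kernel's implementation.

    /*
     * Minimal sketch (not kernel code): the cache-colour aliasing test that
     * decides whether the operand-cache flush can be skipped.
     * SHM_ALIGN_MASK is an assumed example value for a 16KB-per-way D-cache.
     */
    #include <stdio.h>

    #define PAGE_SIZE      4096UL
    #define SHM_ALIGN_MASK (0x4000UL - 1)   /* assumed: 16KB/way => 4 colours */

    /* Two page-aligned addresses alias when their colour (index) bits differ. */
    static int pages_do_alias(unsigned long addr1, unsigned long addr2)
    {
            return ((addr1 ^ addr2) & SHM_ALIGN_MASK & ~(PAGE_SIZE - 1)) != 0;
    }

    int main(void)
    {
            unsigned long uvaddr = 0x40001000UL & ~(PAGE_SIZE - 1); /* user virtual page */
            unsigned long phys   = 0x0c003000UL & ~(PAGE_SIZE - 1); /* physical page */

            if (pages_do_alias(uvaddr, phys))
                    printf("colours differ: operand-cache flush is required\n");
            else
                    printf("same colour: the D-cache flush can be skipped\n");

            return 0;
    }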