/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

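/* Drain this CPU's pending-flush batch: scrub the matching TSB
 * entries, then flush the batched virtual addresses from the TLB
 * (by cross-call on SMP) if the mm still holds a valid hardware
 * context.
 */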
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);

	if (tb->tlb_nr) {
		flush_tsb_user(tb);

		if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
		tb->tlb_nr = 0;
	}

	put_cpu_var(tlb_batch);
}

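/* Queue one virtual address for a deferred flush, encoding the
 * executable bit in bit 0 of the stored address.  A batch that is
 * full, or that belongs to a different mm, is drained immediately.
 */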
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

	put_cpu_var(tlb_batch);
}

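/* Called when an established user translation is changed or torn
 * down.  If the old page was dirty and its kernel and user mappings
 * can alias in the virtually-indexed D-cache (bit 13 selects the
 * page color for 8K pages), flush the D-cache before queueing the
 * TLB flush.
 */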
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
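/* The old PMD pointed at a pte page: walk it and queue a flush for
 * every valid PAGE_SIZE sub-page within the HPAGE_SIZE region.
 */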
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

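/* Install a new PMD value, keeping the per-mm count of huge
 * mappings in sync and queueing TLB flushes for whatever the old
 * PMD translated.
 */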
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
		if (pmd_val(pmd) & PMD_ISHUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

		addr &= HPAGE_MASK;
		if (pmd_val(orig) & PMD_ISHUGE)
			tlb_batch_add_one(mm, addr, exec);
		else
			tlb_batch_pmd_scan(mm, addr, orig, exec);
	}
}

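/* Stash a preallocated pte page for a later huge PMD split.  The
 * pte page itself serves as the list node; a struct list_head fits
 * in its first two 8-byte entries.
 */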
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) mm->pmd_huge_pte);
	mm->pmd_huge_pte = pgtable;
}

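/* Hand back the most recently deposited pte page, clearing the two
 * entries that were overwritten by the list linkage.
 */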
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = (pgtable_t) lh->next;
		list_del(lh);
	}
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */