arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state, use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector, the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed onto the 8 available
 *	vectors. The limited global vector space forces us to do this for now.
 *	In the future, when interrupts are split into per-CPU domains, this
 *	could be fixed at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_start;
		unsigned long flush_end;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * State is put into the per-CPU data section, but padded
 * to a full cache line because other CPUs can access it and we don't
 * want false sharing in the per-CPU data segment.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

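/*
 * Illustrative sketch only: these two hypothetical helpers are not used by
 * the code below; they just spell out the slot/vector mapping that
 * flush_tlb_others_ipi() and smp_invalidate_interrupt() perform inline.
 */
static inline unsigned int tlb_slot_to_vector(unsigned int sender)
{
	/* one of the NUM_INVALIDATE_TLB_VECTORS flush IPI vectors */
	return INVALIDATE_TLB_VECTOR_START + sender;
}

static inline unsigned int tlb_vector_to_slot(unsigned int vector)
{
	/* index into flush_state[], as recovered from ~regs->orig_ax */
	return vector - INVALIDATE_TLB_VECTOR_START;
}
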
/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead we just update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

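/*
 * Within this file leave_mm() is reached on the lazy-TLB paths: from
 * smp_invalidate_interrupt() and do_flush_tlb_all() when this CPU is in
 * TLBSTATE_LAZY, and from the flush_tlb_*() helpers below when a kernel
 * thread is borrowing the mm (current->mm == NULL).  Clearing the bit in
 * mm_cpumask() stops further flush IPIs for that mm on this CPU, and
 * loading swapper_pg_dir drops the stale user mappings.
 */
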
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there are
 * no write/read ordering problems.
 */

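/*
 * Rough sketch of case 1b above (same mm, e.g. returning from lazy TLB
 * mode), paraphrasing the switch_mm() logic in <asm/mmu_context.h> of this
 * era; the authoritative code lives there and may differ in detail:
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	// 1b1
 *	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))	// 1b2
 *		load_cr3(next->pgd);				// 1b3: leave_mm() ran, reload
 */
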
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: the use of asmlinkage is not consistent.  On x86_64 it is a noop,
 * kept only for documentation purposes.  On x86_32, asmlinkage means
 * regparm(0), but the interrupt entry code calls in with the first
 * parameter in %eax.  Maybe define intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG(), but until someone can quote me the
		 * line from the Intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs,
		 * it's staying as a return.
		 *
		 * BUG();
		 */

	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_end == TLB_FLUSH_ALL
					|| !cpu_has_invlpg)
				local_flush_tlb();
			else if (!f->flush_end)
				__flush_tlb_single(f->flush_start);
			else {
				unsigned long addr;
				addr = f->flush_start;
				while (addr < f->flush_end) {
					__flush_tlb_single(addr);
					addr += PAGE_SIZE;
				}
			}
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

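/*
 * flush_tlb_others_ipi() publishes the flush parameters in this CPU's
 * flush_state[] slot, sends the invalidate IPI to the target CPUs and then
 * spins until every target has cleared its bit in flush_cpumask, which
 * smp_invalidate_interrupt() above does only after flushing.  The
 * tlbstate_lock is taken only when nr_cpu_ids exceeds
 * NUM_INVALIDATE_TLB_VECTORS, i.e. when several CPUs share a slot.
 */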
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_start = start;
	f->flush_end = end;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_start = 0;
	f->flush_end = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}

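/*
 * On SGI UV systems, uv_flush_tlb_others() below may perform the shootdown
 * through the UV Broadcast Assist Unit instead; it returns the subset of
 * CPUs (possibly NULL) that still needs a conventional IPI, which is then
 * handled by flush_tlb_others_ipi().
 */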
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, start, end);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, start, end);
}

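/*
 * The flush_tlb_others() calls further down reach this function through the
 * wrapper in <asm/tlbflush.h>: without CONFIG_PARAVIRT it is a plain
 * #define for native_flush_tlb_others(); with paravirt it goes through
 * pv_mmu_ops.flush_tlb_others, for which this is the native implementation.
 */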
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We change tlb_vector_offset for each CPU at runtime, but this
	 * does not cause inconsistency, as the write is atomic on x86.  We
	 * might see more lock contention for a short while, but once every
	 * CPU's tlb_vector_offset has been updated, things return to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we may
	 * waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}

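/*
 * Worked example of the distribution above: with NUM_INVALIDATE_TLB_VECTORS
 * == 8 and two online nodes, nr_node_vecs is 4, so node 0's CPUs cycle
 * through slots 0-3 and node 1's CPUs through slots 4-7.  With more than
 * eight online nodes every node gets a single slot and nodes start sharing
 * vectors, since idx wraps modulo NUM_INVALIDATE_TLB_VECTORS.
 */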
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

#define FLUSHALL_BAR	16

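/*
 * FLUSHALL_BAR tunes the heuristic in flush_tlb_range() below: the range is
 * flushed page by page only while it covers no more than
 * act_entries / FLUSHALL_BAR pages, where act_entries is
 * min(tlb entries, mm->total_vm).  For example, assuming a hypothetical
 * 512-entry 4K data TLB and a large enough mm, ranges above
 * 512 / 16 = 32 pages fall back to a full local_flush_tlb().
 */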
void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm;

	if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	preempt_disable();
	mm = vma->vm_mm;
	if (current->active_mm == mm) {
		if (current->mm) {
			unsigned long addr, vmflag = vma->vm_flags;
			unsigned act_entries, tlb_entries = 0;

			if (vmflag & VM_EXEC)
				tlb_entries = tlb_lli_4k[ENTRIES];
			else
				tlb_entries = tlb_lld_4k[ENTRIES];

			act_entries = tlb_entries > mm->total_vm ?
					mm->total_vm : tlb_entries;

			if ((end - start)/PAGE_SIZE > act_entries/FLUSHALL_BAR)
				local_flush_tlb();
			else {
				for (addr = start; addr < end;
						addr += PAGE_SIZE)
					__flush_tlb_single(addr);

				if (cpumask_any_but(mm_cpumask(mm),
					smp_processor_id()) < nr_cpu_ids)
					flush_tlb_others(mm_cpumask(mm), mm,
								start, end);
				preempt_enable();
				return;
			}
		} else {
			leave_mm(smp_processor_id());
		}
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}