#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
                        = { &init_mm, 0, };

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway).
 *
 *      Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      To avoid global state use 8 different call vectors.
 *      Each CPU uses a specific vector to trigger flushes on other
 *      CPUs. Depending on the received vector the target CPUs look into
 *      the right array slot for the flush data.
 *
 *      With more than 8 CPUs they are hashed to the 8 available
 *      vectors. The limited global vector space forces us to this right now.
 *      In future, when interrupts are split into per-CPU domains, this could
 *      be fixed at the cost of triggering multiple IPIs in some cases.
 */
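
/*
 * Vector assignment is done per NUMA node by calculate_tlb_offset() below.
 * As an illustrative example: with the 8 available vectors and two online
 * nodes, CPUs on the first node rotate through flush_state slots 0-3 and
 * CPUs on the second node through slots 4-7; the exact split depends on
 * nr_online_nodes.
 */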

union smp_flush_state {
        struct {
                struct mm_struct *flush_mm;
                unsigned long flush_start;
                unsigned long flush_end;
                raw_spinlock_t tlbstate_lock;
                DECLARE_BITMAP(flush_cpumask, NR_CPUS);
        };
        char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);
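
/*
 * leave_mm() is only legal when this CPU is in lazy TLB mode (hence the
 * BUG() above); it is called from the flush paths below, e.g.
 * smp_invalidate_interrupt(), flush_tlb_mm() and do_flush_tlb_all().
 * Dropping the CPU from mm_cpumask() stops further flush IPIs for that mm,
 * and reloading cr3 with swapper_pg_dir ensures no stale user-space entries
 * can be speculatively loaded afterwards.
 */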

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *      for the wrong mm, and in the worst case we perform a superfluous
 *      tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *      Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *      was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles
 *      flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a noop,
 * kept only for documentation purposes, but the usage is slightly
 * inconsistent.  On x86_32, asmlinkage is regparm(0) but the interrupt
 * entry code calls in with the first parameter in %eax.  Maybe define
 * intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
        unsigned int cpu, sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
        f = &flush_state[sender];

        if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs
                 * it's staying as a return
                 */
        if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
                if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                        if (f->flush_end == TLB_FLUSH_ALL ||
                                        !cpu_has_invlpg)
                                local_flush_tlb();
                        else if (!f->flush_end)
                                __flush_tlb_single(f->flush_start);
                        else {
                                /* flush the requested range page by page */
                                unsigned long addr = f->flush_start;

                                while (addr < f->flush_end) {
                                        __flush_tlb_single(addr);
                                        addr += PAGE_SIZE;
                                }
                        }
                } else
                        leave_mm(cpu);
        }
out:
        ack_APIC_irq();
        smp_mb__before_clear_bit();
        cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
        smp_mb__after_clear_bit();
        inc_irq_stat(irq_tlb_count);
}
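
/*
 * Sender/receiver handshake: flush_tlb_others_ipi() below fills in a
 * flush_state slot, sets flush_cpumask and then spins until the mask is
 * empty again.  Each receiving CPU clears its bit only after its local
 * flush has completed (note the barriers above), so an empty mask tells
 * the sender that the slot can safely be reused.
 */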

static void flush_tlb_others_ipi(const struct cpumask *cpumask,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        unsigned int sender;
        union smp_flush_state *f;

        /* Caller has disabled preemption */
        sender = this_cpu_read(tlb_vector_offset);
        f = &flush_state[sender];

        if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
                raw_spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_start = start;
        f->flush_end = end;
        if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask,
                           cpumask_of(smp_processor_id()))) {
                /* We have to send the IPI only to CPUs affected. */
                apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
                                    INVALIDATE_TLB_VECTOR_START + sender);

                while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
                        cpu_relax();
        }
        f->flush_mm = NULL;
        f->flush_start = 0;
        f->flush_end = 0;
        if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
                raw_spin_unlock(&f->tlbstate_lock);
}
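
/*
 * On SGI UV systems, native_flush_tlb_others() lets the Broadcast Assist
 * Unit perform the remote flushes in hardware: uv_flush_tlb_others()
 * returns the subset of CPUs it could not handle (or NULL when nothing is
 * left to do), and only those CPUs get the conventional flush IPI.
 */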
void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        if (is_uv_system()) {
                unsigned int cpu = smp_processor_id();

                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        flush_tlb_others_ipi(cpumask, mm, start, end);
                return;
        }
        flush_tlb_others_ipi(cpumask, mm, start, end);
}

static void __cpuinit calculate_tlb_offset(void)
{
        int cpu, node, nr_node_vecs, idx = 0;
        /*
         * we are changing tlb_vector_offset for each CPU at runtime, but this
         * will not cause inconsistency, as the write is atomic under x86. we
         * might see more lock contention in a short time, but after all CPUs'
         * tlb_vector_offset are changed, everything should go normal
         *
         * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we might
         * waste some vectors.
         */
        if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
                nr_node_vecs = 1;
        else
                nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

        for_each_online_node(node) {
                int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
                        nr_node_vecs;
                int cpu_offset = 0;

                for_each_cpu(cpu, cpumask_of_node(node)) {
                        per_cpu(tlb_vector_offset, cpu) = node_offset +
                                cpu_offset;
                        cpu_offset++;
                        cpu_offset = cpu_offset % nr_node_vecs;
                }
                idx++;
        }
}

static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
                unsigned long action, void *hcpu)
{
        switch (action & 0xf) {
        case CPU_ONLINE:
        case CPU_DEAD:
                calculate_tlb_offset();
        }
        return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(flush_state); i++)
                raw_spin_lock_init(&flush_state[i].tlbstate_lock);

        calculate_tlb_offset();
        hotcpu_notifier(tlb_cpuhp_notify, 0);
        return 0;
}
core_initcall(init_smp_flush);

void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();
        local_flush_tlb();
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();
        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

#define FLUSHALL_BAR    16
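
/*
 * Heuristic used by flush_tlb_range() below: once a range covers more than
 * act_entries/FLUSHALL_BAR pages, flushing the whole TLB is assumed to be
 * cheaper than flushing page by page.  As an illustrative example, with 512
 * 4K data-TLB entries (and total_vm at least that large), any range of more
 * than 32 pages falls back to local_flush_tlb().
 */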

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm;

        if (!cpu_has_invlpg || vma->vm_flags & VM_HUGETLB) {
                flush_tlb_mm(vma->vm_mm);
                return;
        }

        preempt_disable();
        mm = vma->vm_mm;
        if (current->active_mm == mm) {
                if (current->mm) {
                        unsigned long addr, vmflag = vma->vm_flags;
                        unsigned act_entries, tlb_entries = 0;

                        if (vmflag & VM_EXEC)
                                tlb_entries = tlb_lli_4k[ENTRIES];
                        else
                                tlb_entries = tlb_lld_4k[ENTRIES];

                        act_entries = tlb_entries > mm->total_vm ?
                                        mm->total_vm : tlb_entries;
                        if ((end - start) / PAGE_SIZE > act_entries / FLUSHALL_BAR)
                                local_flush_tlb();
                        else {
                                for (addr = start; addr < end;
                                                addr += PAGE_SIZE)
                                        __flush_tlb_single(addr);

                                if (cpumask_any_but(mm_cpumask(mm),
                                                smp_processor_id()) < nr_cpu_ids)
                                        flush_tlb_others(mm_cpumask(mm), mm,
                                                         start, end);
                                preempt_enable();
                                return;
                        }
                } else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(start);
                else
                        leave_mm(smp_processor_id());
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
        preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}