/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

static volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS];			/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];			/* Map logical to physical */

extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (cpu_data[cpu].core == cpu_data[i].core) {
				cpu_set(i, cpu_sibling_map[cpu]);
				cpu_set(cpu, cpu_sibling_map[i]);
			}
		}
	} else
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
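
/*
 * Illustrative sketch only, not part of the original file: a platform
 * port typically fills in a plat_smp_ops structure and hands it to
 * register_smp_ops() from its early setup code.  Only the hooks this
 * file actually invokes are shown, and every "myplat_*" name below is
 * hypothetical.
 */
#if 0
static struct plat_smp_ops myplat_smp_ops = {
	.send_ipi_mask	= myplat_send_ipi_mask,
	.init_secondary	= myplat_init_secondary,
	.smp_finish	= myplat_smp_finish,
	.cpus_done	= myplat_cpus_done,
	.boot_secondary	= myplat_boot_secondary,
	.prepare_cpus	= myplat_prepare_cpus,
};

void __init myplat_smp_init(void)
{
	register_smp_ops(&myplat_smp_ops);
}
#endif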

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
	unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
	/* Only do cpu_probe for first TC of CPU */
	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
	cpu_probe();
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	notify_cpu_starting(cpu);

	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);

	synchronise_count_slave();

	cpu_idle();
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

/*
 * We reuse the same vector for the single IPI
 */
void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void smp_call_function_interrupt(void)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	generic_smp_call_function_interrupt();
	irq_exit();
}
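
/*
 * Illustrative sketch only, not part of the original file: because the
 * single and mask variants share one IPI vector, a platform's IPI
 * interrupt handler decodes the action bits and calls
 * smp_call_function_interrupt() once for SMP_CALL_FUNCTION.  The
 * "myplat_*" helpers are hypothetical.
 */
#if 0
static irqreturn_t myplat_ipi_interrupt(int irq, void *dev_id)
{
	unsigned int action = myplat_read_and_clear_ipi();	/* hypothetical */

	/* A reschedule IPI needs no action beyond taking the interrupt. */
	if (action & SMP_CALL_FUNCTION)
		smp_call_function_interrupt();

	return IRQ_HANDLED;
}
#endif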

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	for (;;) {
		if (cpu_wait)
			(*cpu_wait)();		/* Wait if available. */
	}
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	mp_ops->cpus_done();
	synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(0, cpu_possible_map);
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * We could use others, but this seems to work best.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		cpumask_t mask = cpu_online_map;
		unsigned int cpu;

		cpu_clear(smp_processor_id(), mask);
		for_each_cpu_mask(cpu, mask)
			if (cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);