/*
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>

#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>

/*
 * This was written with the Sega Saturn (SMP SH-2 7604) in mind,
 * but is designed to be usable regardless of whether there's an MMU
 * present or not.
 */
struct sh_cpuinfo cpu_data[NR_CPUS];

extern void per_cpu_trap_init(void);

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
static atomic_t cpus_booted = ATOMIC_INIT(0);

/* These are defined by the board-specific code. */

/*
 * Cause the function described by call_data to be executed on the passed
 * cpu. When the function has finished, increment the finished field of
 * call_data.
 */
void __smp_send_ipi(unsigned int cpu, unsigned int action);

/*
 * Find the number of available processors.
 */
unsigned int __smp_probe_cpus(void);

/*
 * Start a particular processor.
 */
void __smp_slave_init(unsigned int cpu);

/*
 * Run the specified function on a particular processor.
 */
void __smp_call_function(unsigned int cpu);
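
/*
 * Illustrative sketch only (not part of this file): one plausible way a
 * board port might provide two of these hooks, assuming a hypothetical
 * fixed-topology board with two CPUs and a memory-mapped IPI register.
 * BOARD_IPI_REG_BASE is an assumption made up for the example; the real
 * implementations live in the board-specific code.
 */
#if 0
unsigned int __smp_probe_cpus(void)
{
        /* Fixed-topology board: the CPU count is known at build time. */
        return 2;
}

void __smp_send_ipi(unsigned int cpu, unsigned int action)
{
        /* Hypothetical IPI register block, one word per target CPU. */
        ctrl_outl(action, BOARD_IPI_REG_BASE + (cpu * 4));
}
#endif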

static inline void __init smp_store_cpu_info(unsigned int cpu)
{
        cpu_data[cpu].loops_per_jiffy = loops_per_jiffy;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned int cpu = smp_processor_id();
        int i;

        atomic_set(&cpus_booted, 1);
        smp_store_cpu_info(cpu);

        for (i = 0; i < __smp_probe_cpus(); i++)
                cpu_set(i, cpu_possible_map);
}

void __devinit smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        cpu_set(cpu, cpu_online_map);
        cpu_set(cpu, cpu_possible_map);
}

int __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = fork_idle(cpu);

        if (IS_ERR(tsk))
                panic("Failed forking idle task for cpu %d\n", cpu);

        task_thread_info(tsk)->cpu = cpu;

        cpu_set(cpu, cpu_online_map);

        return 0;
}

int start_secondary(void *unused)
{
        unsigned int cpu;

        cpu = smp_processor_id();

        /* Run the secondary in the init address space. */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        smp_store_cpu_info(cpu);

        __smp_slave_init(cpu);
        per_cpu_trap_init();

        atomic_inc(&cpus_booted);

        cpu_idle();

        return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        smp_mb();
}

void smp_send_reschedule(int cpu)
{
        __smp_send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

static void stop_this_cpu(void *unused)
{
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();

        for (;;)
                cpu_relax();
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, 0, 1, 0);
}

struct smp_fn_call_struct smp_fn_call = {
        .lock           = SPIN_LOCK_UNLOCKED,
        .finished       = ATOMIC_INIT(0),
};
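
/*
 * For reference: struct smp_fn_call_struct itself is defined in
 * <asm/smp.h>. Reconstructed from the fields this file touches, it has
 * at least the following shape (a sketch, not the authoritative
 * definition):
 *
 *      struct smp_fn_call_struct {
 *              spinlock_t lock;
 *              atomic_t finished;
 *              void (*fn)(void *);
 *              void *data;
 *      };
 */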

/*
 * The caller of this wants the passed function to run on every cpu. If wait
 * is set, wait until all cpus have finished the function before returning.
 * The lock is here to protect the call structure.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *info), void *info, int retry, int wait)
{
        unsigned int nr_cpus = atomic_read(&cpus_booted);
        int i;

        /* Nothing to do with fewer than two CPUs up. */
        if (nr_cpus < 2)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        spin_lock(&smp_fn_call.lock);

        atomic_set(&smp_fn_call.finished, 0);
        smp_fn_call.fn = func;
        smp_fn_call.data = info;

        for (i = 0; i < nr_cpus; i++)
                if (i != smp_processor_id())
                        __smp_call_function(i);

        if (wait)
                /* Spin until every other cpu has run func. */
                while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1));

        spin_unlock(&smp_fn_call.lock);

        return 0;
}
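
/*
 * Illustrative usage sketch only (not part of this file): broadcasting a
 * handler to all other CPUs and waiting for completion. The handler runs
 * from IPI context on the remote CPUs, so it must not sleep. The names
 * below are assumptions made up for the example.
 */
#if 0
static void example_show_cpu(void *info)
{
        printk(KERN_INFO "CPU %d alive\n", smp_processor_id());
}

static void example_broadcast(void)
{
        /* retry = 1, wait = 1: block until all other cpus have run it. */
        smp_call_function(example_show_cpu, NULL, 1, 1);
        example_show_cpu(NULL);         /* run it locally as well */
}
#endif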

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb context on other cpus is invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, intercpu interrupts have to
 * be sent. Another case where intercpu interrupts are required is when
 * the target mm might be active on another cpu (eg debuggers doing the
 * flushes on behalf of debuggees, kswapd stealing pages from another
 * process etc).
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                /* Shared or foreign mm: other cpus may hold live entries. */
                smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = start;
                fd.addr2 = end;
                smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd;

        fd.addr1 = start;
        fd.addr2 = end;
        on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
            (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd;

                fd.vma = vma;
                fd.addr1 = page;
                smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
        } else {
                int i;
                for (i = 0; i < num_online_cpus(); i++)
                        if (smp_processor_id() != i)
                                cpu_context(i, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
        local_flush_tlb_one(fd->addr1, fd->addr2);
}

void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
        struct flush_tlb_data fd;

        fd.addr1 = asid;
        fd.addr2 = vaddr;

        smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1);
        local_flush_tlb_one(asid, vaddr);
}