arch/sparc/kernel/smp_64.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* smp.c: Sparc64 SMP support.
3  *
4  * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
5  */
6
7 #include <linux/export.h>
8 #include <linux/kernel.h>
9 #include <linux/sched/mm.h>
10 #include <linux/sched/hotplug.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/threads.h>
14 #include <linux/smp.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/delay.h>
18 #include <linux/init.h>
19 #include <linux/spinlock.h>
20 #include <linux/fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/cache.h>
23 #include <linux/jiffies.h>
24 #include <linux/profile.h>
25 #include <linux/memblock.h>
26 #include <linux/vmalloc.h>
27 #include <linux/ftrace.h>
28 #include <linux/cpu.h>
29 #include <linux/slab.h>
30 #include <linux/kgdb.h>
31
32 #include <asm/head.h>
33 #include <asm/ptrace.h>
34 #include <linux/atomic.h>
35 #include <asm/tlbflush.h>
36 #include <asm/mmu_context.h>
37 #include <asm/cpudata.h>
38 #include <asm/hvtramp.h>
39 #include <asm/io.h>
40 #include <asm/timer.h>
41 #include <asm/setup.h>
42
43 #include <asm/irq.h>
44 #include <asm/irq_regs.h>
45 #include <asm/page.h>
46 #include <asm/oplib.h>
47 #include <linux/uaccess.h>
48 #include <asm/starfire.h>
49 #include <asm/tlb.h>
50 #include <asm/sections.h>
51 #include <asm/prom.h>
52 #include <asm/mdesc.h>
53 #include <asm/ldc.h>
54 #include <asm/hypervisor.h>
55 #include <asm/pcr.h>
56
57 #include "cpumap.h"
58 #include "kernel.h"
59
60 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
61 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
62         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
63
64 cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
65         [0 ... NR_CPUS-1] = CPU_MASK_NONE };
66
67 cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
68         [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
69
70 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
71 EXPORT_SYMBOL(cpu_core_map);
72 EXPORT_SYMBOL(cpu_core_sib_map);
73 EXPORT_SYMBOL(cpu_core_sib_cache_map);
74
75 static cpumask_t smp_commenced_mask;
76
77 static DEFINE_PER_CPU(bool, poke);
78 static bool cpu_poke;
79
80 void smp_info(struct seq_file *m)
81 {
82         int i;
83
84         seq_printf(m, "State:\n");
85         for_each_online_cpu(i)
86                 seq_printf(m, "CPU%d:\t\tonline\n", i);
87 }
88
89 void smp_bogo(struct seq_file *m)
90 {
91         int i;
92
93         for_each_online_cpu(i)
94                 seq_printf(m,
95                            "Cpu%dClkTck\t: %016lx\n",
96                            i, cpu_data(i).clock_tick);
97 }
98
99 extern void setup_sparc64_timer(void);
100
101 static volatile unsigned long callin_flag = 0;
102
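/* Entry point for a freshly booted secondary cpu, called from the
 * sparc64_cpu_startup trampoline: set up the per-cpu offset, MMU/TSB
 * state and the cpu timer, signal the master via callin_flag, then wait
 * to appear in smp_commenced_mask before going online and entering the
 * idle loop.
 */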
103 void smp_callin(void)
104 {
105         int cpuid = hard_smp_processor_id();
106
107         __local_per_cpu_offset = __per_cpu_offset(cpuid);
108
109         if (tlb_type == hypervisor)
110                 sun4v_ktsb_register();
111
112         __flush_tlb_all();
113
114         setup_sparc64_timer();
115
116         if (cheetah_pcache_forced_on)
117                 cheetah_enable_pcache();
118
119         callin_flag = 1;
120         __asm__ __volatile__("membar #Sync\n\t"
121                              "flush  %%g6" : : : "memory");
122
123         /* Clear this or we will die instantly when we
124          * schedule back to this idler...
125          */
126         current_thread_info()->new_child = 0;
127
128         /* Attach to the address space of init_task. */
129         mmgrab(&init_mm);
130         current->active_mm = &init_mm;
131
132         /* inform the notifiers about the new cpu */
133         notify_cpu_starting(cpuid);
134
135         while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
136                 rmb();
137
138         set_cpu_online(cpuid, true);
139
140         /* idle thread is expected to have preempt disabled */
141         preempt_disable();
142
143         local_irq_enable();
144
145         cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
146 }
147
148 void cpu_panic(void)
149 {
150         printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
151         panic("SMP bolixed\n");
152 }
153
154 /* This tick register synchronization scheme is taken entirely from
155  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
156  *
157  * The only change I've made is to rework it so that the master
158  * initiates the synchronization instead of the slave. -DaveM
159  */
160
161 #define MASTER  0
162 #define SLAVE   (SMP_CACHE_BYTES/sizeof(unsigned long))
163
164 #define NUM_ROUNDS      64      /* magic value */
165 #define NUM_ITERS       5       /* likewise */
166
167 static DEFINE_RAW_SPINLOCK(itc_sync_lock);
168 static unsigned long go[SLAVE + 1];
169
170 #define DEBUG_TICK_SYNC 0
171
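/* One measurement pass of the tick handshake: take NUM_ITERS round-trip
 * samples through the go[] mailbox, keep the sample with the smallest
 * round-trip time, and return the signed offset between the midpoint of
 * that sample and the master's reported tick.  *rt and *master report
 * the best round-trip time and master offset for reporting/debug.
 */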
172 static inline long get_delta (long *rt, long *master)
173 {
174         unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
175         unsigned long tcenter, t0, t1, tm;
176         unsigned long i;
177
178         for (i = 0; i < NUM_ITERS; i++) {
179                 t0 = tick_ops->get_tick();
180                 go[MASTER] = 1;
181                 membar_safe("#StoreLoad");
182                 while (!(tm = go[SLAVE]))
183                         rmb();
184                 go[SLAVE] = 0;
185                 wmb();
186                 t1 = tick_ops->get_tick();
187
188                 if (t1 - t0 < best_t1 - best_t0)
189                         best_t0 = t0, best_t1 = t1, best_tm = tm;
190         }
191
192         *rt = best_t1 - best_t0;
193         *master = best_tm - best_t0;
194
195         /* average best_t0 and best_t1 without overflow: */
196         tcenter = (best_t0/2 + best_t1/2);
197         if (best_t0 % 2 + best_t1 % 2 == 2)
198                 tcenter++;
199         return tcenter - best_tm;
200 }
201
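/* Slave side of tick synchronization, run by a freshly booted cpu: wait
 * for the master to start the exchange, then perform NUM_ROUNDS rounds of
 * get_delta(), nudging the local tick with tick_ops->add_tick() until the
 * measured delta reaches zero.
 */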
202 void smp_synchronize_tick_client(void)
203 {
204         long i, delta, adj, adjust_latency = 0, done = 0;
205         unsigned long flags, rt, master_time_stamp;
206 #if DEBUG_TICK_SYNC
207         struct {
208                 long rt;        /* roundtrip time */
209                 long master;    /* master's timestamp */
210                 long diff;      /* difference between midpoint and master's timestamp */
211                 long lat;       /* estimate of itc adjustment latency */
212         } t[NUM_ROUNDS];
213 #endif
214
215         go[MASTER] = 1;
216
217         while (go[MASTER])
218                 rmb();
219
220         local_irq_save(flags);
221         {
222                 for (i = 0; i < NUM_ROUNDS; i++) {
223                         delta = get_delta(&rt, &master_time_stamp);
224                         if (delta == 0)
225                                 done = 1;       /* let's lock on to this... */
226
227                         if (!done) {
228                                 if (i > 0) {
229                                         adjust_latency += -delta;
230                                         adj = -delta + adjust_latency/4;
231                                 } else
232                                         adj = -delta;
233
234                                 tick_ops->add_tick(adj);
235                         }
236 #if DEBUG_TICK_SYNC
237                         t[i].rt = rt;
238                         t[i].master = master_time_stamp;
239                         t[i].diff = delta;
240                         t[i].lat = adjust_latency/4;
241 #endif
242                 }
243         }
244         local_irq_restore(flags);
245
246 #if DEBUG_TICK_SYNC
247         for (i = 0; i < NUM_ROUNDS; i++)
248                 printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
249                        t[i].rt, t[i].master, t[i].diff, t[i].lat);
250 #endif
251
252         printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
253                "(last diff %ld cycles, maxerr %lu cycles)\n",
254                smp_processor_id(), delta, rt);
255 }
256
257 static void smp_start_sync_tick_client(int cpu);
258
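/* Master side of tick synchronization: kick the target cpu with
 * xcall_sync_tick, wait for it to report readiness through go[MASTER],
 * then serve it NUM_ROUNDS * NUM_ITERS tick samples under itc_sync_lock.
 */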
259 static void smp_synchronize_one_tick(int cpu)
260 {
261         unsigned long flags, i;
262
263         go[MASTER] = 0;
264
265         smp_start_sync_tick_client(cpu);
266
267         /* wait for client to be ready */
268         while (!go[MASTER])
269                 rmb();
270
271         /* now let the client proceed into his loop */
272         go[MASTER] = 0;
273         membar_safe("#StoreLoad");
274
275         raw_spin_lock_irqsave(&itc_sync_lock, flags);
276         {
277                 for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
278                         while (!go[MASTER])
279                                 rmb();
280                         go[MASTER] = 0;
281                         wmb();
282                         go[SLAVE] = tick_ops->get_tick();
283                         membar_safe("#StoreLoad");
284                 }
285         }
286         raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
287 }
288
289 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
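/* Start a cpu in an LDOM guest: build an hvtramp_descr describing this
 * cpu's trap block and the locked kernel image mappings, then ask the
 * hypervisor to start the cpu at the hv_cpu_startup trampoline.
 */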
290 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
291                                 void **descrp)
292 {
293         extern unsigned long sparc64_ttable_tl0;
294         extern unsigned long kern_locked_tte_data;
295         struct hvtramp_descr *hdesc;
296         unsigned long trampoline_ra;
297         struct trap_per_cpu *tb;
298         u64 tte_vaddr, tte_data;
299         unsigned long hv_err;
300         int i;
301
302         hdesc = kzalloc(sizeof(*hdesc) +
303                         (sizeof(struct hvtramp_mapping) *
304                          num_kernel_image_mappings - 1),
305                         GFP_KERNEL);
306         if (!hdesc) {
307                 printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
308                        "hvtramp_descr.\n");
309                 return;
310         }
311         *descrp = hdesc;
312
313         hdesc->cpu = cpu;
314         hdesc->num_mappings = num_kernel_image_mappings;
315
316         tb = &trap_block[cpu];
317
318         hdesc->fault_info_va = (unsigned long) &tb->fault_info;
319         hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
320
321         hdesc->thread_reg = thread_reg;
322
323         tte_vaddr = (unsigned long) KERNBASE;
324         tte_data = kern_locked_tte_data;
325
326         for (i = 0; i < hdesc->num_mappings; i++) {
327                 hdesc->maps[i].vaddr = tte_vaddr;
328                 hdesc->maps[i].tte   = tte_data;
329                 tte_vaddr += 0x400000;
330                 tte_data  += 0x400000;
331         }
332
333         trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
334
335         hv_err = sun4v_cpu_start(cpu, trampoline_ra,
336                                  kimage_addr_to_ra(&sparc64_ttable_tl0),
337                                  __pa(hdesc));
338         if (hv_err)
339                 printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
340                        "gives error %lu\n", hv_err);
341 }
342 #endif
343
344 extern unsigned long sparc64_cpu_startup;
345
346 /* The OBP cpu startup callback truncates the 3rd arg cookie to
347  * 32-bits (I think) so to be safe we have it read the pointer
348  * contained here so we work on >4GB machines. -DaveM
349  */
350 static struct thread_info *cpu_new_thread = NULL;
351
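/* Boot one secondary cpu: publish its idle thread through cpu_new_thread,
 * start it via the hypervisor/LDOM path or OBP as appropriate, then poll
 * callin_flag for up to ~5 seconds waiting for the new cpu to check in.
 */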
352 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
353 {
354         unsigned long entry =
355                 (unsigned long)(&sparc64_cpu_startup);
356         unsigned long cookie =
357                 (unsigned long)(&cpu_new_thread);
358         void *descr = NULL;
359         int timeout, ret;
360
361         callin_flag = 0;
362         cpu_new_thread = task_thread_info(idle);
363
364         if (tlb_type == hypervisor) {
365 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
366                 if (ldom_domaining_enabled)
367                         ldom_startcpu_cpuid(cpu,
368                                             (unsigned long) cpu_new_thread,
369                                             &descr);
370                 else
371 #endif
372                         prom_startcpu_cpuid(cpu, entry, cookie);
373         } else {
374                 struct device_node *dp = of_find_node_by_cpuid(cpu);
375
376                 prom_startcpu(dp->phandle, entry, cookie);
377         }
378
379         for (timeout = 0; timeout < 50000; timeout++) {
380                 if (callin_flag)
381                         break;
382                 udelay(100);
383         }
384
385         if (callin_flag) {
386                 ret = 0;
387         } else {
388                 printk("Processor %d is stuck.\n", cpu);
389                 ret = -ENODEV;
390         }
391         cpu_new_thread = NULL;
392
393         kfree(descr);
394
395         return ret;
396 }
397
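/* Deliver one interrupt vector (data0/data1/data2) to a single cpu via
 * the Spitfire UDB interrupt dispatch registers, polling the dispatch
 * status and retrying for as long as the target keeps NACKing us.
 */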
398 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
399 {
400         u64 result, target;
401         int stuck, tmp;
402
403         if (this_is_starfire) {
404                 /* map to real upaid */
405                 cpu = (((cpu & 0x3c) << 1) |
406                         ((cpu & 0x40) >> 4) |
407                         (cpu & 0x3));
408         }
409
410         target = (cpu << 14) | 0x70;
411 again:
412         /* Ok, this is the real Spitfire Errata #54.
413          * One must read back from a UDB internal register
414          * after writes to the UDB interrupt dispatch, but
415          * before the membar Sync for that write.
416          * So we use the high UDB control register (ASI 0x7f,
417          * ADDR 0x20) for the dummy read. -DaveM
418          */
419         tmp = 0x40;
420         __asm__ __volatile__(
421         "wrpr   %1, %2, %%pstate\n\t"
422         "stxa   %4, [%0] %3\n\t"
423         "stxa   %5, [%0+%8] %3\n\t"
424         "add    %0, %8, %0\n\t"
425         "stxa   %6, [%0+%8] %3\n\t"
426         "membar #Sync\n\t"
427         "stxa   %%g0, [%7] %3\n\t"
428         "membar #Sync\n\t"
429         "mov    0x20, %%g1\n\t"
430         "ldxa   [%%g1] 0x7f, %%g0\n\t"
431         "membar #Sync"
432         : "=r" (tmp)
433         : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
434           "r" (data0), "r" (data1), "r" (data2), "r" (target),
435           "r" (0x10), "0" (tmp)
436         : "g1");
437
438         /* NOTE: PSTATE_IE is still clear. */
439         stuck = 100000;
440         do {
441                 __asm__ __volatile__("ldxa [%%g0] %1, %0"
442                         : "=r" (result)
443                         : "i" (ASI_INTR_DISPATCH_STAT));
444                 if (result == 0) {
445                         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
446                                              : : "r" (pstate));
447                         return;
448                 }
449                 stuck -= 1;
450                 if (stuck == 0)
451                         break;
452         } while (result & 0x1);
453         __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
454                              : : "r" (pstate));
455         if (stuck == 0) {
456                 printk("CPU[%d]: mondo stuckage result[%016llx]\n",
457                        smp_processor_id(), result);
458         } else {
459                 udelay(2);
460                 goto again;
461         }
462 }
463
464 static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
465 {
466         u64 *mondo, data0, data1, data2;
467         u16 *cpu_list;
468         u64 pstate;
469         int i;
470
471         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
472         cpu_list = __va(tb->cpu_list_pa);
473         mondo = __va(tb->cpu_mondo_block_pa);
474         data0 = mondo[0];
475         data1 = mondo[1];
476         data2 = mondo[2];
477         for (i = 0; i < cnt; i++)
478                 spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
479 }
480
481 /* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
482  * packet, but we have no use for that.  However, we do take advantage of
483  * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
484  */
485 static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
486 {
487         int nack_busy_id, is_jbus, need_more;
488         u64 *mondo, pstate, ver, busy_mask;
489         u16 *cpu_list;
490
491         cpu_list = __va(tb->cpu_list_pa);
492         mondo = __va(tb->cpu_mondo_block_pa);
493
494         /* Unfortunately, someone at Sun had the brilliant idea to make the
495          * busy/nack fields hard-coded by ITID number for this Ultra-III
496          * derivative processor.
497          */
498         __asm__ ("rdpr %%ver, %0" : "=r" (ver));
499         is_jbus = ((ver >> 32) == __JALAPENO_ID ||
500                    (ver >> 32) == __SERRANO_ID);
501
502         __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
503
504 retry:
505         need_more = 0;
506         __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
507                              : : "r" (pstate), "i" (PSTATE_IE));
508
509         /* Setup the dispatch data registers. */
510         __asm__ __volatile__("stxa      %0, [%3] %6\n\t"
511                              "stxa      %1, [%4] %6\n\t"
512                              "stxa      %2, [%5] %6\n\t"
513                              "membar    #Sync\n\t"
514                              : /* no outputs */
515                              : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
516                                "r" (0x40), "r" (0x50), "r" (0x60),
517                                "i" (ASI_INTR_W));
518
519         nack_busy_id = 0;
520         busy_mask = 0;
521         {
522                 int i;
523
524                 for (i = 0; i < cnt; i++) {
525                         u64 target, nr;
526
527                         nr = cpu_list[i];
528                         if (nr == 0xffff)
529                                 continue;
530
531                         target = (nr << 14) | 0x70;
532                         if (is_jbus) {
533                                 busy_mask |= (0x1UL << (nr * 2));
534                         } else {
535                                 target |= (nack_busy_id << 24);
536                                 busy_mask |= (0x1UL <<
537                                               (nack_busy_id * 2));
538                         }
539                         __asm__ __volatile__(
540                                 "stxa   %%g0, [%0] %1\n\t"
541                                 "membar #Sync\n\t"
542                                 : /* no outputs */
543                                 : "r" (target), "i" (ASI_INTR_W));
544                         nack_busy_id++;
545                         if (nack_busy_id == 32) {
546                                 need_more = 1;
547                                 break;
548                         }
549                 }
550         }
551
552         /* Now, poll for completion. */
553         {
554                 u64 dispatch_stat, nack_mask;
555                 long stuck;
556
557                 stuck = 100000 * nack_busy_id;
558                 nack_mask = busy_mask << 1;
559                 do {
560                         __asm__ __volatile__("ldxa      [%%g0] %1, %0"
561                                              : "=r" (dispatch_stat)
562                                              : "i" (ASI_INTR_DISPATCH_STAT));
563                         if (!(dispatch_stat & (busy_mask | nack_mask))) {
564                                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
565                                                      : : "r" (pstate));
566                                 if (unlikely(need_more)) {
567                                         int i, this_cnt = 0;
568                                         for (i = 0; i < cnt; i++) {
569                                                 if (cpu_list[i] == 0xffff)
570                                                         continue;
571                                                 cpu_list[i] = 0xffff;
572                                                 this_cnt++;
573                                                 if (this_cnt == 32)
574                                                         break;
575                                         }
576                                         goto retry;
577                                 }
578                                 return;
579                         }
580                         if (!--stuck)
581                                 break;
582                 } while (dispatch_stat & busy_mask);
583
584                 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
585                                      : : "r" (pstate));
586
587                 if (dispatch_stat & busy_mask) {
588                         /* Busy bits will not clear, continue instead
589                          * of freezing up on this cpu.
590                          */
591                         printk("CPU[%d]: mondo stuckage result[%016llx]\n",
592                                smp_processor_id(), dispatch_stat);
593                 } else {
594                         int i, this_busy_nack = 0;
595
596                         /* Delay some random time with interrupts enabled
597                          * to prevent deadlock.
598                          */
599                         udelay(2 * nack_busy_id);
600
601                         /* Clear out the mask bits for cpus which did not
602                          * NACK us.
603                          */
604                         for (i = 0; i < cnt; i++) {
605                                 u64 check_mask, nr;
606
607                                 nr = cpu_list[i];
608                                 if (nr == 0xffff)
609                                         continue;
610
611                                 if (is_jbus)
612                                         check_mask = (0x2UL << (2*nr));
613                                 else
614                                         check_mask = (0x2UL <<
615                                                       this_busy_nack);
616                                 if ((dispatch_stat & check_mask) == 0)
617                                         cpu_list[i] = 0xffff;
618                                 this_busy_nack += 2;
619                                 if (this_busy_nack == 64)
620                                         break;
621                         }
622
623                         goto retry;
624                 }
625         }
626 }
627
628 #define CPU_MONDO_COUNTER(cpuid)        (cpu_mondo_counter[cpuid])
629 #define MONDO_USEC_WAIT_MIN             2
630 #define MONDO_USEC_WAIT_MAX             100
631 #define MONDO_RETRY_LIMIT               500000
632
633 /* Multi-cpu list version.
634  *
635  * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
636  * Sometimes not all cpus receive the mondo, requiring us to re-send it
637  * until all cpus have received it, or the remaining cpus are truly
638  * stuck, unable to receive the mondo, and we time out.
639  * Occasionally a target cpu strand is borrowed briefly by the hypervisor
640  * to perform guest service, such as PCIe error handling. Considering
641  * that service time, an overall wait of 1 second is reasonable for 1 cpu.
642  * Two in-between mondo check wait times are defined here: 2 usec for a
643  * quick single-cpu turn around and up to 100 usec for a large cpu count.
644  * Delivering a mondo to a large number of cpus can take longer; we adjust
645  * the retry count as long as the target cpus are making forward progress.
646  */
647 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
648 {
649         int this_cpu, tot_cpus, prev_sent, i, rem;
650         int usec_wait, retries, tot_retries;
651         u16 first_cpu = 0xffff;
652         unsigned long xc_rcvd = 0;
653         unsigned long status;
654         int ecpuerror_id = 0;
655         int enocpu_id = 0;
656         u16 *cpu_list;
657         u16 cpu;
658
659         this_cpu = smp_processor_id();
660         cpu_list = __va(tb->cpu_list_pa);
661         usec_wait = cnt * MONDO_USEC_WAIT_MIN;
662         if (usec_wait > MONDO_USEC_WAIT_MAX)
663                 usec_wait = MONDO_USEC_WAIT_MAX;
664         retries = tot_retries = 0;
665         tot_cpus = cnt;
666         prev_sent = 0;
667
668         do {
669                 int n_sent, mondo_delivered, target_cpu_busy;
670
671                 status = sun4v_cpu_mondo_send(cnt,
672                                               tb->cpu_list_pa,
673                                               tb->cpu_mondo_block_pa);
674
675                 /* HV_EOK means all cpus received the xcall, we're done.  */
676                 if (likely(status == HV_EOK))
677                         goto xcall_done;
678
679                 /* If the error is not one of these non-fatal errors, panic */
680                 if (unlikely((status != HV_EWOULDBLOCK) &&
681                         (status != HV_ECPUERROR) &&
682                         (status != HV_ENOCPU)))
683                         goto fatal_errors;
684
685                 /* First, see if we made any forward progress.
686                  *
687                  * Go through the cpu_list, count the target cpus that have
688                  * received our mondo (n_sent), and those that did not (rem).
689                  * Re-pack cpu_list with the cpus that remain to be retried at
690                  * the front - this simplifies tracking the truly stalled cpus.
691                  *
692                  * The hypervisor indicates successful sends by setting
693                  * cpu list entries to the value 0xffff.
694                  *
695                  * EWOULDBLOCK means some target cpus did not receive the
696                  * mondo and retry usually helps.
697                  *
698                  * ECPUERROR means at least one target cpu is in error state;
699                  * it's usually safe to skip the faulty cpu and retry.
700                  *
701                  * ENOCPU means one of the target cpus doesn't belong to the
702                  * domain, perhaps because it was offlined, which is unexpected
703                  * but not fatal; it's okay to skip the offlined cpu.
704                  */
705                 rem = 0;
706                 n_sent = 0;
707                 for (i = 0; i < cnt; i++) {
708                         cpu = cpu_list[i];
709                         if (likely(cpu == 0xffff)) {
710                                 n_sent++;
711                         } else if ((status == HV_ECPUERROR) &&
712                                 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
713                                 ecpuerror_id = cpu + 1;
714                         } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
715                                 enocpu_id = cpu + 1;
716                         } else {
717                                 cpu_list[rem++] = cpu;
718                         }
719                 }
720
721                 /* No cpu remained, we're done. */
722                 if (rem == 0)
723                         break;
724
725                 /* Otherwise, update the cpu count for retry. */
726                 cnt = rem;
727
728                 /* Record the overall number of mondos received by the
729                  * first of the remaining cpus.
730                  */
731                 if (first_cpu != cpu_list[0]) {
732                         first_cpu = cpu_list[0];
733                         xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
734                 }
735
736                 /* Was any mondo delivered successfully? */
737                 mondo_delivered = (n_sent > prev_sent);
738                 prev_sent = n_sent;
739
740                 /* or, was any target cpu busy processing other mondos? */
741                 target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
742                 xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
743
744                 /* Retry count is for no progress. If we're making progress,
745                  * reset the retry count.
746                  */
747                 if (likely(mondo_delivered || target_cpu_busy)) {
748                         tot_retries += retries;
749                         retries = 0;
750                 } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
751                         goto fatal_mondo_timeout;
752                 }
753
754                 /* Delay a little bit to let other cpus catch up on
755                  * their cpu mondo queue work.
756                  */
757                 if (!mondo_delivered)
758                         udelay(usec_wait);
759
760                 retries++;
761         } while (1);
762
763 xcall_done:
764         if (unlikely(ecpuerror_id > 0)) {
765                 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
766                        this_cpu, ecpuerror_id - 1);
767         } else if (unlikely(enocpu_id > 0)) {
768                 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
769                        this_cpu, enocpu_id - 1);
770         }
771         return;
772
773 fatal_errors:
774         /* fatal errors include bad alignment, etc */
775         pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
776                this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
777         panic("Unexpected SUN4V mondo error %lu\n", status);
778
779 fatal_mondo_timeout:
780         /* some cpus being non-responsive to the cpu mondo */
781         pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
782                this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
783         panic("SUN4V mondo timeout panic\n");
784 }
785
786 static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
787
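/* Common xcall front end: with interrupts disabled, fill in this cpu's
 * mondo data block and target cpu list in its trap_block, then hand
 * delivery off to the chip-specific xcall_deliver_impl routine.
 */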
788 static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
789 {
790         struct trap_per_cpu *tb;
791         int this_cpu, i, cnt;
792         unsigned long flags;
793         u16 *cpu_list;
794         u64 *mondo;
795
796         /* We have to do this whole thing with interrupts fully disabled.
797          * Otherwise if we send an xcall from interrupt context it will
798          * corrupt both our mondo block and cpu list state.
799          *
800          * One consequence of this is that we cannot use timeout mechanisms
801          * that depend upon interrupts being delivered locally.  So, for
802          * example, we cannot sample jiffies and expect it to advance.
803          *
804          * Fortunately, udelay() uses %stick/%tick so we can use that.
805          */
806         local_irq_save(flags);
807
808         this_cpu = smp_processor_id();
809         tb = &trap_block[this_cpu];
810
811         mondo = __va(tb->cpu_mondo_block_pa);
812         mondo[0] = data0;
813         mondo[1] = data1;
814         mondo[2] = data2;
815         wmb();
816
817         cpu_list = __va(tb->cpu_list_pa);
818
819         /* Setup the initial cpu list.  */
820         cnt = 0;
821         for_each_cpu(i, mask) {
822                 if (i == this_cpu || !cpu_online(i))
823                         continue;
824                 cpu_list[cnt++] = i;
825         }
826
827         if (cnt)
828                 xcall_deliver_impl(tb, cnt);
829
830         local_irq_restore(flags);
831 }
832
833 /* Send cross call to all processors mentioned in MASK_P
834  * except self.  Really, there are only two cases currently,
835  * "cpu_online_mask" and "mm_cpumask(mm)".
836  */
837 static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
838 {
839         u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
840
841         xcall_deliver(data0, data1, data2, mask);
842 }
843
844 /* Send cross call to all processors except self. */
845 static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
846 {
847         smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
848 }
849
850 extern unsigned long xcall_sync_tick;
851
852 static void smp_start_sync_tick_client(int cpu)
853 {
854         xcall_deliver((u64) &xcall_sync_tick, 0, 0,
855                       cpumask_of(cpu));
856 }
857
858 extern unsigned long xcall_call_function;
859
860 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
861 {
862         xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
863 }
864
865 extern unsigned long xcall_call_function_single;
866
867 void arch_send_call_function_single_ipi(int cpu)
868 {
869         xcall_deliver((u64) &xcall_call_function_single, 0, 0,
870                       cpumask_of(cpu));
871 }
872
873 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
874 {
875         clear_softint(1 << irq);
876         irq_enter();
877         generic_smp_call_function_interrupt();
878         irq_exit();
879 }
880
881 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
882 {
883         clear_softint(1 << irq);
884         irq_enter();
885         generic_smp_call_function_single_interrupt();
886         irq_exit();
887 }
888
889 static void tsb_sync(void *info)
890 {
891         struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
892         struct mm_struct *mm = info;
893
894         /* It is not valid to test "current->active_mm == mm" here.
895          *
896          * The value of "current" is not changed atomically with
897          * switch_mm().  But that's OK, we just need to check the
898          * current cpu's trap block PGD physical address.
899          */
900         if (tp->pgd_paddr == __pa(mm->pgd))
901                 tsb_context_switch(mm);
902 }
903
904 void smp_tsb_sync(struct mm_struct *mm)
905 {
906         smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
907 }
908
909 extern unsigned long xcall_flush_tlb_mm;
910 extern unsigned long xcall_flush_tlb_page;
911 extern unsigned long xcall_flush_tlb_kernel_range;
912 extern unsigned long xcall_fetch_glob_regs;
913 extern unsigned long xcall_fetch_glob_pmu;
914 extern unsigned long xcall_fetch_glob_pmu_n4;
915 extern unsigned long xcall_receive_signal;
916 extern unsigned long xcall_new_mmu_context_version;
917 #ifdef CONFIG_KGDB
918 extern unsigned long xcall_kgdb_capture;
919 #endif
920
921 #ifdef DCACHE_ALIASING_POSSIBLE
922 extern unsigned long xcall_flush_dcache_page_cheetah;
923 #endif
924 extern unsigned long xcall_flush_dcache_page_spitfire;
925
926 static inline void __local_flush_dcache_page(struct page *page)
927 {
928 #ifdef DCACHE_ALIASING_POSSIBLE
929         __flush_dcache_page(page_address(page),
930                             ((tlb_type == spitfire) &&
931                              page_mapping_file(page) != NULL));
932 #else
933         if (page_mapping_file(page) != NULL &&
934             tlb_type == spitfire)
935                 __flush_icache_page(__pa(page_address(page)));
936 #endif
937 }
938
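/* Flush one page from the D-cache of a particular cpu: locally when 'cpu'
 * is the calling cpu, otherwise by cross-calling the spitfire or cheetah
 * flush handler on that cpu.  This is a no-op on sun4v.
 */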
939 void smp_flush_dcache_page_impl(struct page *page, int cpu)
940 {
941         int this_cpu;
942
943         if (tlb_type == hypervisor)
944                 return;
945
946 #ifdef CONFIG_DEBUG_DCFLUSH
947         atomic_inc(&dcpage_flushes);
948 #endif
949
950         this_cpu = get_cpu();
951
952         if (cpu == this_cpu) {
953                 __local_flush_dcache_page(page);
954         } else if (cpu_online(cpu)) {
955                 void *pg_addr = page_address(page);
956                 u64 data0 = 0;
957
958                 if (tlb_type == spitfire) {
959                         data0 = ((u64)&xcall_flush_dcache_page_spitfire);
960                         if (page_mapping_file(page) != NULL)
961                                 data0 |= ((u64)1 << 32);
962                 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
963 #ifdef DCACHE_ALIASING_POSSIBLE
964                         data0 = ((u64)&xcall_flush_dcache_page_cheetah);
965 #endif
966                 }
967                 if (data0) {
968                         xcall_deliver(data0, __pa(pg_addr),
969                                       (u64) pg_addr, cpumask_of(cpu));
970 #ifdef CONFIG_DEBUG_DCFLUSH
971                         atomic_inc(&dcpage_flushes_xcall);
972 #endif
973                 }
974         }
975
976         put_cpu();
977 }
978
979 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
980 {
981         void *pg_addr;
982         u64 data0;
983
984         if (tlb_type == hypervisor)
985                 return;
986
987         preempt_disable();
988
989 #ifdef CONFIG_DEBUG_DCFLUSH
990         atomic_inc(&dcpage_flushes);
991 #endif
992         data0 = 0;
993         pg_addr = page_address(page);
994         if (tlb_type == spitfire) {
995                 data0 = ((u64)&xcall_flush_dcache_page_spitfire);
996                 if (page_mapping_file(page) != NULL)
997                         data0 |= ((u64)1 << 32);
998         } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
999 #ifdef DCACHE_ALIASING_POSSIBLE
1000                 data0 = ((u64)&xcall_flush_dcache_page_cheetah);
1001 #endif
1002         }
1003         if (data0) {
1004                 xcall_deliver(data0, __pa(pg_addr),
1005                               (u64) pg_addr, cpu_online_mask);
1006 #ifdef CONFIG_DEBUG_DCFLUSH
1007                 atomic_inc(&dcpage_flushes_xcall);
1008 #endif
1009         }
1010         __local_flush_dcache_page(page);
1011
1012         preempt_enable();
1013 }
1014
1015 #ifdef CONFIG_KGDB
1016 void kgdb_roundup_cpus(void)
1017 {
1018         smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
1019 }
1020 #endif
1021
1022 void smp_fetch_global_regs(void)
1023 {
1024         smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
1025 }
1026
1027 void smp_fetch_global_pmu(void)
1028 {
1029         if (tlb_type == hypervisor &&
1030             sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
1031                 smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
1032         else
1033                 smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
1034 }
1035
1036 /* We know that the window frames of the user have been flushed
1037  * to the stack before we get here because all callers of us
1038  * are flush_tlb_*() routines, and these run after flush_cache_*()
1039  * which performs the flushw.
1040  *
1041  * The SMP TLB coherency scheme we use works as follows:
1042  *
1043  * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
1044  *    space has (potentially) executed on, this is the heuristic
1045  *    we use to avoid doing cross calls.
1046  *
1047  *    Also, for flushing from kswapd and also for clones, we
1048  *    use cpu_vm_mask as the list of cpus on which to run the TLB flush.
1049  *
1050  * 2) TLB context numbers are shared globally across all processors
1051  *    in the system, this allows us to play several games to avoid
1052  *    cross calls.
1053  *
1054  *    One invariant is that when a cpu switches to a process, and
1055  *    that process's tsk->active_mm->cpu_vm_mask does not have the
1056  *    current cpu's bit set, that tlb context is flushed locally.
1057  *
1058  *    If the address space is non-shared (i.e. mm->count == 1) we avoid
1059  *    cross calls when we want to flush the currently running process's
1060  *    tlb state.  This is done by clearing all cpu bits except the current
1061  *    processor's in current->mm->cpu_vm_mask and performing the
1062  *    flush locally only.  This will force any subsequent cpus which run
1063  *    this task to flush the context from the local tlb if the process
1064  *    migrates to another cpu (again).
1065  *
1066  * 3) For shared address spaces (threads) and swapping we bite the
1067  *    bullet for most cases and perform the cross call (but only to
1068  *    the cpus listed in cpu_vm_mask).
1069  *
1070  *    The performance gain from "optimizing" away the cross call for threads is
1071  *    questionable (in theory the big win for threads is the massive sharing of
1072  *    address space state across processors).
1073  */
1074
1075 /* This currently is only used by the hugetlb arch pre-fault
1076  * hook on UltraSPARC-III+ and later when changing the pagesize
1077  * bits of the context register for an address space.
1078  */
1079 void smp_flush_tlb_mm(struct mm_struct *mm)
1080 {
1081         u32 ctx = CTX_HWBITS(mm->context);
1082         int cpu = get_cpu();
1083
1084         if (atomic_read(&mm->mm_users) == 1) {
1085                 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1086                 goto local_flush_and_out;
1087         }
1088
1089         smp_cross_call_masked(&xcall_flush_tlb_mm,
1090                               ctx, 0, 0,
1091                               mm_cpumask(mm));
1092
1093 local_flush_and_out:
1094         __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
1095
1096         put_cpu();
1097 }
1098
1099 struct tlb_pending_info {
1100         unsigned long ctx;
1101         unsigned long nr;
1102         unsigned long *vaddrs;
1103 };
1104
1105 static void tlb_pending_func(void *info)
1106 {
1107         struct tlb_pending_info *t = info;
1108
1109         __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
1110 }
1111
1112 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
1113 {
1114         u32 ctx = CTX_HWBITS(mm->context);
1115         struct tlb_pending_info info;
1116         int cpu = get_cpu();
1117
1118         info.ctx = ctx;
1119         info.nr = nr;
1120         info.vaddrs = vaddrs;
1121
1122         if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1123                 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1124         else
1125                 smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
1126                                        &info, 1);
1127
1128         __flush_tlb_pending(ctx, nr, vaddrs);
1129
1130         put_cpu();
1131 }
1132
1133 void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
1134 {
1135         unsigned long context = CTX_HWBITS(mm->context);
1136         int cpu = get_cpu();
1137
1138         if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
1139                 cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
1140         else
1141                 smp_cross_call_masked(&xcall_flush_tlb_page,
1142                                       context, vaddr, 0,
1143                                       mm_cpumask(mm));
1144         __flush_tlb_page(context, vaddr);
1145
1146         put_cpu();
1147 }
1148
1149 void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
1150 {
1151         start &= PAGE_MASK;
1152         end    = PAGE_ALIGN(end);
1153         if (start != end) {
1154                 smp_cross_call(&xcall_flush_tlb_kernel_range,
1155                                0, start, end);
1156
1157                 __flush_tlb_kernel_range(start, end);
1158         }
1159 }
1160
1161 /* CPU capture. */
1162 /* #define CAPTURE_DEBUG */
1163 extern unsigned long xcall_capture;
1164
1165 static atomic_t smp_capture_depth = ATOMIC_INIT(0);
1166 static atomic_t smp_capture_registry = ATOMIC_INIT(0);
1167 static unsigned long penguins_are_doing_time;
1168
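/* Round up all other online cpus into smp_penguin_jailcell() and wait
 * until every one of them has checked in.  Captures nest; only the
 * outermost call sends the cross call, and smp_release() lets them go.
 */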
1169 void smp_capture(void)
1170 {
1171         int result = atomic_add_return(1, &smp_capture_depth);
1172
1173         if (result == 1) {
1174                 int ncpus = num_online_cpus();
1175
1176 #ifdef CAPTURE_DEBUG
1177                 printk("CPU[%d]: Sending penguins to jail...",
1178                        smp_processor_id());
1179 #endif
1180                 penguins_are_doing_time = 1;
1181                 atomic_inc(&smp_capture_registry);
1182                 smp_cross_call(&xcall_capture, 0, 0, 0);
1183                 while (atomic_read(&smp_capture_registry) != ncpus)
1184                         rmb();
1185 #ifdef CAPTURE_DEBUG
1186                 printk("done\n");
1187 #endif
1188         }
1189 }
1190
1191 void smp_release(void)
1192 {
1193         if (atomic_dec_and_test(&smp_capture_depth)) {
1194 #ifdef CAPTURE_DEBUG
1195                 printk("CPU[%d]: Giving pardon to "
1196                        "imprisoned penguins\n",
1197                        smp_processor_id());
1198 #endif
1199                 penguins_are_doing_time = 0;
1200                 membar_safe("#StoreLoad");
1201                 atomic_dec(&smp_capture_registry);
1202         }
1203 }
1204
1205 /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
1206  * set, so they can service tlb flush xcalls...
1207  */
1208 extern void prom_world(int);
1209
1210 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
1211 {
1212         clear_softint(1 << irq);
1213
1214         preempt_disable();
1215
1216         __asm__ __volatile__("flushw");
1217         prom_world(1);
1218         atomic_inc(&smp_capture_registry);
1219         membar_safe("#StoreLoad");
1220         while (penguins_are_doing_time)
1221                 rmb();
1222         atomic_dec(&smp_capture_registry);
1223         prom_world(0);
1224
1225         preempt_enable();
1226 }
1227
1228 /* /proc/profile writes can call this, don't __init it please. */
1229 int setup_profiling_timer(unsigned int multiplier)
1230 {
1231         return -EINVAL;
1232 }
1233
1234 void __init smp_prepare_cpus(unsigned int max_cpus)
1235 {
1236 }
1237
1238 void smp_prepare_boot_cpu(void)
1239 {
1240 }
1241
1242 void __init smp_setup_processor_id(void)
1243 {
1244         if (tlb_type == spitfire)
1245                 xcall_deliver_impl = spitfire_xcall_deliver;
1246         else if (tlb_type == cheetah || tlb_type == cheetah_plus)
1247                 xcall_deliver_impl = cheetah_xcall_deliver;
1248         else
1249                 xcall_deliver_impl = hypervisor_xcall_deliver;
1250 }
1251
1252 void __init smp_fill_in_cpu_possible_map(void)
1253 {
1254         int possible_cpus = num_possible_cpus();
1255         int i;
1256
1257         if (possible_cpus > nr_cpu_ids)
1258                 possible_cpus = nr_cpu_ids;
1259
1260         for (i = 0; i < possible_cpus; i++)
1261                 set_cpu_possible(i, true);
1262         for (; i < NR_CPUS; i++)
1263                 set_cpu_possible(i, false);
1264 }
1265
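/* Rebuild the core, socket/cache-sibling and thread-sibling cpumasks from
 * the per-cpu core_id, max_cache_id, sock_id and proc_id values filled in
 * during cpu probing.
 */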
1266 void smp_fill_in_sib_core_maps(void)
1267 {
1268         unsigned int i;
1269
1270         for_each_present_cpu(i) {
1271                 unsigned int j;
1272
1273                 cpumask_clear(&cpu_core_map[i]);
1274                 if (cpu_data(i).core_id == 0) {
1275                         cpumask_set_cpu(i, &cpu_core_map[i]);
1276                         continue;
1277                 }
1278
1279                 for_each_present_cpu(j) {
1280                         if (cpu_data(i).core_id ==
1281                             cpu_data(j).core_id)
1282                                 cpumask_set_cpu(j, &cpu_core_map[i]);
1283                 }
1284         }
1285
1286         for_each_present_cpu(i)  {
1287                 unsigned int j;
1288
1289                 for_each_present_cpu(j)  {
1290                         if (cpu_data(i).max_cache_id ==
1291                             cpu_data(j).max_cache_id)
1292                                 cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);
1293
1294                         if (cpu_data(i).sock_id == cpu_data(j).sock_id)
1295                                 cpumask_set_cpu(j, &cpu_core_sib_map[i]);
1296                 }
1297         }
1298
1299         for_each_present_cpu(i) {
1300                 unsigned int j;
1301
1302                 cpumask_clear(&per_cpu(cpu_sibling_map, i));
1303                 if (cpu_data(i).proc_id == -1) {
1304                         cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
1305                         continue;
1306                 }
1307
1308                 for_each_present_cpu(j) {
1309                         if (cpu_data(i).proc_id ==
1310                             cpu_data(j).proc_id)
1311                                 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
1312                 }
1313         }
1314 }
1315
1316 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1317 {
1318         int ret = smp_boot_one_cpu(cpu, tidle);
1319
1320         if (!ret) {
1321                 cpumask_set_cpu(cpu, &smp_commenced_mask);
1322                 while (!cpu_online(cpu))
1323                         mb();
1324                 if (!cpu_online(cpu)) {
1325                         ret = -ENODEV;
1326                 } else {
1327                         /* On SUN4V, writes to %tick and %stick are
1328                          * not allowed.
1329                          */
1330                         if (tlb_type != hypervisor)
1331                                 smp_synchronize_one_tick(cpu);
1332                 }
1333         }
1334         return ret;
1335 }
1336
1337 #ifdef CONFIG_HOTPLUG_CPU
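/* Final resting place for a cpu going offline: detach its sun4v mondo and
 * error queues, drop out of smp_commenced_mask and spin with interrupts
 * disabled until the cpu is stopped from the outside.
 */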
1338 void cpu_play_dead(void)
1339 {
1340         int cpu = smp_processor_id();
1341         unsigned long pstate;
1342
1343         idle_task_exit();
1344
1345         if (tlb_type == hypervisor) {
1346                 struct trap_per_cpu *tb = &trap_block[cpu];
1347
1348                 sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
1349                                 tb->cpu_mondo_pa, 0);
1350                 sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
1351                                 tb->dev_mondo_pa, 0);
1352                 sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
1353                                 tb->resum_mondo_pa, 0);
1354                 sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
1355                                 tb->nonresum_mondo_pa, 0);
1356         }
1357
1358         cpumask_clear_cpu(cpu, &smp_commenced_mask);
1359         membar_safe("#Sync");
1360
1361         local_irq_disable();
1362
1363         __asm__ __volatile__(
1364                 "rdpr   %%pstate, %0\n\t"
1365                 "wrpr   %0, %1, %%pstate"
1366                 : "=r" (pstate)
1367                 : "i" (PSTATE_IE));
1368
1369         while (1)
1370                 barrier();
1371 }
1372
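/* Take a cpu out of service: remove it from the topology maps, steer its
 * interrupts elsewhere via fixup_irqs(), mark it offline and rebuild the
 * cpu distribution map.
 */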
1373 int __cpu_disable(void)
1374 {
1375         int cpu = smp_processor_id();
1376         cpuinfo_sparc *c;
1377         int i;
1378
1379         for_each_cpu(i, &cpu_core_map[cpu])
1380                 cpumask_clear_cpu(cpu, &cpu_core_map[i]);
1381         cpumask_clear(&cpu_core_map[cpu]);
1382
1383         for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
1384                 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
1385         cpumask_clear(&per_cpu(cpu_sibling_map, cpu));
1386
1387         c = &cpu_data(cpu);
1388
1389         c->core_id = 0;
1390         c->proc_id = -1;
1391
1392         smp_wmb();
1393
1394         /* Make sure no interrupts point to this cpu.  */
1395         fixup_irqs();
1396
1397         local_irq_enable();
1398         mdelay(1);
1399         local_irq_disable();
1400
1401         set_cpu_online(cpu, false);
1402
1403         cpu_map_rebuild();
1404
1405         return 0;
1406 }
1407
1408 void __cpu_die(unsigned int cpu)
1409 {
1410         int i;
1411
1412         for (i = 0; i < 100; i++) {
1413                 smp_rmb();
1414                 if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
1415                         break;
1416                 msleep(100);
1417         }
1418         if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
1419                 printk(KERN_ERR "CPU %u didn't die...\n", cpu);
1420         } else {
1421 #if defined(CONFIG_SUN_LDOMS)
1422                 unsigned long hv_err;
1423                 int limit = 100;
1424
1425                 do {
1426                         hv_err = sun4v_cpu_stop(cpu);
1427                         if (hv_err == HV_EOK) {
1428                                 set_cpu_present(cpu, false);
1429                                 break;
1430                         }
1431                 } while (--limit > 0);
1432                 if (limit <= 0) {
1433                         printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
1434                                hv_err);
1435                 }
1436 #endif
1437         }
1438 }
1439 #endif
1440
1441 void __init smp_cpus_done(unsigned int max_cpus)
1442 {
1443 }
1444
1445 static void send_cpu_ipi(int cpu)
1446 {
1447         xcall_deliver((u64) &xcall_receive_signal,
1448                         0, 0, cpumask_of(cpu));
1449 }
1450
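/* If a poke is pending for this cpu (set by send_cpu_poke() just before
 * sun4v_cpu_poke()), clear it and raise the reschedule softint locally so
 * the newly woken cpu runs through the scheduler.
 */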
1451 void scheduler_poke(void)
1452 {
1453         if (!cpu_poke)
1454                 return;
1455
1456         if (!__this_cpu_read(poke))
1457                 return;
1458
1459         __this_cpu_write(poke, false);
1460         set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1461 }
1462
1463 static unsigned long send_cpu_poke(int cpu)
1464 {
1465         unsigned long hv_err;
1466
1467         per_cpu(poke, cpu) = true;
1468         hv_err = sun4v_cpu_poke(cpu);
1469         if (hv_err != HV_EOK) {
1470                 per_cpu(poke, cpu) = false;
1471                 pr_err_ratelimited("%s: sun4v_cpu_poke() fails err=%lu\n",
1472                                     __func__, hv_err);
1473         }
1474
1475         return hv_err;
1476 }
1477
1478 void smp_send_reschedule(int cpu)
1479 {
1480         if (cpu == smp_processor_id()) {
1481                 WARN_ON_ONCE(preemptible());
1482                 set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
1483                 return;
1484         }
1485
1486         /* Use cpu poke to resume idle cpu if supported. */
1487         if (cpu_poke && idle_cpu(cpu)) {
1488                 unsigned long ret;
1489
1490                 ret = send_cpu_poke(cpu);
1491                 if (ret == HV_EOK)
1492                         return;
1493         }
1494
1495         /* Use an IPI in the following cases:
1496          * - cpu poke not supported
1497          * - cpu not idle
1498          * - send_cpu_poke() returns with error
1499          */
1500         send_cpu_ipi(cpu);
1501 }
1502
1503 void smp_init_cpu_poke(void)
1504 {
1505         unsigned long major;
1506         unsigned long minor;
1507         int ret;
1508
1509         if (tlb_type != hypervisor)
1510                 return;
1511
1512         ret = sun4v_hvapi_get(HV_GRP_CORE, &major, &minor);
1513         if (ret) {
1514                 pr_debug("HV_GRP_CORE is not registered\n");
1515                 return;
1516         }
1517
1518         if (major == 1 && minor >= 6) {
1519                 /* CPU POKE is registered. */
1520                 cpu_poke = true;
1521                 return;
1522         }
1523
1524         pr_debug("CPU_POKE not supported\n");
1525 }
1526
1527 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
1528 {
1529         clear_softint(1 << irq);
1530         scheduler_ipi();
1531 }
1532
1533 static void stop_this_cpu(void *dummy)
1534 {
1535         set_cpu_online(smp_processor_id(), false);
1536         prom_stopself();
1537 }
1538
1539 void smp_send_stop(void)
1540 {
1541         int cpu;
1542
1543         if (tlb_type == hypervisor) {
1544                 int this_cpu = smp_processor_id();
1545 #ifdef CONFIG_SERIAL_SUNHV
1546                 sunhv_migrate_hvcons_irq(this_cpu);
1547 #endif
1548                 for_each_online_cpu(cpu) {
1549                         if (cpu == this_cpu)
1550                                 continue;
1551
1552                         set_cpu_online(cpu, false);
1553 #ifdef CONFIG_SUN_LDOMS
1554                         if (ldom_domaining_enabled) {
1555                                 unsigned long hv_err;
1556                                 hv_err = sun4v_cpu_stop(cpu);
1557                                 if (hv_err)
1558                                         printk(KERN_ERR "sun4v_cpu_stop() "
1559                                                "failed err=%lu\n", hv_err);
1560                         } else
1561 #endif
1562                                 prom_stopcpu_cpuid(cpu);
1563                 }
1564         } else
1565                 smp_call_function(stop_this_cpu, NULL, 0);
1566 }
1567
1568 /**
1569  * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
1570  * @cpu: cpu to allocate for
1571  * @size: size of the allocation in bytes
1572  * @align: alignment
1573  *
1574  * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
1575  * does the right thing for NUMA regardless of the current
1576  * configuration.
1577  *
1578  * RETURNS:
1579  * Pointer to the allocated area on success, NULL on failure.
1580  */
1581 static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1582                                         size_t align)
1583 {
1584         const unsigned long goal = __pa(MAX_DMA_ADDRESS);
1585 #ifdef CONFIG_NEED_MULTIPLE_NODES
1586         int node = cpu_to_node(cpu);
1587         void *ptr;
1588
1589         if (!node_online(node) || !NODE_DATA(node)) {
1590                 ptr = memblock_alloc_from(size, align, goal);
1591                 pr_info("cpu %d has no node %d or node-local memory\n",
1592                         cpu, node);
1593                 pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
1594                          cpu, size, __pa(ptr));
1595         } else {
1596                 ptr = memblock_alloc_try_nid(size, align, goal,
1597                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
1598                 pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1599                          "%016lx\n", cpu, size, node, __pa(ptr));
1600         }
1601         return ptr;
1602 #else
1603         return memblock_alloc_from(size, align, goal);
1604 #endif
1605 }
1606
1607 static void __init pcpu_free_bootmem(void *ptr, size_t size)
1608 {
1609         memblock_free(__pa(ptr), size);
1610 }
1611
1612 static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
1613 {
1614         if (cpu_to_node(from) == cpu_to_node(to))
1615                 return LOCAL_DISTANCE;
1616         else
1617                 return REMOTE_DISTANCE;
1618 }
1619
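/* Make sure kernel page tables are populated for one page of the percpu
 * first chunk, allocating any missing intermediate levels from memblock.
 */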
1620 static void __init pcpu_populate_pte(unsigned long addr)
1621 {
1622         pgd_t *pgd = pgd_offset_k(addr);
1623         p4d_t *p4d;
1624         pud_t *pud;
1625         pmd_t *pmd;
1626
1627         if (pgd_none(*pgd)) {
1628                 pud_t *new;
1629
1630                 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1631                 if (!new)
1632                         goto err_alloc;
1633                 pgd_populate(&init_mm, pgd, new);
1634         }
1635
1636         p4d = p4d_offset(pgd, addr);
1637         if (p4d_none(*p4d)) {
1638                 pud_t *new;
1639
1640                 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1641                 if (!new)
1642                         goto err_alloc;
1643                 p4d_populate(&init_mm, p4d, new);
1644         }
1645
1646         pud = pud_offset(p4d, addr);
1647         if (pud_none(*pud)) {
1648                 pmd_t *new;
1649
1650                 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1651                 if (!new)
1652                         goto err_alloc;
1653                 pud_populate(&init_mm, pud, new);
1654         }
1655
1656         pmd = pmd_offset(pud, addr);
1657         if (!pmd_present(*pmd)) {
1658                 pte_t *new;
1659
1660                 new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1661                 if (!new)
1662                         goto err_alloc;
1663                 pmd_populate_kernel(&init_mm, pmd, new);
1664         }
1665
1666         return;
1667
1668 err_alloc:
1669         panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
1670               __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1671 }
1672
1673 void __init setup_per_cpu_areas(void)
1674 {
1675         unsigned long delta;
1676         unsigned int cpu;
1677         int rc = -EINVAL;
1678
1679         if (pcpu_chosen_fc != PCPU_FC_PAGE) {
1680                 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1681                                             PERCPU_DYNAMIC_RESERVE, 4 << 20,
1682                                             pcpu_cpu_distance,
1683                                             pcpu_alloc_bootmem,
1684                                             pcpu_free_bootmem);
1685                 if (rc)
1686                         pr_warn("PERCPU: %s allocator failed (%d), "
1687                                 "falling back to page size\n",
1688                                 pcpu_fc_names[pcpu_chosen_fc], rc);
1689         }
1690         if (rc < 0)
1691                 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
1692                                            pcpu_alloc_bootmem,
1693                                            pcpu_free_bootmem,
1694                                            pcpu_populate_pte);
1695         if (rc < 0)
1696                 panic("cannot initialize percpu area (err=%d)", rc);
1697
1698         delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1699         for_each_possible_cpu(cpu)
1700                 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
1701
1702         /* Setup %g5 for the boot cpu.  */
1703         __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
1704
1705         of_fill_in_cpu_data();
1706         if (tlb_type == hypervisor)
1707                 mdesc_fill_in_cpu_data(cpu_all_mask);
1708 }