arch/powerpc/mm/numa.c
/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
        unsigned int node;

        /* setup nr_node_ids if not done yet */
        if (nr_node_ids == MAX_NUMNODES)
                setup_nr_node_ids();

        /* allocate the map */
        for (node = 0; node < nr_node_ids; node++)
                alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

        /* cpumask_of_node() will now work */
        dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
                                                unsigned int *nid)
{
        unsigned long long mem;
        char *p = cmdline;
        static unsigned int fake_nid;
        static unsigned long long curr_boundary;
        /*
         * Modify the node id only if we have already started creating
         * NUMA nodes; we want to continue from where we left off last time.
         */
        if (fake_nid)
                *nid = fake_nid;
        /*
         * In case there are no more arguments to parse, the
         * node_id should be the same as the last fake node id
         * (we've handled this above).
         */
        if (!p)
                return 0;

        mem = memparse(p, &p);
        if (!mem)
                return 0;

        if (mem < curr_boundary)
                return 0;

        curr_boundary = mem;

        if ((end_pfn << PAGE_SHIFT) > mem) {
                /*
                 * Skip commas and spaces
                 */
                while (*p == ',' || *p == ' ' || *p == '\t')
                        p++;

                cmdline = p;
                fake_nid++;
                *nid = fake_nid;
                dbg("created new fake_node with id %d\n", fake_nid);
                return 1;
        }
        return 0;
}
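
/*
 * Illustrative example: booting with "numa=fake=1G,4G" sets boundaries at
 * 1G and 4G.  Regions ending at or below 1G stay in fake node 0; the first
 * region ending above 1G bumps fake_nid to 1, and the first region ending
 * above 4G bumps it to 2.
 */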

/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
                                          struct node_active_region *node_ar)
{
        unsigned long start_pfn, end_pfn;
        int i, nid;

        for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
                if (pfn >= start_pfn && pfn < end_pfn) {
                        node_ar->nid = nid;
                        node_ar->start_pfn = start_pfn;
                        node_ar->end_pfn = end_pfn;
                        break;
                }
        }
}

static void map_cpu_to_node(int cpu, int node)
{
        numa_cpu_lookup_table[cpu] = node;

        dbg("adding cpu %d to node %d\n", cpu, node);

        if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
                cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
        int node = numa_cpu_lookup_table[cpu];

        dbg("removing cpu %lu from node %d\n", cpu, node);

        if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
                cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
        } else {
                printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
                       cpu, node);
        }
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
        return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;
        prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return NULL;
        return prop;
}

int __node_distance(int a, int b)
{
        int i;
        int distance = LOCAL_DISTANCE;

        if (!form1_affinity)
                return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

        for (i = 0; i < distance_ref_points_depth; i++) {
                if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
                        break;

                /* Double the distance for each NUMA level */
                distance *= 2;
        }

        return distance;
}
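
/*
 * Worked example: with form 1 affinity and distance_ref_points_depth = 3,
 * nodes whose reference-point entries already match at index 0 are
 * LOCAL_DISTANCE (10) apart; a first match at index 1 gives 20, at index 2
 * gives 40, and no match at any level gives 80 (10 * 2^3).
 */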

static void initialize_distance_lookup_table(int nid,
                const __be32 *associativity)
{
        int i;

        if (!form1_affinity)
                return;

        for (i = 0; i < distance_ref_points_depth; i++) {
                const __be32 *entry;

                entry = &associativity[be32_to_cpu(distance_ref_points[i])];
                distance_lookup_table[nid][i] = of_read_number(entry, 1);
        }
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
        int nid = -1;

        if (min_common_depth == -1)
                goto out;

        if (of_read_number(associativity, 1) >= min_common_depth)
                nid = of_read_number(&associativity[min_common_depth], 1);

        /* POWER4 LPAR uses 0xffff as invalid node */
        if (nid == 0xffff || nid >= MAX_NUMNODES)
                nid = -1;

        if (nid > 0 &&
            of_read_number(associativity, 1) >= distance_ref_points_depth)
                initialize_distance_lookup_table(nid, associativity);

out:
        return nid;
}
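
/*
 * Example: given an ibm,associativity property of { 4, 0, 0, 0, 1 } and
 * min_common_depth = 4, the first cell (the number of levels, 4) is
 * >= min_common_depth, so the nid is read from associativity[4], i.e. 1.
 */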

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
        int nid = -1;
        const __be32 *tmp;

        tmp = of_get_associativity(device);
        if (tmp)
                nid = associativity_to_nid(tmp);
        return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
        struct device_node *tmp;
        int nid = -1;

        of_node_get(device);
        while (device) {
                nid = of_node_to_nid_single(device);
                if (nid != -1)
                        break;

                tmp = device;
                device = of_get_parent(tmp);
                of_node_put(tmp);
        }
        of_node_put(device);

        return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
        int depth;
        struct device_node *root;

        if (firmware_has_feature(FW_FEATURE_OPAL))
                root = of_find_node_by_path("/ibm,opal");
        else
                root = of_find_node_by_path("/rtas");
        if (!root)
                root = of_find_node_by_path("/");

        /*
         * This property is a set of 32-bit integers, each representing
         * an index into the ibm,associativity nodes.
         *
         * With form 0 affinity the first integer is for an SMP configuration
         * (should be all 0's) and the second is for a normal NUMA
         * configuration. We have only one level of NUMA.
         *
         * With form 1 affinity the first integer is the most significant
         * NUMA boundary and the following are progressively less significant
         * boundaries. There can be more than one level of NUMA.
         */
        distance_ref_points = of_get_property(root,
                                        "ibm,associativity-reference-points",
                                        &distance_ref_points_depth);

        if (!distance_ref_points) {
                dbg("NUMA: ibm,associativity-reference-points not found.\n");
                goto err;
        }

        distance_ref_points_depth /= sizeof(int);

        if (firmware_has_feature(FW_FEATURE_OPAL) ||
            firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
                dbg("Using form 1 affinity\n");
                form1_affinity = 1;
        }

        if (form1_affinity) {
                depth = of_read_number(distance_ref_points, 1);
        } else {
                if (distance_ref_points_depth < 2) {
                        printk(KERN_WARNING "NUMA: "
                                "short ibm,associativity-reference-points\n");
                        goto err;
                }

                depth = of_read_number(&distance_ref_points[1], 1);
        }

        /*
         * Warn and cap if the hardware supports more than
         * MAX_DISTANCE_REF_POINTS domains.
         */
        if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
                printk(KERN_WARNING "NUMA: distance array capped at "
                        "%d entries\n", MAX_DISTANCE_REF_POINTS);
                distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
        }

        of_node_put(root);
        return depth;

err:
        of_node_put(root);
        return -1;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
        struct device_node *memory = NULL;

        memory = of_find_node_by_type(memory, "memory");
        if (!memory)
                panic("numa.c: No memory nodes found!");

        *n_addr_cells = of_n_addr_cells(memory);
        *n_size_cells = of_n_size_cells(memory);
        of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | of_read_number(*buf, 1);
                (*buf)++;
        }
        return result;
}
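
/*
 * Example: with n = 2 and cells { 0x00000001, 0x00000002 }, read_n_cells()
 * returns 0x0000000100000002 and leaves *buf pointing past both cells.
 */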

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
        const __be32 *cp;

        drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

        cp = *cellp;
        drmem->drc_index = of_read_number(cp, 1);
        drmem->reserved = of_read_number(&cp[1], 1);
        drmem->aa_index = of_read_number(&cp[2], 1);
        drmem->flags = of_read_number(&cp[3], 1);

        *cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a cell containing the
 * number N of entries, followed by N memblock list entries.  Each memblock
 * list entry contains information as laid out in the of_drconf_cell struct
 * above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
        const __be32 *prop;
        u32 len, entries;

        prop = of_get_property(memory, "ibm,dynamic-memory", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        entries = of_read_number(prop++, 1);

        /* Now that we know the number of entries, revalidate the size
         * of the property read in to ensure we have everything
         */
        if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
                return 0;

        *dm = prop;
        return entries;
}
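
/*
 * Example: with n_mem_addr_cells = 2, each list entry occupies six cells
 * (two address cells plus drc_index, reserved, aa_index and flags), so a
 * valid property with N entries is at least (N * 6 + 1) cells long.
 */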

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,lmb-size", &len);
        if (!prop || len < sizeof(unsigned int))
                return 0;

        return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
        u32     n_arrays;
        u32     array_sz;
        const __be32 *arrays;
};
/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
                               struct assoc_arrays *aa)
{
        const __be32 *prop;
        u32 len;

        prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
        if (!prop || len < 2 * sizeof(unsigned int))
                return -1;

        aa->n_arrays = of_read_number(prop++, 1);
        aa->array_sz = of_read_number(prop++, 1);

        /* Now that we know the number of arrays and size of each array,
         * revalidate the size of the property read in.
         */
        if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
                return -1;

        aa->arrays = prop;
        return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
                                   struct assoc_arrays *aa)
{
        int default_nid = 0;
        int nid = default_nid;
        int index;

        if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
            !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
            drmem->aa_index < aa->n_arrays) {
                index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
                nid = of_read_number(&aa->arrays[index], 1);

                if (nid == 0xffff || nid >= MAX_NUMNODES)
                        nid = default_nid;
        }

        return nid;
}
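
/*
 * Example: with array_sz = 4, min_common_depth = 4 and aa_index = 2, the
 * nid is read from aa->arrays[2 * 4 + 4 - 1], i.e. the last entry of the
 * third associativity array.
 */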

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
        int nid = 0;
        struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

        if (!cpu) {
                WARN_ON(1);
                goto out;
        }

        nid = of_node_to_nid_single(cpu);

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;
out:
        map_cpu_to_node(lcpu, nid);

        of_node_put(cpu);

        return nid;
}

static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
{
        unsigned long lcpu = (unsigned long)hcpu;
        int ret = NOTIFY_DONE;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                numa_setup_cpu(lcpu);
                ret = NOTIFY_OK;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                unmap_cpu_from_node(lcpu);
                ret = NOTIFY_OK;
                break;
#endif
        }
        return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                      unsigned long size)
{
        /*
         * We use memblock_end_of_DRAM() in here instead of memory_limit because
         * we've already adjusted it for the limit and it takes care of
         * having memory holes below the limit.  Also, in the case of
         * iommu_is_off, memory_limit is not set but is implicitly enforced.
         */

        if (start + size <= memblock_end_of_DRAM())
                return size;

        if (start >= memblock_end_of_DRAM())
                return 0;

        return memblock_end_of_DRAM() - start;
}
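
/*
 * Example: if memblock_end_of_DRAM() is 4G, a region starting at 3.5G with
 * size 1G is trimmed to 512M, while a region starting at or above 4G is
 * discarded (returned size 0).
 */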

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
        /*
         * For each lmb in ibm,dynamic-memory, the corresponding
         * entry in the linux,drconf-usable-memory property contains
         * a counter followed by that many (base, size) pairs.
         * Read the counter from linux,drconf-usable-memory.
         */
        return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
        const __be32 *uninitialized_var(dm), *usm;
        unsigned int n, rc, ranges, is_kexec_kdump = 0;
        unsigned long lmb_size, base, size, sz;
        int nid;
        struct assoc_arrays aa = { .arrays = NULL };

        n = of_get_drconf_memory(memory, &dm);
        if (!n)
                return;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return;

        /* check if this is a kexec/kdump kernel */
        usm = of_get_usable_memory(memory);
        if (usm != NULL)
                is_kexec_kdump = 1;

        for (; n != 0; --n) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if the reserved bit is set in flags (0x80)
                   or if the block is not assigned to this partition (0x8) */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                base = drmem.base_addr;
                size = lmb_size;
                ranges = 1;

                if (is_kexec_kdump) {
                        ranges = read_usm_ranges(&usm);
                        if (!ranges) /* there are no (base, size) pairs */
                                continue;
                }
                do {
                        if (is_kexec_kdump) {
                                base = read_n_cells(n_mem_addr_cells, &usm);
                                size = read_n_cells(n_mem_size_cells, &usm);
                        }
                        nid = of_drconf_to_nid_single(&drmem, &aa);
                        fake_numa_create_new_node(
                                ((base + size) >> PAGE_SHIFT), &nid);
                        node_set_online(nid);
                        sz = numa_enforce_memory_limit(base, size);
                        if (sz)
                                memblock_set_node(base, sz, nid);
                } while (--ranges);
        }
}

static int __init parse_numa_properties(void)
{
        struct device_node *memory;
        int default_nid = 0;
        unsigned long i;

        if (numa_enabled == 0) {
                printk(KERN_WARNING "NUMA disabled by user\n");
                return -1;
        }

        min_common_depth = find_min_common_depth();

        if (min_common_depth < 0)
                return min_common_depth;

        dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

        /*
         * Even though we connect cpus to numa domains later in SMP
         * init, we need to know the node ids now. This is because
         * each node to be onlined must have NODE_DATA etc backing it.
         */
        for_each_present_cpu(i) {
                struct device_node *cpu;
                int nid;

                cpu = of_get_cpu_node(i, NULL);
                BUG_ON(!cpu);
                nid = of_node_to_nid_single(cpu);
                of_node_put(cpu);

                /*
                 * Don't fall back to default_nid yet -- we will plug
                 * cpus into nodes once the memory scan has discovered
                 * the topology.
                 */
                if (nid < 0)
                        continue;
                node_set_online(nid);
        }

        get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

        for_each_node_by_type(memory, "memory") {
                unsigned long start;
                unsigned long size;
                int nid;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory,
                        "linux,usable-memory", &len);
                if (!memcell_buf || len <= 0)
                        memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
                /* these are order-sensitive, and modify the buffer pointer */
                start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                size = read_n_cells(n_mem_size_cells, &memcell_buf);

                /*
                 * Assumption: either all memory nodes or none will
                 * have associativity properties.  If none, then
                 * everything goes to default_nid.
                 */
                nid = of_node_to_nid_single(memory);
                if (nid < 0)
                        nid = default_nid;

                fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
                node_set_online(nid);

                if (!(size = numa_enforce_memory_limit(start, size))) {
                        if (--ranges)
                                goto new_range;
                        else
                                continue;
                }

                memblock_set_node(start, size, nid);

                if (--ranges)
                        goto new_range;
        }

        /*
         * Now do the same thing for each MEMBLOCK listed in the
         * ibm,dynamic-memory property in the
         * ibm,dynamic-reconfiguration-memory node.
         */
        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory)
                parse_drconf_memory(memory);

        return 0;
}

static void __init setup_nonnuma(void)
{
        unsigned long top_of_ram = memblock_end_of_DRAM();
        unsigned long total_ram = memblock_phys_mem_size();
        unsigned long start_pfn, end_pfn;
        unsigned int nid = 0;
        struct memblock_region *reg;

        printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
               top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (top_of_ram - total_ram) >> 20);

        for_each_memblock(memory, reg) {
                start_pfn = memblock_region_memory_base_pfn(reg);
                end_pfn = memblock_region_memory_end_pfn(reg);

                fake_numa_create_new_node(end_pfn, &nid);
                memblock_set_node(PFN_PHYS(start_pfn),
                                  PFN_PHYS(end_pfn - start_pfn), nid);
                node_set_online(nid);
        }
}

void __init dump_numa_cpu_topology(void)
{
        unsigned int node;
        unsigned int cpu, count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                printk(KERN_DEBUG "Node %d CPUs:", node);

                count = 0;
                /*
                 * If we used a CPU iterator here we would miss printing
                 * the holes in the cpumap.
                 */
                for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                        if (cpumask_test_cpu(cpu,
                                        node_to_cpumask_map[node])) {
                                if (count == 0)
                                        printk(" %u", cpu);
                                ++count;
                        } else {
                                if (count > 1)
                                        printk("-%u", cpu - 1);
                                count = 0;
                        }
                }

                if (count > 1)
                        printk("-%u", nr_cpu_ids - 1);
                printk("\n");
        }
}

static void __init dump_numa_memory_topology(void)
{
        unsigned int node;
        unsigned int count;

        if (min_common_depth == -1 || !numa_enabled)
                return;

        for_each_online_node(node) {
                unsigned long i;

                printk(KERN_DEBUG "Node %d Memory:", node);

                count = 0;

                for (i = 0; i < memblock_end_of_DRAM();
                     i += (1 << SECTION_SIZE_BITS)) {
                        if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
                                if (count == 0)
                                        printk(" 0x%lx", i);
                                ++count;
                        } else {
                                if (count > 0)
                                        printk("-0x%lx", i);
                                count = 0;
                        }
                }

                if (count > 0)
                        printk("-0x%lx", i);
                printk("\n");
        }
}
/*
 * Allocate some memory, using the memblock or bootmem allocator as
 * appropriate. nid is the preferred node and end_pfn is the highest
 * page frame number the allocation may extend to within that node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
                                       unsigned long align,
                                       unsigned long end_pfn)
{
        void *ret;
        int new_nid;
        unsigned long ret_paddr;

        ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

        /* retry over all memory */
        if (!ret_paddr)
                ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());

        if (!ret_paddr)
                panic("numa.c: cannot allocate %lu bytes for node %d",
                      size, nid);

        ret = __va(ret_paddr);

        /*
         * We initialize the nodes in numeric order: 0, 1, 2...
         * and hand over control from the MEMBLOCK allocator to the
         * bootmem allocator.  If this function is called for
         * node 5, then we know that all nodes <5 are using the
         * bootmem allocator instead of the MEMBLOCK allocator.
         *
         * So, check the nid from which this allocation came
         * and double check to see if we need to use bootmem
         * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
         * since it would be useless.
         */
        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
        if (new_nid < nid) {
                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                size, align, 0);

                dbg("alloc_bootmem %p %lx\n", ret, size);
        }

        memset(ret, 0, size);
        return ret;
}

static struct notifier_block ppc64_numa_nb = {
        .notifier_call = cpu_numa_callback,
        .priority = 1 /* Must run before sched domains notifier. */
};

static void __init mark_reserved_regions_for_nid(int nid)
{
        struct pglist_data *node = NODE_DATA(nid);
        struct memblock_region *reg;

        for_each_memblock(reserved, reg) {
                unsigned long physbase = reg->base;
                unsigned long size = reg->size;
                unsigned long start_pfn = physbase >> PAGE_SHIFT;
                unsigned long end_pfn = PFN_UP(physbase + size);
                struct node_active_region node_ar;
                unsigned long node_end_pfn = pgdat_end_pfn(node);

                /*
                 * Check to make sure that this memblock.reserved area is
                 * within the bounds of the node that we care about.
                 * Checking the nid of the start and end points is not
                 * sufficient because the reserved area could span the
                 * entire node.
                 */
                if (end_pfn <= node->node_start_pfn ||
                    start_pfn >= node_end_pfn)
                        continue;

                get_node_active_region(start_pfn, &node_ar);
                while (start_pfn < end_pfn &&
                        node_ar.start_pfn < node_ar.end_pfn) {
                        unsigned long reserve_size = size;
                        /*
                         * if reserved region extends past active region
                         * then trim size to active region
                         */
                        if (end_pfn > node_ar.end_pfn)
                                reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
                                        - physbase;
                        /*
                         * Only worry about *this* node, others may not
                         * yet have valid NODE_DATA().
                         */
                        if (node_ar.nid == nid) {
                                dbg("reserve_bootmem %lx %lx nid=%d\n",
                                        physbase, reserve_size, node_ar.nid);
                                reserve_bootmem_node(NODE_DATA(node_ar.nid),
                                                physbase, reserve_size,
                                                BOOTMEM_DEFAULT);
                        }
                        /*
                         * if reserved region is contained in the active region
                         * then done.
                         */
                        if (end_pfn <= node_ar.end_pfn)
                                break;

                        /*
                         * reserved region extends past the active region
                         *   get next active region that contains this
                         *   reserved region
                         */
                        start_pfn = node_ar.end_pfn;
                        physbase = start_pfn << PAGE_SHIFT;
                        size = size - reserve_size;
                        get_node_active_region(start_pfn, &node_ar);
                }
        }
}


void __init do_init_bootmem(void)
{
        int nid;

        min_low_pfn = 0;
        max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
        max_pfn = max_low_pfn;

        if (parse_numa_properties())
                setup_nonnuma();
        else
                dump_numa_memory_topology();

        for_each_online_node(nid) {
                unsigned long start_pfn, end_pfn;
                void *bootmem_vaddr;
                unsigned long bootmap_pages;

                get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

                /*
                 * Allocate the node structure node local if possible
                 *
                 * Be careful moving this around, as it relies on all
                 * previous nodes' bootmem to be initialized and have
                 * all reserved areas marked.
                 */
                NODE_DATA(nid) = careful_zallocation(nid,
                                        sizeof(struct pglist_data),
                                        SMP_CACHE_BYTES, end_pfn);

                dbg("node %d\n", nid);
                dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

                NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
                NODE_DATA(nid)->node_start_pfn = start_pfn;
                NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

                if (NODE_DATA(nid)->node_spanned_pages == 0)
                        continue;

                dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
                dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

                bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
                bootmem_vaddr = careful_zallocation(nid,
                                        bootmap_pages << PAGE_SHIFT,
                                        PAGE_SIZE, end_pfn);

                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

                init_bootmem_node(NODE_DATA(nid),
                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                  start_pfn, end_pfn);

                free_bootmem_with_active_regions(nid, end_pfn);
                /*
                 * Be very careful about moving this around.  Future
                 * calls to careful_zallocation() depend on this getting
                 * done correctly.
                 */
                mark_reserved_regions_for_nid(nid);
                sparse_memory_present_with_active_regions(nid);
        }

        init_bootmem_done = 1;

        /*
         * Now bootmem is initialised we can create the node to cpumask
         * lookup tables and setup the cpu callback to populate them.
         */
        setup_node_to_cpumask_map();

        register_cpu_notifier(&ppc64_numa_nb);
        cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
                          (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
        free_area_init_nodes(max_zone_pfns);
}

static int __init early_numa(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "off"))
                numa_enabled = 0;

        if (strstr(p, "debug"))
                numa_debug = 1;

        p = strstr(p, "fake=");
        if (p)
                cmdline = p + strlen("fake=");

        return 0;
}
early_param("numa", early_numa);
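
/*
 * Example: booting with "numa=debug,fake=2G" enables the dbg() output and
 * sets a single fake node boundary at 2G, while "numa=off" disables NUMA
 * entirely.  The options are matched with strstr() and may be combined in
 * a single string.
 */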

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
                                     unsigned long scn_addr)
{
        const __be32 *dm;
        unsigned int drconf_cell_cnt, rc;
        unsigned long lmb_size;
        struct assoc_arrays aa;
        int nid = -1;

        drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
        if (!drconf_cell_cnt)
                return -1;

        lmb_size = of_get_lmb_size(memory);
        if (!lmb_size)
                return -1;

        rc = of_get_assoc_arrays(memory, &aa);
        if (rc)
                return -1;

        for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
                struct of_drconf_cell drmem;

                read_drconf_cell(&drmem, &dm);

                /* skip this block if it is reserved or not assigned to
                 * this partition */
                if ((drmem.flags & DRCONF_MEM_RESERVED)
                    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
                        continue;

                if ((scn_addr < drmem.base_addr)
                    || (scn_addr >= (drmem.base_addr + lmb_size)))
                        continue;

                nid = of_drconf_to_nid_single(&drmem, &aa);
                break;
        }

        return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory;
        int nid = -1;

        for_each_node_by_type(memory, "memory") {
                unsigned long start, size;
                int ranges;
                const __be32 *memcell_buf;
                unsigned int len;

                memcell_buf = of_get_property(memory, "reg", &len);
                if (!memcell_buf || len <= 0)
                        continue;

                /* ranges in cell */
                ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

                while (ranges--) {
                        start = read_n_cells(n_mem_addr_cells, &memcell_buf);
                        size = read_n_cells(n_mem_size_cells, &memcell_buf);

                        if ((scn_addr < start) || (scn_addr >= (start + size)))
                                continue;

                        nid = of_node_to_nid_single(memory);
                        break;
                }

                if (nid >= 0)
                        break;
        }

        of_node_put(memory);

        return nid;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
        struct device_node *memory = NULL;
        int nid, found = 0;

        if (!numa_enabled || (min_common_depth < 0))
                return first_online_node;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
                of_node_put(memory);
        } else {
                nid = hot_add_node_scn_to_nid(scn_addr);
        }

        if (nid < 0 || !node_online(nid))
                nid = first_online_node;

        if (NODE_DATA(nid)->node_spanned_pages)
                return nid;

        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages) {
                        found = 1;
                        break;
                }
        }

        BUG_ON(!found);
        return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
        struct device_node *memory = NULL;
        unsigned int drconf_cell_cnt = 0;
        u64 lmb_size = 0;
        const __be32 *dm = NULL;

        memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
        if (memory) {
                drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
                lmb_size = of_get_lmb_size(memory);
                of_node_put(memory);
        }
        return lmb_size * drconf_cell_cnt;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
        return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
        struct topology_update_data *next;
        unsigned int cpu;
        int old_nid;
        int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);

/*
 * Take a snapshot of the associativity change counters that the
 * hypervisor maintains for each cpu.
 */
static void setup_cpu_associativity_change_counters(void)
{
        int cpu;

        /* The VPHN feature supports a maximum of 8 reference points */
        BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

        for_each_possible_cpu(cpu) {
                int i;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++)
                        counts[i] = hypervisor_counts[i];
        }
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
        int cpu;
        cpumask_t *changes = &cpu_associativity_changes_mask;

        for_each_possible_cpu(cpu) {
                int i, changed = 0;
                u8 *counts = vphn_cpu_change_counts[cpu];
                volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

                for (i = 0; i < distance_ref_points_depth; i++) {
                        if (hypervisor_counts[i] != counts[i]) {
                                counts[i] = hypervisor_counts[i];
                                changed = 1;
                        }
                }
                if (changed) {
                        cpumask_or(changes, changes, cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                }
        }

        return cpumask_weight(changes);
}

/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
{
        int i, nr_assoc_doms = 0;
        const __be16 *field = (const __be16 *) packed;

#define VPHN_FIELD_UNUSED       (0xffff)
#define VPHN_FIELD_MSB          (0x8000)
#define VPHN_FIELD_MASK         (~VPHN_FIELD_MSB)

        for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
                if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
                        /* All significant fields processed, and remaining
                         * fields contain the reserved value of all 1's.
                         * Just store them.
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
                        /* Data is in the lower 15 bits of this field */
                        unpacked[i] = cpu_to_be32(
                                be16_to_cpup(field) & VPHN_FIELD_MASK);
                        field++;
                        nr_assoc_doms++;
                } else {
                        /* Data is in the lower 15 bits of this field
                         * concatenated with the next 16 bit field
                         */
                        unpacked[i] = *((__be32 *)field);
                        field += 2;
                        nr_assoc_doms++;
                }
        }

        /* The first cell contains the length of the property */
        unpacked[0] = cpu_to_be32(nr_assoc_doms);

        return nr_assoc_doms;
}
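
/*
 * Example: a packed 16-bit field of 0x8002 has the MSB set, so it unpacks
 * on its own into the 32-bit value 0x00000002; a field without the MSB set
 * is combined with the following 16-bit field into one 32-bit value; and
 * 0xffff marks the remaining fields as unused.
 */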

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
        long rc;
        long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
        u64 flags = 1;
        int hwcpu = get_hard_smp_processor_id(cpu);

        rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
        vphn_unpack_associativity(retbuf, associativity);

        return rc;
}

static long vphn_get_associativity(unsigned long cpu,
                                        __be32 *associativity)
{
        long rc;

        rc = hcall_vphn(cpu, associativity);

        switch (rc) {
        case H_FUNCTION:
                printk(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
        case H_HARDWARE:
                printk(KERN_ERR
                        "hcall_vphn() experienced a hardware fault "
                        "preventing VPHN. Disabling polling...\n");
                stop_topology_update();
        }

        return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
        struct topology_update_data *update;
        unsigned long cpu;

        if (!data)
                return -EINVAL;

        cpu = smp_processor_id();

        for (update = data; update; update = update->next) {
                if (cpu != update->cpu)
                        continue;

                unmap_cpu_from_node(update->cpu);
                map_cpu_to_node(update->cpu, update->new_nid);
                vdso_getcpu_init();
        }

        return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        unsigned int cpu, sibling, changed = 0;
        struct topology_update_data *updates, *ud;
        __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
        cpumask_t updated_cpus;
        struct device *dev;
        int weight, new_nid, i = 0;

        weight = cpumask_weight(&cpu_associativity_changes_mask);
        if (!weight)
                return 0;

        updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
        if (!updates)
                return 0;

        cpumask_clear(&updated_cpus);

        for_each_cpu(cpu, &cpu_associativity_changes_mask) {
                /*
                 * If the siblings aren't flagged for changes, the updates
                 * list will be too short. Skip this update and flag the
                 * siblings for the next one.
                 */
                if (!cpumask_subset(cpu_sibling_mask(cpu),
                                        &cpu_associativity_changes_mask)) {
                        pr_info("Sibling bits not set for associativity "
                                        "change, cpu%d\n", cpu);
                        cpumask_or(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                /* Use associativity from first thread for all siblings */
                vphn_get_associativity(cpu, associativity);
                new_nid = associativity_to_nid(associativity);
                if (new_nid < 0 || !node_online(new_nid))
                        new_nid = first_online_node;

                if (new_nid == numa_cpu_lookup_table[cpu]) {
                        cpumask_andnot(&cpu_associativity_changes_mask,
                                        &cpu_associativity_changes_mask,
                                        cpu_sibling_mask(cpu));
                        cpu = cpu_last_thread_sibling(cpu);
                        continue;
                }

                for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
                        ud = &updates[i++];
                        ud->cpu = sibling;
                        ud->new_nid = new_nid;
                        ud->old_nid = numa_cpu_lookup_table[sibling];
                        cpumask_set_cpu(sibling, &updated_cpus);
                        if (i < weight)
                                ud->next = &updates[i];
                }
                cpu = cpu_last_thread_sibling(cpu);
        }

        stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

        for (ud = &updates[0]; ud; ud = ud->next) {
                unregister_cpu_under_node(ud->cpu, ud->old_nid);
                register_cpu_under_node(ud->cpu, ud->new_nid);

                dev = get_cpu_device(ud->cpu);
                if (dev)
                        kobject_uevent(&dev->kobj, KOBJ_CHANGE);
                cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
                changed = 1;
        }

        kfree(updates);
        return changed;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
                topology_schedule_update();
        else if (vphn_enabled) {
                if (update_cpu_associativity_changes_mask() > 0)
                        topology_schedule_update();
                reset_topology_timer();
        }
}
static struct timer_list topology_timer =
        TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
        topology_timer.data = 0;
        topology_timer.expires = jiffies + 60 * HZ;
        mod_timer(&topology_timer, topology_timer.expires);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
        cpumask_or(&cpu_associativity_changes_mask,
                &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
        reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct of_prop_reconfig *update;
        int rc = NOTIFY_DONE;

        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
                update = (struct of_prop_reconfig *)data;
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
                        of_property_read_u32(update->dn, "reg", &core_id);
                        stage_topology_update(core_id);
                        rc = NOTIFY_OK;
                }
                break;
        }

        return rc;
}

static struct notifier_block dt_update_nb = {
        .notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
        int rc = 0;

        if (firmware_has_feature(FW_FEATURE_PRRN)) {
                if (!prrn_enabled) {
                        prrn_enabled = 1;
                        vphn_enabled = 0;
#ifdef CONFIG_SMP
                        rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
                   lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
                        setup_cpu_associativity_change_counters();
                        init_timer_deferrable(&topology_timer);
                        reset_topology_timer();
                }
        }

        return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
        int rc = 0;

        if (prrn_enabled) {
                prrn_enabled = 0;
#ifdef CONFIG_SMP
                rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
        } else if (vphn_enabled) {
                vphn_enabled = 0;
                rc = del_timer_sync(&topology_timer);
        }

        return rc;
}

int prrn_is_enabled(void)
{
        return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
        if (vphn_enabled || prrn_enabled)
                seq_puts(file, "on\n");
        else
                seq_puts(file, "off\n");

        return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
        return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *off)
{
        char kbuf[4]; /* "on" or "off" plus null. */
        int read_len;

        read_len = count < 3 ? count : 3;
        if (copy_from_user(kbuf, buf, read_len))
                return -EINVAL;

        kbuf[read_len] = '\0';

        if (!strncmp(kbuf, "on", 2))
                start_topology_update();
        else if (!strncmp(kbuf, "off", 3))
                stop_topology_update();
        else
                return -EINVAL;

        return count;
}

static const struct file_operations topology_ops = {
        .read = seq_read,
        .write = topology_write,
        .open = topology_open,
        .release = single_release
};
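
/*
 * Usage example: "echo on > /proc/powerpc/topology_updates" starts the
 * topology update polling and "echo off" stops it; reading the file
 * reports the current state.
 */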

static int topology_update_init(void)
{
        start_topology_update();
        proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops);

        return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */