1 /*
2  * sparse memory mappings.
3  */
4 #include <linux/mm.h>
5 #include <linux/slab.h>
6 #include <linux/mmzone.h>
7 #include <linux/bootmem.h>
8 #include <linux/highmem.h>
9 #include <linux/export.h>
10 #include <linux/spinlock.h>
11 #include <linux/vmalloc.h>
12 #include "internal.h"
13 #include <asm/dma.h>
14 #include <asm/pgalloc.h>
15 #include <asm/pgtable.h>
16
17 /*
18  * Permanent SPARSEMEM data:
19  *
20  * 1) mem_section       - memory sections, mem_map's for valid memory
21  */
22 #ifdef CONFIG_SPARSEMEM_EXTREME
23 struct mem_section *mem_section[NR_SECTION_ROOTS]
24         ____cacheline_internodealigned_in_smp;
25 #else
26 struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
27         ____cacheline_internodealigned_in_smp;
28 #endif
29 EXPORT_SYMBOL(mem_section);
30
31 #ifdef NODE_NOT_IN_PAGE_FLAGS
32 /*
33  * If we did not store the node number in the page then we have to
34  * do a lookup in the section_to_node_table in order to find which
35  * node the page belongs to.
36  */
37 #if MAX_NUMNODES <= 256
38 static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
39 #else
40 static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
41 #endif
42
43 int page_to_nid(const struct page *page)
44 {
45         return section_to_node_table[page_to_section(page)];
46 }
47 EXPORT_SYMBOL(page_to_nid);
48
49 static void set_section_nid(unsigned long section_nr, int nid)
50 {
51         section_to_node_table[section_nr] = nid;
52 }
53 #else /* !NODE_NOT_IN_PAGE_FLAGS */
54 static inline void set_section_nid(unsigned long section_nr, int nid)
55 {
56 }
57 #endif
58
59 #ifdef CONFIG_SPARSEMEM_EXTREME
60 static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
61 {
62         struct mem_section *section = NULL;
63         unsigned long array_size = SECTIONS_PER_ROOT *
64                                    sizeof(struct mem_section);
65
66         if (slab_is_available()) {
67                 if (node_state(nid, N_HIGH_MEMORY))
68                         section = kzalloc_node(array_size, GFP_KERNEL, nid);
69                 else
70                         section = kzalloc(array_size, GFP_KERNEL);
71         } else {
72                 section = alloc_bootmem_node(NODE_DATA(nid), array_size);
73         }
74
75         return section;
76 }
77
78 static int __meminit sparse_index_init(unsigned long section_nr, int nid)
79 {
80         unsigned long root = SECTION_NR_TO_ROOT(section_nr);
81         struct mem_section *section;
82
83         if (mem_section[root])
84                 return -EEXIST;
85
86         section = sparse_index_alloc(nid);
87         if (!section)
88                 return -ENOMEM;
89
90         mem_section[root] = section;
91
92         return 0;
93 }
94 #else /* !SPARSEMEM_EXTREME */
95 static inline int sparse_index_init(unsigned long section_nr, int nid)
96 {
97         return 0;
98 }
99 #endif
100
101 /*
102  * Although written for the SPARSEMEM_EXTREME case, this happens
103  * to also work for the flat array case because
104  * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
105  */
106 int __section_nr(struct mem_section* ms)
107 {
108         unsigned long root_nr;
109         struct mem_section* root;
110
111         for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
112                 root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
113                 if (!root)
114                         continue;
115
116                 if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
117                      break;
118         }
119
120         VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
121
122         return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
123 }
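/*
 * Note: this is a linear walk over all NR_SECTION_ROOTS entries.  That is
 * acceptable because, in practice, __section_nr() is only called from slow
 * paths such as memory hot-remove, never from a fast path.
 */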
124
125 /*
126  * During early boot, before section_mem_map is used for an actual
127  * mem_map, we use section_mem_map to store the section's NUMA
128  * node.  This keeps us from having to use another data structure.  The
129  * node information is cleared just before we store the real mem_map.
130  */
131 static inline unsigned long sparse_encode_early_nid(int nid)
132 {
133         return (nid << SECTION_NID_SHIFT);
134 }
135
136 static inline int sparse_early_nid(struct mem_section *section)
137 {
138         return (section->section_mem_map >> SECTION_NID_SHIFT);
139 }
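/*
 * Illustrative round trip (assuming, as in include/linux/mmzone.h, that the
 * section flag bits such as SECTION_MARKED_PRESENT all lie below
 * SECTION_NID_SHIFT):
 *
 *     ms->section_mem_map = sparse_encode_early_nid(nid) |
 *                           SECTION_MARKED_PRESENT;
 *     ...
 *     sparse_early_nid(ms) == nid;    // the flag bits are shifted away
 */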
140
141 /* Validate the physical addressing limitations of the model */
142 void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
143                                                 unsigned long *end_pfn)
144 {
145         unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
146
147         /*
148          * Sanity checks - do not allow an architecture to pass
149          * in larger pfns than the maximum scope of sparsemem:
150          */
151         if (*start_pfn > max_sparsemem_pfn) {
152                 mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
153                         "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
154                         *start_pfn, *end_pfn, max_sparsemem_pfn);
155                 WARN_ON_ONCE(1);
156                 *start_pfn = max_sparsemem_pfn;
157                 *end_pfn = max_sparsemem_pfn;
158         } else if (*end_pfn > max_sparsemem_pfn) {
159                 mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
160                         "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
161                         *start_pfn, *end_pfn, max_sparsemem_pfn);
162                 WARN_ON_ONCE(1);
163                 *end_pfn = max_sparsemem_pfn;
164         }
165 }
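/*
 * For example, with MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12 (a common
 * 64-bit configuration), max_sparsemem_pfn is 1UL << 34, i.e. sparsemem can
 * describe at most 64TB of physical address space; ranges beyond that are
 * clamped above with a one-time warning.
 */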
166
167 /* Record a memory area against a node. */
168 void __init memory_present(int nid, unsigned long start, unsigned long end)
169 {
170         unsigned long pfn;
171
172         start &= PAGE_SECTION_MASK;
173         mminit_validate_memmodel_limits(&start, &end);
174         for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
175                 unsigned long section = pfn_to_section_nr(pfn);
176                 struct mem_section *ms;
177
178                 sparse_index_init(section, nid);
179                 set_section_nid(section, nid);
180
181                 ms = __nr_to_section(section);
182                 if (!ms->section_mem_map)
183                         ms->section_mem_map = sparse_encode_early_nid(nid) |
184                                                         SECTION_MARKED_PRESENT;
185         }
186 }
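/*
 * A typical caller is the architecture's early NUMA/paging init code, which
 * walks its boot memory map and does something along the lines of
 *
 *     for_each_memblock(memory, reg)
 *             memory_present(memblock_get_region_node(reg),
 *                            memblock_region_memory_base_pfn(reg),
 *                            memblock_region_memory_end_pfn(reg));
 *
 * before sparse_init() runs.
 */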
187
188 /*
189  * Only used by the i386 NUMA architectures, but relatively
190  * generic code.
191  */
192 unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
193                                                      unsigned long end_pfn)
194 {
195         unsigned long pfn;
196         unsigned long nr_pages = 0;
197
198         mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
199         for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
200                 if (nid != early_pfn_to_nid(pfn))
201                         continue;
202
203                 if (pfn_present(pfn))
204                         nr_pages += PAGES_PER_SECTION;
205         }
206
207         return nr_pages * sizeof(struct page);
208 }
209
210 /*
211  * Subtle: we encode the mem_map pointer offset by the section's first
212  * pfn, so that coded_mem_map + pfn yields the struct page for pfn, and
213  * page - coded_mem_map recovers the actual physical page frame number.
214  */
215 static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
216 {
217         return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
218 }
219
220 /*
221  * Decode mem_map from the coded memmap
222  */
223 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
224 {
225         /* mask off the extra low bits of information */
226         coded_mem_map &= SECTION_MAP_MASK;
227         return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
228 }
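/*
 * The encode/decode pair only round-trips cleanly because the decoder first
 * strips the extra low flag bits (SECTION_MARKED_PRESENT,
 * SECTION_HAS_MEM_MAP) with SECTION_MAP_MASK; sparse_init_one_section()
 * below relies on the same split when it installs the coded value alongside
 * those flags.
 */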
229
230 static int __meminit sparse_init_one_section(struct mem_section *ms,
231                 unsigned long pnum, struct page *mem_map,
232                 unsigned long *pageblock_bitmap)
233 {
234         if (!present_section(ms))
235                 return -EINVAL;
236
237         ms->section_mem_map &= ~SECTION_MAP_MASK;
238         ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
239                                                         SECTION_HAS_MEM_MAP;
240         ms->pageblock_flags = pageblock_bitmap;
241
242         return 1;
243 }
244
245 unsigned long usemap_size(void)
246 {
247         unsigned long size_bytes;
248         size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
249         size_bytes = roundup(size_bytes, sizeof(unsigned long));
250         return size_bytes;
251 }
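/*
 * For example, with 64 pageblocks per section and NR_PAGEBLOCK_BITS == 4
 * (a plausible x86_64 layout), SECTION_BLOCKFLAGS_BITS is 256, so
 * usemap_size() comes to 32 bytes -- tiny compared with the section's
 * mem_map, which is exactly what the batching in sparse_init() exploits.
 */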
252
253 #ifdef CONFIG_MEMORY_HOTPLUG
254 static unsigned long *__kmalloc_section_usemap(void)
255 {
256         return kmalloc(usemap_size(), GFP_KERNEL);
257 }
258 #endif /* CONFIG_MEMORY_HOTPLUG */
259
260 #ifdef CONFIG_MEMORY_HOTREMOVE
261 static unsigned long * __init
262 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
263                                          unsigned long size)
264 {
265         unsigned long goal, limit;
266         unsigned long *p;
267         int nid;
268         /*
269          * A page may contain usemaps for other sections, preventing the
270          * page from being freed and making a section unremovable while
271          * other sections referencing the usemap remain active. Similarly,
272          * a pgdat can prevent a section being removed. If section A
273          * contains a pgdat and section B contains the usemap, both
274          * sections become inter-dependent. This allocates usemaps
275          * from the same section as the pgdat where possible to avoid
276          * this problem.
277          */
278         goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
279         limit = goal + (1UL << PA_SECTION_SHIFT);
280         nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
281 again:
282         p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
283                                           SMP_CACHE_BYTES, goal, limit);
284         if (!p && limit) {
285                 limit = 0;
286                 goto again;
287         }
288         return p;
289 }
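/*
 * In short, sparse_early_usemaps_alloc_pgdat_section() first confines the
 * allocation to the section that already holds the pgdat
 * ([goal, goal + (1UL << PA_SECTION_SHIFT))); only if that fails is the
 * limit cleared so that bootmem may place the usemap anywhere on the node.
 */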
290
291 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
292 {
293         unsigned long usemap_snr, pgdat_snr;
294         static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
295         static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
296         struct pglist_data *pgdat = NODE_DATA(nid);
297         int usemap_nid;
298
299         usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
300         pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
301         if (usemap_snr == pgdat_snr)
302                 return;
303
304         if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
305                 /* skip redundant message */
306                 return;
307
308         old_usemap_snr = usemap_snr;
309         old_pgdat_snr = pgdat_snr;
310
311         usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
312         if (usemap_nid != nid) {
313                 printk(KERN_INFO
314                        "node %d must be removed before remove section %ld\n",
315                        nid, usemap_snr);
316                 return;
317         }
318         /*
319          * There is a circular dependency.
320          * Some platforms allow un-removable sections because they will just
321          * gather other removable sections for dynamic partitioning.
322          * Just report the un-removable section numbers here.
323          */
324         printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
325                pgdat_snr, nid);
326         printk(KERN_CONT
327                " have a circular dependency on usemap and pgdat allocations\n");
328 }
329 #else
330 static unsigned long * __init
331 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
332                                          unsigned long size)
333 {
334         return alloc_bootmem_node_nopanic(pgdat, size);
335 }
336
337 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
338 {
339 }
340 #endif /* CONFIG_MEMORY_HOTREMOVE */
341
342 static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
343                                  unsigned long pnum_begin,
344                                  unsigned long pnum_end,
345                                  unsigned long usemap_count, int nodeid)
346 {
347         void *usemap;
348         unsigned long pnum;
349         int size = usemap_size();
350
351         usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
352                                                           size * usemap_count);
353         if (!usemap) {
354                 printk(KERN_WARNING "%s: allocation failed\n", __func__);
355                 return;
356         }
357
358         for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
359                 if (!present_section_nr(pnum))
360                         continue;
361                 usemap_map[pnum] = usemap;
362                 usemap += size;
363                 check_usemap_section_nr(nodeid, usemap_map[pnum]);
364         }
365 }
366
367 #ifndef CONFIG_SPARSEMEM_VMEMMAP
368 struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
369 {
370         struct page *map;
371         unsigned long size;
372
373         map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
374         if (map)
375                 return map;
376
377         size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
378         map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
379                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
380         return map;
381 }
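/*
 * sparse_mem_maps_populate_node() below tries three strategies in order:
 * a single alloc_remap() covering every present section on the node, then
 * one large bootmem allocation that is carved up per section, and finally
 * a per-section sparse_mem_map_populate() fallback.  Sections whose
 * mem_map still cannot be allocated are dropped with an error message.
 */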
382 void __init sparse_mem_maps_populate_node(struct page **map_map,
383                                           unsigned long pnum_begin,
384                                           unsigned long pnum_end,
385                                           unsigned long map_count, int nodeid)
386 {
387         void *map;
388         unsigned long pnum;
389         unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
390
391         map = alloc_remap(nodeid, size * map_count);
392         if (map) {
393                 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
394                         if (!present_section_nr(pnum))
395                                 continue;
396                         map_map[pnum] = map;
397                         map += size;
398                 }
399                 return;
400         }
401
402         size = PAGE_ALIGN(size);
403         map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
404                                          PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
405         if (map) {
406                 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
407                         if (!present_section_nr(pnum))
408                                 continue;
409                         map_map[pnum] = map;
410                         map += size;
411                 }
412                 return;
413         }
414
415         /* fallback */
416         for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
417                 struct mem_section *ms;
418
419                 if (!present_section_nr(pnum))
420                         continue;
421                 map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
422                 if (map_map[pnum])
423                         continue;
424                 ms = __nr_to_section(pnum);
425                 printk(KERN_ERR "%s: sparsemem memory map backing failed "
426                         "some memory will not be available.\n", __func__);
427                 ms->section_mem_map = 0;
428         }
429 }
430 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
431
432 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
433 static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
434                                  unsigned long pnum_begin,
435                                  unsigned long pnum_end,
436                                  unsigned long map_count, int nodeid)
437 {
438         sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
439                                          map_count, nodeid);
440 }
441 #else
442 static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
443 {
444         struct page *map;
445         struct mem_section *ms = __nr_to_section(pnum);
446         int nid = sparse_early_nid(ms);
447
448         map = sparse_mem_map_populate(pnum, nid);
449         if (map)
450                 return map;
451
452         printk(KERN_ERR "%s: sparsemem memory map backing failed "
453                         "some memory will not be available.\n", __func__);
454         ms->section_mem_map = 0;
455         return NULL;
456 }
457 #endif
458
459 void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
460 {
461 }
462
463 /*
464  * Allocate the accumulated non-linear sections, allocate a mem_map
465  * for each and record the physical to section mapping.
466  */
467 void __init sparse_init(void)
468 {
469         unsigned long pnum;
470         struct page *map;
471         unsigned long *usemap;
472         unsigned long **usemap_map;
473         int size;
474         int nodeid_begin = 0;
475         unsigned long pnum_begin = 0;
476         unsigned long usemap_count;
477 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
478         unsigned long map_count;
479         int size2;
480         struct page **map_map;
481 #endif
482
483         /* see include/linux/mmzone.h 'struct mem_section' definition */
484         BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
485
486         /* Set up pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
487         set_pageblock_order();
488
489         /*
490          * Each mem_map needs a big page (2M on 64-bit x86), while each
491          * usemap is only a tiny fraction of a page (tens of bytes).  If a
492          * 2M (2M-aligned) mem_map and a tiny usemap are allocated in turn,
493          * every following 2M allocation slips to the next 2M-aligned slot
494          * and, on a big system, memory ends up riddled with holes.  So try
495          * to allocate the 2M mem_map pages contiguously instead.
496          *
497          * powerpc needs to call sparse_init_one_section() right after each
498          * sparse_early_mem_map_alloc(), so allocate usemap_map first.
499          */
500         size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
501         usemap_map = alloc_bootmem(size);
502         if (!usemap_map)
503                 panic("can not allocate usemap_map\n");
504
505         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
506                 struct mem_section *ms;
507
508                 if (!present_section_nr(pnum))
509                         continue;
510                 ms = __nr_to_section(pnum);
511                 nodeid_begin = sparse_early_nid(ms);
512                 pnum_begin = pnum;
513                 break;
514         }
515         usemap_count = 1;
516         for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
517                 struct mem_section *ms;
518                 int nodeid;
519
520                 if (!present_section_nr(pnum))
521                         continue;
522                 ms = __nr_to_section(pnum);
523                 nodeid = sparse_early_nid(ms);
524                 if (nodeid == nodeid_begin) {
525                         usemap_count++;
526                         continue;
527                 }
528         /* ok, we need to take care of the range pnum_begin to pnum - 1 */
529                 sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
530                                                  usemap_count, nodeid_begin);
531         /* new start, update count etc. */
532                 nodeid_begin = nodeid;
533                 pnum_begin = pnum;
534                 usemap_count = 1;
535         }
536         /* ok, last chunk */
537         sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
538                                          usemap_count, nodeid_begin);
539
540 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
541         size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
542         map_map = alloc_bootmem(size2);
543         if (!map_map)
544                 panic("can not allocate map_map\n");
545
546         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
547                 struct mem_section *ms;
548
549                 if (!present_section_nr(pnum))
550                         continue;
551                 ms = __nr_to_section(pnum);
552                 nodeid_begin = sparse_early_nid(ms);
553                 pnum_begin = pnum;
554                 break;
555         }
556         map_count = 1;
557         for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
558                 struct mem_section *ms;
559                 int nodeid;
560
561                 if (!present_section_nr(pnum))
562                         continue;
563                 ms = __nr_to_section(pnum);
564                 nodeid = sparse_early_nid(ms);
565                 if (nodeid == nodeid_begin) {
566                         map_count++;
567                         continue;
568                 }
569         /* ok, we need to take care of the range pnum_begin to pnum - 1 */
570                 sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
571                                                  map_count, nodeid_begin);
572         /* new start, update count etc. */
573                 nodeid_begin = nodeid;
574                 pnum_begin = pnum;
575                 map_count = 1;
576         }
577         /* ok, last chunk */
578         sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
579                                          map_count, nodeid_begin);
580 #endif
581
582         for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
583                 if (!present_section_nr(pnum))
584                         continue;
585
586                 usemap = usemap_map[pnum];
587                 if (!usemap)
588                         continue;
589
590 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
591                 map = map_map[pnum];
592 #else
593                 map = sparse_early_mem_map_alloc(pnum);
594 #endif
595                 if (!map)
596                         continue;
597
598                 sparse_init_one_section(__nr_to_section(pnum), pnum, map,
599                                                                 usemap);
600         }
601
602         vmemmap_populate_print_last();
603
604 #ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
605         free_bootmem(__pa(map_map), size2);
606 #endif
607         free_bootmem(__pa(usemap_map), size);
608 }
609
610 #ifdef CONFIG_MEMORY_HOTPLUG
611 #ifdef CONFIG_SPARSEMEM_VMEMMAP
612 static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
613                                                  unsigned long nr_pages)
614 {
615         /* This will make the necessary allocations eventually. */
616         return sparse_mem_map_populate(pnum, nid);
617 }
618 static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
619 {
620         unsigned long start = (unsigned long)memmap;
621         unsigned long end = (unsigned long)(memmap + nr_pages);
622
623         vmemmap_free(start, end);
624 }
625 #ifdef CONFIG_MEMORY_HOTREMOVE
626 static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
627 {
628         unsigned long start = (unsigned long)memmap;
629         unsigned long end = (unsigned long)(memmap + nr_pages);
630
631         vmemmap_free(start, end);
632 }
633 #endif /* CONFIG_MEMORY_HOTREMOVE */
634 #else
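/*
 * Without vmemmap, the memmap for one hot-added section is an ordinary
 * high-order allocation (e.g. order 9 for a 128MB section with 64-byte
 * struct pages).  __GFP_NOWARN keeps the fairly likely fragmentation
 * failure quiet, and vmalloc() serves as the fallback.
 */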
635 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
636 {
637         struct page *page, *ret;
638         unsigned long memmap_size = sizeof(struct page) * nr_pages;
639
640         page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
641         if (page)
642                 goto got_map_page;
643
644         ret = vmalloc(memmap_size);
645         if (ret)
646                 goto got_map_ptr;
647
648         return NULL;
649 got_map_page:
650         ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
651 got_map_ptr:
652
653         return ret;
654 }
655
656 static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
657                                                   unsigned long nr_pages)
658 {
659         return __kmalloc_section_memmap(nr_pages);
660 }
661
662 static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
663 {
664         if (is_vmalloc_addr(memmap))
665                 vfree(memmap);
666         else
667                 free_pages((unsigned long)memmap,
668                            get_order(sizeof(struct page) * nr_pages));
669 }
670
671 #ifdef CONFIG_MEMORY_HOTREMOVE
672 static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
673 {
674         unsigned long maps_section_nr, removing_section_nr, i;
675         unsigned long magic;
676         struct page *page = virt_to_page(memmap);
677
678         for (i = 0; i < nr_pages; i++, page++) {
679                 magic = (unsigned long) page->lru.next;
680
681                 BUG_ON(magic == NODE_INFO);
682
683                 maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
684                 removing_section_nr = page->private;
685
686                 /*
687                  * When this function is called, the section being removed has
688                  * already been logically offlined, so all of its pages are
689                  * isolated from the page allocator. If the memmap of the
690                  * section being removed is placed in that same section, it
691                  * must not be freed here: the page allocator could hand it
692                  * out again even though it is about to be removed physically.
693                  */
694                 if (maps_section_nr != removing_section_nr)
695                         put_page_bootmem(page);
696         }
697 }
698 #endif /* CONFIG_MEMORY_HOTREMOVE */
699 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
700
701 /*
702  * returns the number of sections whose mem_maps were properly
703  * set.  If this is <=0, then that means that the passed-in
704  * map was not consumed and must be freed.
705  */
706 int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
707                            int nr_pages)
708 {
709         unsigned long section_nr = pfn_to_section_nr(start_pfn);
710         struct pglist_data *pgdat = zone->zone_pgdat;
711         struct mem_section *ms;
712         struct page *memmap;
713         unsigned long *usemap;
714         unsigned long flags;
715         int ret;
716
717         /*
718          * No locking here: sparse_index_init() does its own, and it may
719          * sleep in kmalloc(), so it cannot run under the resize lock anyway.
720          */
721         ret = sparse_index_init(section_nr, pgdat->node_id);
722         if (ret < 0 && ret != -EEXIST)
723                 return ret;
724         memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
725         if (!memmap)
726                 return -ENOMEM;
727         usemap = __kmalloc_section_usemap();
728         if (!usemap) {
729                 __kfree_section_memmap(memmap, nr_pages);
730                 return -ENOMEM;
731         }
732
733         pgdat_resize_lock(pgdat, &flags);
734
735         ms = __pfn_to_section(start_pfn);
736         if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
737                 ret = -EEXIST;
738                 goto out;
739         }
740
741         memset(memmap, 0, sizeof(struct page) * nr_pages);
742
743         ms->section_mem_map |= SECTION_MARKED_PRESENT;
744
745         ret = sparse_init_one_section(ms, section_nr, memmap, usemap);
746
747 out:
748         pgdat_resize_unlock(pgdat, &flags);
749         if (ret <= 0) {
750                 kfree(usemap);
751                 __kfree_section_memmap(memmap, nr_pages);
752         }
753         return ret;
754 }
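/*
 * sparse_add_one_section() is typically reached from the memory hotplug
 * core (__add_pages() in mm/memory_hotplug.c calls it once per section of
 * the newly added block); onlining of the new pages happens later and
 * separately.
 */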
755
756 #ifdef CONFIG_MEMORY_FAILURE
757 static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
758 {
759         int i;
760
761         if (!memmap)
762                 return;
763
764         for (i = 0; i < PAGES_PER_SECTION; i++) {
765                 if (PageHWPoison(&memmap[i])) {
766                         atomic_long_sub(1, &num_poisoned_pages);
767                         ClearPageHWPoison(&memmap[i]);
768                 }
769         }
770 }
771 #else
772 static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
773 {
774 }
775 #endif
776
777 #ifdef CONFIG_MEMORY_HOTREMOVE
778 static void free_section_usemap(struct page *memmap, unsigned long *usemap)
779 {
780         struct page *usemap_page;
781         unsigned long nr_pages;
782
783         if (!usemap)
784                 return;
785
786         usemap_page = virt_to_page(usemap);
787         /*
788          * Check to see if allocation came from hot-plug-add
789          */
790         if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
791                 kfree(usemap);
792                 if (memmap)
793                         __kfree_section_memmap(memmap, PAGES_PER_SECTION);
794                 return;
795         }
796
797         /*
798          * The usemap came from bootmem. It is packed with other usemaps
799          * in the section that holds the pgdat at boot time. Just keep it as is.
800          */
801
802         if (memmap) {
803                 nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
804                         >> PAGE_SHIFT;
805
806                 free_map_bootmem(memmap, nr_pages);
807         }
808 }
809
810 void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
811 {
812         struct page *memmap = NULL;
813         unsigned long *usemap = NULL, flags;
814         struct pglist_data *pgdat = zone->zone_pgdat;
815
816         pgdat_resize_lock(pgdat, &flags);
817         if (ms->section_mem_map) {
818                 usemap = ms->pageblock_flags;
819                 memmap = sparse_decode_mem_map(ms->section_mem_map,
820                                                 __section_nr(ms));
821                 ms->section_mem_map = 0;
822                 ms->pageblock_flags = NULL;
823         }
824         pgdat_resize_unlock(pgdat, &flags);
825
826         clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION);
827         free_section_usemap(memmap, usemap);
828 }
829 #endif /* CONFIG_MEMORY_HOTREMOVE */
830 #endif /* CONFIG_MEMORY_HOTPLUG */