mm/sparse: optimize sparse_index_alloc
mm/sparse.c
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section       - memory sections, mem_map's for valid memory
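 *
 * With CONFIG_SPARSEMEM_EXTREME this is a two-level table: NR_SECTION_ROOTS
 * root pointers, each populated on demand (see sparse_index_init()) with a
 * block of SECTIONS_PER_ROOT struct mem_sections.  Otherwise it is a flat,
 * statically sized array.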
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
        return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
        section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
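/*
 * Allocate one root's worth of mem_section structures for @nid: from the
 * slab once it is available, otherwise from bootmem, preferring the
 * requesting node where it has memory.
 */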
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        if (slab_is_available()) {
                if (node_state(nid, N_HIGH_MEMORY))
                        section = kzalloc_node(array_size, GFP_KERNEL, nid);
                else
                        section = kzalloc(array_size, GFP_KERNEL);
        } else {
                section = alloc_bootmem_node(NODE_DATA(nid), array_size);
        }

        return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
        static DEFINE_SPINLOCK(index_init_lock);
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        if (!section)
                return -ENOMEM;
        /*
         * This lock keeps two different callers from initializing
         * the same root index twice.
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
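 *
 * This is the reverse of __nr_to_section(): a linear scan over the roots
 * to find the one that contains *ms, returning
 * root_nr * SECTIONS_PER_ROOT + (ms - root).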
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node.  This keeps us from having to use another data structure.  The
 * node information is cleared just before we store the real mem_map.
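 *
 * sparse_early_nid() recovers the node by shifting the value back down;
 * the low flag bits (e.g. SECTION_MARKED_PRESENT) are discarded by that
 * shift.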
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
        return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
        return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
                                                unsigned long *end_pfn)
{
        unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

        /*
         * Sanity checks - do not allow an architecture to pass
         * in larger pfns than the maximum scope of sparsemem:
         */
        if (*start_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *start_pfn = max_sparsemem_pfn;
                *end_pfn = max_sparsemem_pfn;
        } else if (*end_pfn > max_sparsemem_pfn) {
                mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
                        "End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
                        *start_pfn, *end_pfn, max_sparsemem_pfn);
                WARN_ON_ONCE(1);
                *end_pfn = max_sparsemem_pfn;
        }
}

/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        mminit_validate_memmodel_limits(&start, &end);
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);
                set_section_nid(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = sparse_encode_early_nid(nid) |
                                                        SECTION_MARKED_PRESENT;
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                                     unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_present(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
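 *
 * A sketch of the arithmetic (not extra code): with
 *   coded = mem_map - section_nr_to_pfn(pnum)
 * we get
 *   coded + pfn == &mem_map[pfn - section_start_pfn]
 * which is exactly what pfn_to_page() relies on under classic SPARSEMEM.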
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        /* mask off the extra low bits of information */
        coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

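/*
 * Hook a present section up with its mem_map and pageblock bitmap.  Only
 * the map portion of section_mem_map is rewritten, so the low flag bits
 * (SECTION_MARKED_PRESENT etc.) set earlier are preserved.
 */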
static int __meminit sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map,
                unsigned long *pageblock_bitmap)
{
        if (!present_section(ms))
                return -EINVAL;

        ms->section_mem_map &= ~SECTION_MAP_MASK;
        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
                                                        SECTION_HAS_MEM_MAP;
        ms->pageblock_flags = pageblock_bitmap;

        return 1;
}

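/*
 * Bytes needed for one section's pageblock-flags bitmap:
 * SECTION_BLOCKFLAGS_BITS bits, rounded up to whole bytes and then up to
 * an unsigned long boundary.
 */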
unsigned long usemap_size(void)
{
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
        size_bytes = roundup(size_bytes, sizeof(unsigned long));
        return size_bytes;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
        return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        unsigned long goal, limit;
        unsigned long *p;
        int nid;
        /*
         * A page may contain usemaps for other sections, preventing the
         * page from being freed and making a section unremovable while
         * other sections referencing the usemap remain active. Similarly,
         * a pgdat can prevent a section being removed. If section A
         * contains a pgdat and section B contains the usemap, both
         * sections become inter-dependent. This allocates usemaps
         * from the same section as the pgdat where possible to avoid
         * this problem.
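         *
         * "goal" below is the section-aligned physical address of the pgdat
         * and "limit" the end of that section; if the allocation cannot be
         * satisfied there, limit is cleared and we fall back to anywhere on
         * the node.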
         */
        goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
        p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
                                          SMP_CACHE_BYTES, goal, limit);
        if (!p && limit) {
                limit = 0;
                goto again;
        }
        return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
        unsigned long usemap_snr, pgdat_snr;
        static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
        static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
        struct pglist_data *pgdat = NODE_DATA(nid);
        int usemap_nid;

        usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
        pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
        if (usemap_snr == pgdat_snr)
                return;

        if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
                /* skip redundant message */
                return;

        old_usemap_snr = usemap_snr;
        old_pgdat_snr = pgdat_snr;

        usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
        if (usemap_nid != nid) {
                printk(KERN_INFO
                       "node %d must be removed before removing section %ld\n",
                       nid, usemap_snr);
                return;
        }
        /*
         * There is a circular dependency.
         * Some platforms allow un-removable sections because they will just
         * gather other removable sections for dynamic partitioning.
         * Just report the un-removable section's number here.
         */
        printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
               pgdat_snr, nid);
        printk(KERN_CONT
               " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
{
        return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long usemap_count, int nodeid)
{
        void *usemap;
        unsigned long pnum;
        int size = usemap_size();

        usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
                                                          size * usemap_count);
        if (!usemap) {
                printk(KERN_WARNING "%s: allocation failed\n", __func__);
                return;
        }

        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
                usemap_map[pnum] = usemap;
                usemap += size;
                check_usemap_section_nr(nodeid, usemap_map[pnum]);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
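/*
 * Boot-time mem_map allocation for a single section when vmemmap is not in
 * use: prefer an architecture-provided remapped area (alloc_remap()),
 * otherwise take page-aligned bootmem on the section's node, preferring
 * addresses above MAX_DMA_ADDRESS.
 */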
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map;
        unsigned long size;

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
        map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
                                         PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
                                          unsigned long pnum_begin,
                                          unsigned long pnum_end,
                                          unsigned long map_count, int nodeid)
{
        void *map;
        unsigned long pnum;
        unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

        map = alloc_remap(nodeid, size * map_count);
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        size = PAGE_ALIGN(size);
        map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
                                         PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (map) {
                for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                        if (!present_section_nr(pnum))
                                continue;
                        map_map[pnum] = map;
                        map += size;
                }
                return;
        }

        /* fallback */
        for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
                if (map_map[pnum])
                        continue;
                ms = __nr_to_section(pnum);
                printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
                ms->section_mem_map = 0;
        }
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
                                 unsigned long pnum_begin,
                                 unsigned long pnum_end,
                                 unsigned long map_count, int nodeid)
{
        sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
                                         map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        struct mem_section *ms = __nr_to_section(pnum);
        int nid = sparse_early_nid(ms);

        map = sparse_mem_map_populate(pnum, nid);
        if (map)
                return map;

        printk(KERN_ERR "%s: sparsemem memory map backing failed, "
                        "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
}
#endif

void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
        unsigned long **usemap_map;
        int size;
        int nodeid_begin = 0;
        unsigned long pnum_begin = 0;
        unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        unsigned long map_count;
        int size2;
        struct page **map_map;
#endif

        /* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
        set_pageblock_order();

        /*
         * The mem_map uses big pages (2M on 64-bit x86) while a usemap is
         * far smaller than a page (about 24 bytes).  Allocating a 2M-aligned
         * mem_map followed by a tiny usemap in turn would push each
         * subsequent 2M allocation further out, leaving large systems full
         * of holes, so try to allocate the 2M mem_maps contiguously instead.
         *
         * powerpc needs to call sparse_init_one_section() right after each
         * sparse_early_mem_map_alloc(), so allocate usemap_map first.
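         *
         * The loops below therefore make passes over the present sections:
         * group them by node and allocate all usemaps for one node in one
         * go, do the same for the mem_maps when
         * CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER is set, and finally wire
         * up each present section with sparse_init_one_section().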
         */
        size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
        usemap_map = alloc_bootmem(size);
        if (!usemap_map)
                panic("can not allocate usemap_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        usemap_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        usemap_count++;
                        continue;
                }
                /* ok, we need to take care of sections pnum_begin .. pnum - 1 */
                sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
                                                 usemap_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                usemap_count = 1;
        }
        /* ok, last chunk */
        sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
                                         usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
        map_map = alloc_bootmem(size2);
        if (!map_map)
                panic("can not allocate map_map\n");

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid_begin = sparse_early_nid(ms);
                pnum_begin = pnum;
                break;
        }
        map_count = 1;
        for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
                struct mem_section *ms;
                int nodeid;

                if (!present_section_nr(pnum))
                        continue;
                ms = __nr_to_section(pnum);
                nodeid = sparse_early_nid(ms);
                if (nodeid == nodeid_begin) {
                        map_count++;
                        continue;
                }
                /* ok, we need to take care of sections pnum_begin .. pnum - 1 */
                sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
                                                 map_count, nodeid_begin);
                /* new start, update count etc. */
                nodeid_begin = nodeid;
                pnum_begin = pnum;
                map_count = 1;
        }
        /* ok, last chunk */
        sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
                                         map_count, nodeid_begin);
#endif

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;

                usemap = usemap_map[pnum];
                if (!usemap)
                        continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
                map = map_map[pnum];
#else
                map = sparse_early_mem_map_alloc(pnum);
#endif
                if (!map)
                        continue;

                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }

        vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
        free_bootmem(__pa(map_map), size2);
#endif
        free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                 unsigned long nr_pages)
{
        /* This will make the necessary allocations eventually. */
        return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
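/*
 * Hot-add path when vmemmap is not in use: allocate one section's worth of
 * struct pages, trying physically contiguous pages first and falling back
 * to vmalloc(); __kfree_section_memmap() below undoes whichever of the two
 * succeeded.
 */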
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
        struct page *page, *ret;
        unsigned long memmap_size = sizeof(struct page) * nr_pages;

        page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
        if (page)
                goto got_map_page;

        ret = vmalloc(memmap_size);
        if (ret)
                goto got_map_ptr;

        return NULL;
got_map_page:
        ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
        memset(ret, 0, memmap_size);

        return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
                                                  unsigned long nr_pages)
{
        return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
        if (is_vmalloc_addr(memmap))
                vfree(memmap);
        else
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
        unsigned long maps_section_nr, removing_section_nr, i;
        unsigned long magic;

        for (i = 0; i < nr_pages; i++, page++) {
                magic = (unsigned long) page->lru.next;

                BUG_ON(magic == NODE_INFO);

                maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
                removing_section_nr = page->private;

                /*
                 * When this function is called, the section being removed is
                 * in a logically offlined state, meaning all of its pages are
                 * isolated from the page allocator. If that section's memmap
                 * is placed in the same section, it must not be freed here:
                 * the page allocator could otherwise hand it out while the
                 * memory is about to be removed physically.
                 */
                if (maps_section_nr != removing_section_nr)
                        put_page_bootmem(page);
        }
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
        struct page *usemap_page;
        unsigned long nr_pages;

        if (!usemap)
                return;

        usemap_page = virt_to_page(usemap);
        /*
         * Check to see if the allocation came from hot-plug-add
         */
        if (PageSlab(usemap_page)) {
                kfree(usemap);
                if (memmap)
                        __kfree_section_memmap(memmap, PAGES_PER_SECTION);
                return;
        }

        /*
         * The usemap came from bootmem. It is packed with other usemaps
         * on the section which holds the pgdat from boot time. Just keep
         * it as is for now.
         */

        if (memmap) {
                struct page *memmap_page;
                memmap_page = virt_to_page(memmap);

                nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
                        >> PAGE_SHIFT;

                free_map_bootmem(memmap_page, nr_pages);
        }
}

/*
 * Returns the number of sections whose mem_maps were properly set up
 * (1 on success).  If this is <= 0, the memmap and usemap allocated here
 * were not consumed and are freed before returning.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                           int nr_pages)
{
        unsigned long section_nr = pfn_to_section_nr(start_pfn);
        struct pglist_data *pgdat = zone->zone_pgdat;
        struct mem_section *ms;
        struct page *memmap;
        unsigned long *usemap;
        unsigned long flags;
        int ret;

        /*
         * No locking needed here: sparse_index_init() does its own locking,
         * and it may sleep in kmalloc(), so it cannot be called under the
         * pgdat resize lock taken below.
         */
        ret = sparse_index_init(section_nr, pgdat->node_id);
        if (ret < 0 && ret != -EEXIST)
                return ret;
        memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
        if (!memmap)
                return -ENOMEM;
        usemap = __kmalloc_section_usemap();
        if (!usemap) {
                __kfree_section_memmap(memmap, nr_pages);
                return -ENOMEM;
        }

        pgdat_resize_lock(pgdat, &flags);

        ms = __pfn_to_section(start_pfn);
        if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
                ret = -EEXIST;
                goto out;
        }

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
        pgdat_resize_unlock(pgdat, &flags);
        if (ret <= 0) {
                kfree(usemap);
                __kfree_section_memmap(memmap, nr_pages);
        }
        return ret;
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
        struct page *memmap = NULL;
        unsigned long *usemap = NULL;

        if (ms->section_mem_map) {
                usemap = ms->pageblock_flags;
                memmap = sparse_decode_mem_map(ms->section_mem_map,
                                                __section_nr(ms));
                ms->section_mem_map = 0;
                ms->pageblock_flags = NULL;
        }

        free_section_usemap(memmap, usemap);
}
#endif