diff --git a/mm/percpu.c b/mm/percpu.c
index 0d10def..a2a54a8 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -612,7 +612,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
        chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
                                                sizeof(chunk->map[0]));
        if (!chunk->map) {
-               kfree(chunk);
+               pcpu_mem_free(chunk, pcpu_chunk_struct_size);
                return NULL;
        }
 
@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-       ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
+       ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
        if (!ptr)
                return NULL;
        ai = ptr;
@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
  */
 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
 {
-       free_bootmem(__pa(ai), ai->__ai_size);
+       memblock_free_early(__pa(ai), ai->__ai_size);
 }
 
 /**
@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
        PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
        /* process group information and build config tables accordingly */
-       group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
-       group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
-       unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
-       unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
+       group_offsets = memblock_virt_alloc(ai->nr_groups *
+                                            sizeof(group_offsets[0]), 0);
+       group_sizes = memblock_virt_alloc(ai->nr_groups *
+                                          sizeof(group_sizes[0]), 0);
+       unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
+       unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
 
        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                unit_map[cpu] = UINT_MAX;
@@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * empty chunks.
         */
        pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
-       pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+       pcpu_slot = memblock_virt_alloc(
+                       pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
        for (i = 0; i < pcpu_nr_slots; i++)
                INIT_LIST_HEAD(&pcpu_slot[i]);
 
@@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
         * covers static area + reserved area (mostly used for module
         * static percpu allocation).
         */
-       schunk = alloc_bootmem(pcpu_chunk_struct_size);
+       schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
        INIT_LIST_HEAD(&schunk->list);
        schunk->base_addr = base_addr;
        schunk->map = smap;
@@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 
        /* init dynamic chunk if necessary */
        if (dyn_size) {
-               dchunk = alloc_bootmem(pcpu_chunk_struct_size);
+               dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
                INIT_LIST_HEAD(&dchunk->list);
                dchunk->base_addr = base_addr;
                dchunk->map = dmap;
@@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-       areas = alloc_bootmem_nopanic(areas_size);
+       areas = memblock_virt_alloc_nopanic(areas_size, 0);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
@@ -1686,10 +1689,10 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
        max_distance += ai->unit_size;
 
        /* warn if maximum distance is further than 75% of vmalloc space */
-       if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
+       if (max_distance > VMALLOC_TOTAL * 3 / 4) {
                pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
                           "space 0x%lx\n", max_distance,
-                          (unsigned long)(VMALLOC_END - VMALLOC_START));
+                          VMALLOC_TOTAL);
 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
                /* and fail if we have fallback */
                rc = -EINVAL;
@@ -1712,7 +1715,7 @@ out_free_areas:
 out_free:
        pcpu_free_alloc_info(ai);
        if (areas)
-               free_bootmem(__pa(areas), areas_size);
+               memblock_free_early(__pa(areas), areas_size);
        return rc;
 }
 #endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        /* unaligned allocations can't be freed, round up to page size */
        pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
                               sizeof(pages[0]));
-       pages = alloc_bootmem(pages_size);
+       pages = memblock_virt_alloc(pages_size, 0);
 
        /* allocate pages */
        j = 0;
@@ -1823,7 +1826,7 @@ enomem:
                free_fn(page_address(pages[j]), PAGE_SIZE);
        rc = -ENOMEM;
 out_free_ar:
-       free_bootmem(__pa(pages), pages_size);
+       memblock_free_early(__pa(pages), pages_size);
        pcpu_free_alloc_info(ai);
        return rc;
 }
@@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
 {
-       return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
+       return  memblock_virt_alloc_from_nopanic(
+                       size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
 {
-       free_bootmem(__pa(ptr), size);
+       memblock_free_early(__pa(ptr), size);
 }
 
 void __init setup_per_cpu_areas(void)
@@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void)
        void *fc;
 
        ai = pcpu_alloc_alloc_info(1, 1);
-       fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       fc = memblock_virt_alloc_from_nopanic(unit_size,
+                                             PAGE_SIZE,
+                                             __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
        /* kmemleak tracks the percpu allocations separately */
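
For reference, below is a minimal sketch of the early-boot allocation pattern the hunks above convert to. It is not part of the patch: example_early_tables() and its nr/entry_size parameters are hypothetical, and it assumes a kernel of this era where the memblock_virt_alloc*() wrappers and memblock_free_early() are declared in <linux/bootmem.h>.

/*
 * Hypothetical illustration only -- not from mm/percpu.c.
 * Shows the memblock counterparts of the removed bootmem calls.
 */
#include <linux/bootmem.h>
#include <linux/errno.h>
#include <linux/init.h>

static int __init example_early_tables(size_t nr, size_t entry_size)
{
	void *must_have, *nice_to_have;

	/*
	 * Counterpart of alloc_bootmem(): returns zeroed memory and
	 * panics on failure, so no NULL check is needed.  An alignment
	 * of 0 means "use the default" (SMP_CACHE_BYTES), matching the
	 * 0 passed throughout the hunks above.
	 */
	must_have = memblock_virt_alloc(nr * entry_size, 0);

	/*
	 * Counterpart of alloc_bootmem_nopanic(): returns NULL instead
	 * of panicking, letting the caller fall back or bail out.
	 */
	nice_to_have = memblock_virt_alloc_nopanic(nr * entry_size, 0);
	if (!nice_to_have) {
		/* Early frees take a physical address, hence the __pa(). */
		memblock_free_early(__pa(must_have), nr * entry_size);
		return -ENOMEM;
	}

	/* ... populate the tables ... */
	return 0;
}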