projects
/
platform
/
adaptation
/
renesas_rcar
/
renesas_kernel.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm: avoid increase sizeof(struct page) due to split page table lock
[platform/adaptation/renesas_rcar/renesas_kernel.git]
/
mm
/
vmalloc.c
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1074543..0fdf968 100644 (file)
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -359,6 +359,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
+	/*
+	 * Only scan the relevant parts containing pointers to other objects
+	 * to avoid false negatives.
+	 */
+	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
+
 retry:
 	spin_lock(&vmap_area_lock);
 	/*
@@ -1546,7 +1552,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 			    gfp_t gfp_mask, pgprot_t prot,
 			    int node, const void *caller);
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-				 pgprot_t prot, int node, const void *caller)
+				 pgprot_t prot, int node)
 {
 	const int order = 0;
 	struct page **pages;
@@ -1560,13 +1566,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
-				PAGE_KERNEL, node, caller);
+				PAGE_KERNEL, node, area->caller);
 		area->flags |= VM_VPAGES;
 	} else {
 		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	area->pages = pages;
-	area->caller = caller;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
 		kfree(area);
@@ -1577,7 +1582,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 		struct page *page;
 		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
-		if (node < 0)
+		if (node == NUMA_NO_NODE)
 			page = alloc_page(tmp_mask);
 		else
 			page = alloc_pages_node(node, tmp_mask, order);
@@ -1634,9 +1639,9 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!area)
 		goto fail;
 
-	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
 	if (!addr)
-		goto fail;
+		return NULL;
 
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
@@ -1646,11 +1651,11 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	clear_vm_uninitialized_flag(area);
 
 	/*
-	 * A ref_count = 3 is needed because the vm_struct and vmap_area
-	 * structures allocated in the __get_vm_area_node() function contain
-	 * references to the virtual address of the vmalloc'ed block.
+	 * A ref_count = 2 is needed because vm_struct allocated in
+	 * __get_vm_area_node() contains a reference to the virtual address of
+	 * the vmalloc'ed block.
 	 */
-	kmemleak_alloc(addr, real_size, 3, gfp_mask);
+	kmemleak_alloc(addr, real_size, 2, gfp_mask);
 
 	return addr;
@@ -2563,6 +2568,11 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v)
 	if (!counters)
 		return;
 
+	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
+	smp_rmb();
+	if (v->flags & VM_UNINITIALIZED)
+		return;
+
 	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 	for (nr = 0; nr < v->nr_pages; nr++)
@@ -2579,23 +2589,15 @@ static int s_show(struct seq_file *m, void *p)
 	struct vmap_area *va = p;
 	struct vm_struct *v;
 
-	if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+	/*
+	 * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
+	 * behalf of vmap area is being tear down or vm_map_ram allocation.
+	 */
+	if (!(va->flags & VM_VM_AREA))
 		return 0;
 
-	if (!(va->flags & VM_VM_AREA)) {
-		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
-			(void *)va->va_start, (void *)va->va_end,
-			va->va_end - va->va_start);
-		return 0;
-	}
-
 	v = va->vm;
 
-	/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
-	smp_rmb();
-	if (v->flags & VM_UNINITIALIZED)
-		return 0;
-
 	seq_printf(m, "0x%pK-0x%pK %7ld",
 		v->addr, v->addr + v->size, v->size);