slub: Get rid of the node field
[platform/adaptation/renesas_rcar/renesas_kernel.git] / mm / slub.c
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd..aed8792 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1490,12 +1490,12 @@ static inline void remove_partial(struct kmem_cache_node *n,
 }
 
 /*
- * Lock slab, remove from the partial list and put the object into the
- * per cpu freelist.
+ * Remove slab from the partial list, freeze it and
+ * return the pointer to the freelist.
  *
  * Returns a list of objects or NULL if it fails.
  *
- * Must hold list_lock.
+ * Must hold list_lock since we modify the partial list.
  */
 static inline void *acquire_slab(struct kmem_cache *s,
 		struct kmem_cache_node *n, struct page *page,
@@ -1510,22 +1510,24 @@ static inline void *acquire_slab(struct kmem_cache *s,
 	 * The old freelist is the list of objects for the
 	 * per cpu allocation list.
 	 */
-	do {
-		freelist = page->freelist;
-		counters = page->counters;
-		new.counters = counters;
-		if (mode)
-			new.inuse = page->objects;
+	freelist = page->freelist;
+	counters = page->counters;
+	new.counters = counters;
+	if (mode)
+		new.inuse = page->objects;
 
-		VM_BUG_ON(new.frozen);
-		new.frozen = 1;
+	VM_BUG_ON(new.frozen);
+	new.frozen = 1;
 
-	} while (!__cmpxchg_double_slab(s, page,
+	if (!__cmpxchg_double_slab(s, page,
 			freelist, counters,
 			NULL, new.counters,
-			"lock and freeze"));
+			"acquire_slab"))
+		return NULL;
 
 	remove_partial(n, page);
+	WARN_ON(!freelist);
 	return freelist;
 }
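Note: the key change in this hunk is that acquire_slab() no longer loops on the cmpxchg. The caller holds list_lock, so a failed cmpxchg means another CPU raced us on this page's freelist; retrying the same slab is pointless, and returning NULL lets the caller move on to the next partial slab. A minimal user-space sketch of this single-attempt pattern, using C11 atomics, with invented names (toy_page, toy_acquire) and not kernel code:

#include <stdatomic.h>
#include <stddef.h>

struct toy_page {
	_Atomic(void *) freelist;
};

static void *toy_acquire(struct toy_page *page)
{
	void *freelist = atomic_load(&page->freelist);

	/* One attempt only: on failure, let the caller try another page. */
	if (!atomic_compare_exchange_strong(&page->freelist, &freelist, NULL))
		return NULL;

	return freelist;
}

int main(void)
{
	static int objects[4];
	struct toy_page page = { .freelist = objects };

	/* First caller takes the whole freelist; later attempts see NULL. */
	void *got = toy_acquire(&page);
	return got == objects ? 0 : 1;
}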
@@ -1559,7 +1561,6 @@ static void *get_partial_node(struct kmem_cache *s,
 		if (!object) {
 			c->page = page;
-			c->node = page_to_nid(page);
 			stat(s, ALLOC_FROM_PARTIAL);
 			object = t;
 			available = page->objects - page->inuse;
@@ -2055,7 +2056,7 @@ static void flush_all(struct kmem_cache *s)
 static inline int node_match(struct kmem_cache_cpu *c, int node)
 {
 #ifdef CONFIG_NUMA
-	if (node != NUMA_NO_NODE && c->node != node)
+	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
 		return 0;
 #endif
 	return 1;
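With the cached c->node field gone, node_match() derives the node from the cpu slab page itself. Reconstructed from the hunk above (only the closing brace is inferred), the function now reads:

static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	/* Ask the page for its node instead of trusting a cached copy */
	if (node != NUMA_NO_NODE && page_to_nid(c->page) != node)
		return 0;
#endif
	return 1;
}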
@@ -2127,10 +2128,16 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 			int node, struct kmem_cache_cpu **pc)
 {
-	void *object;
-	struct kmem_cache_cpu *c;
-	struct page *page = new_slab(s, flags, node);
+	void *freelist;
+	struct kmem_cache_cpu *c = *pc;
+	struct page *page;
+
+	freelist = get_partial(s, flags, node, c);
 
+	if (freelist)
+		return freelist;
+
+	page = new_slab(s, flags, node);
 	if (page) {
 		c = __this_cpu_ptr(s->cpu_slab);
 		if (c->page)
@@ -2140,17 +2147,16 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 		 * No other reference to the page yet so we can
 		 * muck around with it freely without cmpxchg
 		 */
-		object = page->freelist;
+		freelist = page->freelist;
 		page->freelist = NULL;
 
 		stat(s, ALLOC_SLAB);
-		c->node = page_to_nid(page);
 		c->page = page;
 		*pc = c;
 	} else
-		object = NULL;
+		freelist = NULL;
 
-	return object;
+	return freelist;
 }
 
 /*
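Taken with the previous hunk, new_slab_objects() now tries the node's partial lists first via get_partial() and only allocates a fresh slab when that fails; this is what lets __slab_alloc() below drop its own get_partial() call. The visible skeleton of the function after the change (the middle of the if (page) branch falls outside the diff context and is elided):

static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
			int node, struct kmem_cache_cpu **pc)
{
	void *freelist;
	struct kmem_cache_cpu *c = *pc;
	struct page *page;

	/* Cheap case first: reuse a partially filled slab */
	freelist = get_partial(s, flags, node, c);

	if (freelist)
		return freelist;

	page = new_slab(s, flags, node);
	if (page) {
		c = __this_cpu_ptr(s->cpu_slab);
		if (c->page)
			...	/* elided: lines outside the diff context */

		/*
		 * No other reference to the page yet so we can
		 * muck around with it freely without cmpxchg
		 */
		freelist = page->freelist;
		page->freelist = NULL;

		stat(s, ALLOC_SLAB);
		c->page = page;
		*pc = c;
	} else
		freelist = NULL;

	return freelist;
}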
@@ -2170,6 +2176,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 	do {
 		freelist = page->freelist;
 		counters = page->counters;
+
 		new.counters = counters;
 		VM_BUG_ON(!new.frozen);
@@ -2203,7 +2210,7 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 			  unsigned long addr, struct kmem_cache_cpu *c)
 {
-	void **object;
+	void *freelist;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -2219,6 +2226,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	if (!c->page)
 		goto new_slab;
 redo:
+
 	if (unlikely(!node_match(c, node))) {
 		stat(s, ALLOC_NODE_MISMATCH);
 		deactivate_slab(s, c);
@@ -2226,15 +2234,15 @@ redo:
 	}
 
 	/* must check again c->freelist in case of cpu migration or IRQ */
-	object = c->freelist;
-	if (object)
+	freelist = c->freelist;
+	if (freelist)
 		goto load_freelist;
 
 	stat(s, ALLOC_SLOWPATH);
 
-	object = get_freelist(s, c->page);
+	freelist = get_freelist(s, c->page);
 
-	if (!object) {
+	if (!freelist) {
 		c->page = NULL;
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2243,50 +2251,48 @@ redo:
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
-	c->freelist = get_freepointer(s, object);
+	/*
+	 * freelist is pointing to the list of objects to be used.
+	 * page is pointing to the page from which the objects are obtained.
+	 * That page must be frozen for per cpu allocations to work.
+	 */
+	VM_BUG_ON(!c->page->frozen);
+	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 
 new_slab:
 
 	if (c->partial) {
 		c->page = c->partial;
 		c->partial = c->page->next;
-		c->node = page_to_nid(c->page);
 		stat(s, CPU_PARTIAL_ALLOC);
 		c->freelist = NULL;
 		goto redo;
 	}
 
-	/* Then do expensive stuff like retrieving pages from the partial lists */
-	object = get_partial(s, gfpflags, node, c);
+	freelist = new_slab_objects(s, gfpflags, node, &c);
 
-	if (unlikely(!object)) {
+	if (unlikely(!freelist)) {
+		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+			slab_out_of_memory(s, gfpflags, node);
 
-		object = new_slab_objects(s, gfpflags, node, &c);
-
-		if (unlikely(!object)) {
-			if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-				slab_out_of_memory(s, gfpflags, node);
-
-			local_irq_restore(flags);
-			return NULL;
-		}
+		local_irq_restore(flags);
+		return NULL;
 	}
 
 	if (likely(!kmem_cache_debug(s)))
 		goto load_freelist;
 
 	/* Only entered in the debug case */
-	if (!alloc_debug_processing(s, c->page, object, addr))
+	if (!alloc_debug_processing(s, c->page, freelist, addr))
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 
-	c->freelist = get_freepointer(s, object);
+	c->freelist = get_freepointer(s, freelist);
 	deactivate_slab(s, c);
-	c->node = NUMA_NO_NODE;
 	local_irq_restore(flags);
-	return object;
+	return freelist;
 }
 
 /*
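The slow path comes out flatter as a result: a single new_slab_objects() call replaces the get_partial()-then-new_slab_objects() nesting, leaving one failure branch. Reconstructed from the added lines above, the out-of-memory path now reads:

	freelist = new_slab_objects(s, gfpflags, node, &c);

	if (unlikely(!freelist)) {
		/* Both the partial lists and a fresh slab failed */
		if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
			slab_out_of_memory(s, gfpflags, node);

		local_irq_restore(flags);
		return NULL;
	}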
@@ -4497,30 +4503,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 		for_each_possible_cpu(cpu) {
 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-			int node = ACCESS_ONCE(c->node);
+			int node;
 			struct page *page;
 
-			if (node < 0)
-				continue;
 			page = ACCESS_ONCE(c->page);
-			if (page) {
-				if (flags & SO_TOTAL)
-					x = page->objects;
-				else if (flags & SO_OBJECTS)
-					x = page->inuse;
-				else
-					x = 1;
+			if (!page)
+				continue;
 
-				total += x;
-				nodes[node] += x;
-			}
-			page = c->partial;
+			node = page_to_nid(page);
+			if (flags & SO_TOTAL)
+				x = page->objects;
+			else if (flags & SO_OBJECTS)
+				x = page->inuse;
+			else
+				x = 1;
+
+			total += x;
+			nodes[node] += x;
+
+			page = ACCESS_ONCE(c->partial);
 			if (page) {
 				x = page->pobjects;
 				total += x;
 				nodes[node] += x;
 			}
+
 			per_cpu[node]++;
 		}
 	}
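Finally, since there is no c->node to consult, show_slab_objects() now skips CPUs with no cpu slab outright and derives the node with page_to_nid(); c->partial is also read with ACCESS_ONCE() to match the racy read of c->page. Assembled from the hunk above, the per-cpu loop body after the change:

		for_each_possible_cpu(cpu) {
			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
			int node;
			struct page *page;

			page = ACCESS_ONCE(c->page);
			if (!page)
				continue;

			/* The page, not a cached field, tells us the node */
			node = page_to_nid(page);
			if (flags & SO_TOTAL)
				x = page->objects;
			else if (flags & SO_OBJECTS)
				x = page->inuse;
			else
				x = 1;

			total += x;
			nodes[node] += x;

			page = ACCESS_ONCE(c->partial);
			if (page) {
				x = page->pobjects;
				total += x;
				nodes[node] += x;
			}

			per_cpu[node]++;
		}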