rxrpc: Fix generation of serial numbers to skip zero
[platform/kernel/linux-rpi.git] / mm / slab_common.c
diff --git a/mm/slab_common.c b/mm/slab_common.c
index cd71f95..9bbffe8 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -479,7 +479,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
 
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-        int refcnt;
+        int err = -EBUSY;
         bool rcu_set;
 
         if (unlikely(!s) || !kasan_check_byte(s))
@@ -490,17 +490,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
         rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
 
-        refcnt = --s->refcount;
-        if (refcnt)
+        s->refcount--;
+        if (s->refcount)
                 goto out_unlock;
 
-        WARN(shutdown_cache(s),
-             "%s %s: Slab cache still has objects when called from %pS",
+        err = shutdown_cache(s);
+        WARN(err, "%s %s: Slab cache still has objects when called from %pS",
              __func__, s->name, (void *)_RET_IP_);
 out_unlock:
         mutex_unlock(&slab_mutex);
         cpus_read_unlock();
-        if (!refcnt && !rcu_set)
+        if (!err && !rcu_set)
                 kmem_cache_release(s);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
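
Taken together, the two hunks above switch kmem_cache_destroy() from keying the final kmem_cache_release() off the cached refcount to keying it off the return value of shutdown_cache(), so a cache that still holds live objects is no longer released behind the warning. As a minimal, hypothetical usage sketch of the contract the WARN enforces (the cache name, object size, and wrapper function are made up for illustration):

#include <linux/slab.h>
#include <linux/gfp.h>

static void example_cache_lifecycle(void)
{
        /* Hypothetical cache, for illustration only. */
        struct kmem_cache *cache =
                kmem_cache_create("example_cache", 64, 0, 0, NULL);
        void *obj;

        if (!cache)
                return;

        obj = kmem_cache_alloc(cache, GFP_KERNEL);
        kmem_cache_free(cache, obj);    /* every object must be freed first... */
        kmem_cache_destroy(cache);      /* ...otherwise shutdown_cache() fails and WARNs */
}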
@@ -745,24 +745,24 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
 
 size_t kmalloc_size_roundup(size_t size)
 {
-        struct kmem_cache *c;
+        if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
+                /*
+                 * The flags don't matter since size_index is common to all.
+                 * Neither does the caller for just getting ->object_size.
+                 */
+                return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+        }
 
-        /* Short-circuit the 0 size case. */
-        if (unlikely(size == 0))
-                return 0;
-        /* Short-circuit saturated "too-large" case. */
-        if (unlikely(size == SIZE_MAX))
-                return SIZE_MAX;
         /* Above the smaller buckets, size is a multiple of page size. */
-        if (size > KMALLOC_MAX_CACHE_SIZE)
+        if (size && size <= KMALLOC_MAX_SIZE)
                 return PAGE_SIZE << get_order(size);
 
         /*
-         * The flags don't matter since size_index is common to all.
-         * Neither does the caller for just getting ->object_size.
+         * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
+         * and very large size - kmalloc() may fail.
          */
-        c = kmalloc_slab(size, GFP_KERNEL, 0);
-        return c ? c->object_size : 0;
+        return size;
+
 }
 EXPORT_SYMBOL(kmalloc_size_roundup);
 
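For readability, kmalloc_size_roundup() after this hunk, reassembled from the diff above:

size_t kmalloc_size_roundup(size_t size)
{
        if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
                /*
                 * The flags don't matter since size_index is common to all.
                 * Neither does the caller for just getting ->object_size.
                 */
                return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
        }

        /* Above the smaller buckets, size is a multiple of page size. */
        if (size && size <= KMALLOC_MAX_SIZE)
                return PAGE_SIZE << get_order(size);

        /*
         * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
         * and very large size - kmalloc() may fail.
         */
        return size;
}

Callers pass a requested allocation size and get back the size kmalloc() would actually provide, so they can use the slack; 0 and too-large inputs now round-trip unchanged instead of being special-cased.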
@@ -895,10 +895,13 @@ void __init setup_kmalloc_cache_index_table(void)
 
 static unsigned int __kmalloc_minalign(void)
 {
+        unsigned int minalign = dma_get_cache_alignment();
+
         if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
             is_swiotlb_allocated())
-                return ARCH_KMALLOC_MINALIGN;
-        return dma_get_cache_alignment();
+                minalign = ARCH_KMALLOC_MINALIGN;
+
+        return max(minalign, arch_slab_minalign());
 }
 
 void __init
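
Reassembled from the hunk above, the resulting helper takes the maximum of the DMA cache alignment (or ARCH_KMALLOC_MINALIGN when SWIOTLB can bounce unaligned kmalloc buffers) and arch_slab_minalign(), rather than returning one or the other:

static unsigned int __kmalloc_minalign(void)
{
        unsigned int minalign = dma_get_cache_alignment();

        if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
            is_swiotlb_allocated())
                minalign = ARCH_KMALLOC_MINALIGN;

        return max(minalign, arch_slab_minalign());
}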