Commit 345c905d authored by Joonsoo Kim's avatar Joonsoo Kim Committed by Pekka Enberg
Browse files

slub: Make cpu partial slab support configurable

CPU partial support can introduce a level of indeterminism that is not
wanted in certain contexts (like a realtime kernel). Make it
configurable.
This patch is based on Christoph Lameter's "slub: Make cpu partial slab
support configurable V2".

Acked-by: Christoph Lameter <>
Signed-off-by: Joonsoo Kim <>
Signed-off-by: Pekka Enberg <>
parent e7efa615
......@@ -1511,6 +1511,17 @@ config SLOB
default y
depends on SLUB
bool "SLUB per cpu partial cache"
Per cpu partial caches accelerate object allocation and freeing
that is local to a processor at the price of more indeterminism
in the latency of the free. On overflow these caches will be cleared
which requires the taking of locks that may cause latency spikes.
Typically one would choose no for a realtime system.
bool "Allow mmapped anonymous memory to be uninitialized"
depends on EXPERT && !MMU
......@@ -122,6 +122,15 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
return !kmem_cache_debug(s);
return false;
* Issues still to be resolved:
......@@ -1572,7 +1581,8 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
put_cpu_partial(s, page, 0);
if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
if (!kmem_cache_has_cpu_partial(s)
|| available > s->cpu_partial / 2)
......@@ -1883,6 +1893,7 @@ redo:
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
......@@ -1937,6 +1948,7 @@ static void unfreeze_partials(struct kmem_cache *s,
discard_slab(s, page);
stat(s, FREE_SLAB);
......@@ -1950,6 +1962,7 @@ static void unfreeze_partials(struct kmem_cache *s,
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
struct page *oldpage;
int pages;
int pobjects;
......@@ -1989,6 +2002,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->next = oldpage;
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
......@@ -2497,7 +2511,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
if ((!new.inuse || !prior) && !was_frozen) {
if (!kmem_cache_debug(s) && !prior)
if (kmem_cache_has_cpu_partial(s) && !prior)
* Slab was on no list before and will be partially empty
......@@ -2552,8 +2566,9 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* Objects left in the slab. If it was not on the partial list before
* then add it.
if (kmem_cache_debug(s) && unlikely(!prior)) {
remove_full(s, page);
if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
if (kmem_cache_debug(s))
remove_full(s, page);
add_partial(n, page, DEACTIVATE_TO_TAIL);
......@@ -3061,7 +3076,7 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
* per node list when we run out of per cpu objects. We only fetch 50%
* to keep some capacity around for frees.
if (kmem_cache_debug(s))
if (!kmem_cache_has_cpu_partial(s))
s->cpu_partial = 0;
else if (s->size >= PAGE_SIZE)
s->cpu_partial = 2;
......@@ -4456,7 +4471,7 @@ static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
err = strict_strtoul(buf, 10, &objects);
if (err)
return err;
if (objects && kmem_cache_debug(s))
if (objects && !kmem_cache_has_cpu_partial(s))
return -EINVAL;
s->cpu_partial = objects;
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment