Commit 1e4dd946 authored by Steven Rostedt, committed by Linus Torvalds

slub: do not assert not having lock in removing freed partial

Vladimir reported the following issue:

Commit c65c1877 ("slub: use lockdep_assert_held") requires
remove_partial() to be called with n->list_lock held, but free_partial()
called from kmem_cache_close() on cache destruction does not follow this
rule, leading to a warning:

  WARNING: CPU: 0 PID: 2787 at mm/slub.c:1536 __kmem_cache_shutdown+0x1b2/0x1f0()
  Modules linked in:
  CPU: 0 PID: 2787 Comm: modprobe Tainted: G        W    3.14.0-rc1-mm1+ #1
  Hardware name:
   0000000000000600 ffff88003ae1dde8 ffffffff816d9583 0000000000000600
   0000000000000000 ffff88003ae1de28 ffffffff8107c107 0000000000000000
   ffff880037ab2b00 ffff88007c240d30 ffffea0001ee5280 ffffea0001ee52a0
  Call Trace:
    __kmem_cache_shutdown+0x1b2/0x1f0
    kmem_cache_destroy+0x43/0xf0
    xfs_destroy_zones+0x103/0x110 [xfs]
    exit_xfs_fs+0x38/0x4e4 [xfs]
    SyS_delete_module+0x19a/0x1f0
    system_call_fastpath+0x16/0x1b

His solution was to add a spinlock in order to quiet lockdep.  Although
there would be no contention in taking the lock, that lock also requires
interrupts to be disabled, which has a larger impact on the system.
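
For illustration, the rejected approach would have looked something like
the sketch below.  This is a reconstruction, not Vladimir's actual patch;
because n->list_lock is taken from interrupt context elsewhere in slub,
the lock would have to be the irqsave variant:

  static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
  {
  	struct page *page, *h;
  	unsigned long flags;

  	list_for_each_entry_safe(page, h, &n->partial, lru) {
  		if (!page->inuse) {
  			/* lock taken purely to satisfy lockdep; there is
  			 * no concurrent access at cache destruction */
  			spin_lock_irqsave(&n->list_lock, flags);
  			remove_partial(n, page);
  			spin_unlock_irqrestore(&n->list_lock, flags);
  			discard_slab(s, page);
  		}
  		/* error path for busy slabs omitted from this sketch */
  	}
  }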

Instead of adding a spinlock where it serves no purpose beyond silencing
lockdep, add a __remove_partial() function that does not check whether
list_lock is held, as nothing can hold it while the cache is being
destroyed.

Also add a __add_partial() function that skips the lock validation as
well, since it is not needed during cache creation.
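
The resulting split follows a common kernel pattern: a bare
double-underscore helper with no locking checks, plus a wrapper that
asserts the lock for ordinary callers.  A minimal generic sketch of the
pattern (the demo_node names are illustrative, not from mm/slub.c):

  #include <linux/list.h>
  #include <linux/lockdep.h>
  #include <linux/spinlock.h>

  struct demo_node {
  	spinlock_t lock;
  	struct list_head items;
  };

  /* Bare helper: no lockdep check.  Safe only on paths where no
   * other CPU can reach the node, e.g. before it is published or
   * during teardown after the last user is gone. */
  static inline void __demo_del(struct list_head *item)
  {
  	list_del(item);
  }

  /* Wrapper for ordinary callers: verifies the locking rule,
   * then defers to the bare helper. */
  static inline void demo_del(struct demo_node *n, struct list_head *item)
  {
  	lockdep_assert_held(&n->lock);
  	__demo_del(item);
  }

In the patch below, free_partial() and early_kmem_cache_node_alloc() are
exactly such single-threaded paths, so they call __remove_partial() and
__add_partial() directly.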

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Vladimir Davydov <vdavydov@parallels.com>
Suggested-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 25fba9be
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1518,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 /*
  * Management of partially allocated slabs.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
-	lockdep_assert_held(&n->list_lock);
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1530,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
+{
+	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
+
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
+	list_del(&page->lru);
+	n->nr_partial--;
+}
+
 static inline void remove_partial(struct kmem_cache_node *n,
 					struct page *page)
 {
 	lockdep_assert_held(&n->list_lock);
-	list_del(&page->lru);
-	n->nr_partial--;
+	__remove_partial(n, page);
 }
 
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
@@ -2904,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	/*
-	 * the lock is for lockdep's sake, not for any actual
-	 * race protection
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
 	 */
-	spin_lock(&n->list_lock);
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
-	spin_unlock(&n->list_lock);
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3195,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
...