Commit bf5eb3de authored by Tejun Heo, committed by Linus Torvalds
Browse files

slub: separate out sysfs_slab_release() from sysfs_slab_remove()

Separate out slub sysfs removal and release, and call the former earlier
from __kmem_cache_shutdown().  There's no reason to defer sysfs removal
through RCU and this will later allow us to remove sysfs files way
earlier during memory cgroup offline instead of release.

Link: http://lkml.kernel.org/r/20170117235411.9408-3-tj@kernel.org


Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 290b6a58
...@@ -113,9 +113,9 @@ struct kmem_cache { ...@@ -113,9 +113,9 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS #define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *); void sysfs_slab_release(struct kmem_cache *);
#else #else
static inline void sysfs_slab_remove(struct kmem_cache *s) static inline void sysfs_slab_release(struct kmem_cache *s)
{ {
} }
#endif #endif
......
...@@ -483,7 +483,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier) ...@@ -483,7 +483,7 @@ static void release_caches(struct list_head *release, bool need_rcu_barrier)
list_for_each_entry_safe(s, s2, release, list) { list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS #ifdef SLAB_SUPPORTS_SYSFS
sysfs_slab_remove(s); sysfs_slab_release(s);
#else #else
slab_kmem_cache_release(s); slab_kmem_cache_release(s);
#endif #endif
......
...@@ -214,11 +214,13 @@ enum track_item { TRACK_ALLOC, TRACK_FREE }; ...@@ -214,11 +214,13 @@ enum track_item { TRACK_ALLOC, TRACK_FREE };
static int sysfs_slab_add(struct kmem_cache *); static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *); static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s); static void memcg_propagate_slab_attrs(struct kmem_cache *s);
static void sysfs_slab_remove(struct kmem_cache *s);
#else #else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
{ return 0; } { return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { } static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }
#endif #endif
static inline void stat(const struct kmem_cache *s, enum stat_item si) static inline void stat(const struct kmem_cache *s, enum stat_item si)
...@@ -3687,6 +3689,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s) ...@@ -3687,6 +3689,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
if (n->nr_partial || slabs_node(s, node)) if (n->nr_partial || slabs_node(s, node))
return 1; return 1;
} }
sysfs_slab_remove(s);
return 0; return 0;
} }
...@@ -5637,7 +5640,7 @@ out_del_kobj: ...@@ -5637,7 +5640,7 @@ out_del_kobj:
goto out; goto out;
} }
void sysfs_slab_remove(struct kmem_cache *s) static void sysfs_slab_remove(struct kmem_cache *s)
{ {
if (slab_state < FULL) if (slab_state < FULL)
/* /*
...@@ -5651,7 +5654,12 @@ void sysfs_slab_remove(struct kmem_cache *s) ...@@ -5651,7 +5654,12 @@ void sysfs_slab_remove(struct kmem_cache *s)
#endif #endif
kobject_uevent(&s->kobj, KOBJ_REMOVE); kobject_uevent(&s->kobj, KOBJ_REMOVE);
kobject_del(&s->kobj); kobject_del(&s->kobj);
kobject_put(&s->kobj); }
void sysfs_slab_release(struct kmem_cache *s)
{
if (slab_state >= FULL)
kobject_put(&s->kobj);
} }
/* /*
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment