Commit 729bd0b7 authored by Pekka Enberg, committed by Linus Torvalds

[PATCH] slab: extract cache_free_alien from __cache_free

Move alien object freeing to cache_free_alien() to reduce #ifdef clutter in
__cache_free().

Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 4da5eda0
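
The refactoring below is an instance of a common kernel idiom: hoist a CONFIG-guarded block out of a hot function into an inline helper, and pair it with a no-op stub for the non-CONFIG build so the caller carries no preprocessor conditionals. As a rough, self-contained illustration of the shape (not kernel code; free_remote(), CONFIG_REMOTE_NODES, and cache_free() are hypothetical names):

#include <stdio.h>

/* Hypothetical stand-in for CONFIG_NUMA; set to 0 for the stub build. */
#define CONFIG_REMOTE_NODES 1

#if CONFIG_REMOTE_NODES
/*
 * Returns nonzero when the object belongs to another node and was
 * handled here, so the caller can skip its local fast path.
 */
static inline int free_remote(void *objp, int owner_node, int this_node)
{
	if (owner_node == this_node)
		return 0;		/* local object: caller handles it */
	printf("%p handed back to node %d\n", objp, owner_node);
	return 1;
}
#else
/* Stub: with the option compiled out, every free takes the fast path. */
static inline int free_remote(void *objp, int owner_node, int this_node)
{
	return 0;
}
#endif

/* The caller reads the same with or without the option. */
static void cache_free(void *objp, int owner_node)
{
	int this_node = 0;		/* pretend we always run on node 0 */

	if (free_remote(objp, owner_node, this_node))
		return;
	printf("%p freed into node %d's local cache\n", objp, this_node);
}

int main(void)
{
	int a, b;

	cache_free(&a, 0);	/* local: fast path */
	cache_free(&b, 1);	/* remote: handled by the helper */
	return 0;
}

When the option is off, the stub's unconditional return 0 lets the compiler fold the whole remote path away, so the fast path costs nothing extra.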
@@ -1024,6 +1024,40 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 		}
 	}
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	struct slab *slabp = virt_to_slab(objp);
+	int nodeid = slabp->nodeid;
+	struct kmem_list3 *l3;
+	struct array_cache *alien = NULL;
+
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(slabp->nodeid == numa_node_id()))
+		return 0;
+
+	l3 = cachep->nodelists[numa_node_id()];
+	STATS_INC_NODEFREES(cachep);
+	if (l3->alien && l3->alien[nodeid]) {
+		alien = l3->alien[nodeid];
+		spin_lock(&alien->lock);
+		if (unlikely(alien->avail == alien->limit)) {
+			STATS_INC_ACOVERFLOW(cachep);
+			__drain_alien_cache(cachep, alien, nodeid);
+		}
+		alien->entry[alien->avail++] = objp;
+		spin_unlock(&alien->lock);
+	} else {
+		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
+		free_block(cachep, &objp, 1, nodeid);
+		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
+	}
+	return 1;
+}
+
 #else
 
 #define drain_alien_cache(cachep, alien) do { } while (0)
@@ -1038,6 +1072,11 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	return 0;
+}
+
 #endif
 
 static int cpuup_callback(struct notifier_block *nfb,
@@ -3087,41 +3126,9 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	/* Make sure we are not freeing a object from another
-	 * node to the array cache on this cpu.
-	 */
-#ifdef CONFIG_NUMA
-	{
-		struct slab *slabp;
-		slabp = virt_to_slab(objp);
-		if (unlikely(slabp->nodeid != numa_node_id())) {
-			struct array_cache *alien = NULL;
-			int nodeid = slabp->nodeid;
-			struct kmem_list3 *l3;
-
-			l3 = cachep->nodelists[numa_node_id()];
-			STATS_INC_NODEFREES(cachep);
-			if (l3->alien && l3->alien[nodeid]) {
-				alien = l3->alien[nodeid];
-				spin_lock(&alien->lock);
-				if (unlikely(alien->avail == alien->limit)) {
-					STATS_INC_ACOVERFLOW(cachep);
-					__drain_alien_cache(cachep,
-							    alien, nodeid);
-				}
-				alien->entry[alien->avail++] = objp;
-				spin_unlock(&alien->lock);
-			} else {
-				spin_lock(&(cachep->nodelists[nodeid])->
-					  list_lock);
-				free_block(cachep, &objp, 1, nodeid);
-				spin_unlock(&(cachep->nodelists[nodeid])->
-					  list_lock);
-			}
-			return;
-		}
-	}
-#endif
+	if (cache_free_alien(cachep, objp))
+		return;
+
 	if (likely(ac->avail < ac->limit)) {
 		STATS_INC_FREEHIT(cachep);
 		ac->entry[ac->avail++] = objp;
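
A detail worth noting in the extracted cache_free_alien(): when the per-node alien array is full (alien->avail == alien->limit), it is drained back to the owning node before the new pointer is stashed, so cross-node frees are batched instead of taking the remote node's list_lock on every free. A minimal sketch of that batching shape, with hypothetical names (struct batch_queue, queue_free(), drain()) and the alien->lock omitted:

#include <stddef.h>
#include <stdio.h>

#define QUEUE_LIMIT 4			/* stand-in for alien->limit */

/*
 * Hypothetical bounded batch queue mirroring array_cache's
 * entry/avail/limit trio.
 */
struct batch_queue {
	size_t avail;
	void *entry[QUEUE_LIMIT];
};

/* Stand-in for __drain_alien_cache(): hand the whole batch back at once. */
static void drain(struct batch_queue *q)
{
	for (size_t i = 0; i < q->avail; i++)
		printf("draining %p back to its home node\n", q->entry[i]);
	q->avail = 0;
}

/*
 * Mirrors the overflow check in cache_free_alien(): drain first when the
 * queue is full, then stash the pointer for a later batched free.
 */
static void queue_free(struct batch_queue *q, void *objp)
{
	if (q->avail == QUEUE_LIMIT)
		drain(q);
	q->entry[q->avail++] = objp;
}

int main(void)
{
	struct batch_queue q = { 0 };
	int objs[6];

	for (int i = 0; i < 6; i++)
		queue_free(&q, &objs[i]);
	drain(&q);			/* final flush of the remainder */
	return 0;
}

The payoff mirrors the patch: the expensive cross-node work (free_block() under the remote nodelist's list_lock) runs once per batch rather than once per object.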