Commit ce8eb6c4 authored by Christoph Lameter, committed by Pekka Enberg

slab: Rename list3/l3 to node



The list3 or l3 pointers point to per-node structures. Reflect
that in the names of the variables used.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
parent 2c59dd65
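
Before the diff itself, here is a minimal userspace sketch of the renaming pattern this commit applies throughout mm/slab.c: the per-node bookkeeping structure keeps its kmem_cache_node type, but the local variables and helpers that refer to it drop the historical "l3"/"list3" naming. The types, fields, and the show_free_objects() helper below are simplified stand-ins for illustration only, not kernel code.

#include <stdio.h>

struct kmem_cache_node {            /* simplified stand-in */
	unsigned long free_objects;
};

struct kmem_cache {                 /* simplified stand-in */
	struct kmem_cache_node *node[4];
};

static void show_free_objects(struct kmem_cache *cachep, int node)
{
	struct kmem_cache_node *n;  /* was: struct kmem_cache_node *l3; */

	n = cachep->node[node];     /* was: l3 = cachep->node[node];    */
	if (!n)
		return;
	printf("node %d: %lu free objects\n", node, n->free_objects);
}

int main(void)
{
	struct kmem_cache_node mem = { .free_objects = 42 };
	struct kmem_cache cache = { .node = { [0] = &mem } };

	show_free_objects(&cache, 0);
	return 0;
}

The same mechanical substitution (l3 -> n, list3 -> node, initkmem_list3 -> init_kmem_cache_node, SIZE_L3 -> SIZE_NODE, INDEX_L3 -> INDEX_NODE, and so on) accounts for every hunk that follows.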
@@ -306,13 +306,13 @@ struct kmem_cache_node {
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_cache_node __initdata initkmem_list3[NUM_INIT_LISTS];
static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
#define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES)
#define SIZE_NODE (2 * MAX_NUMNODES)
static int drain_freelist(struct kmem_cache *cache,
struct kmem_cache_node *l3, int tofree);
struct kmem_cache_node *n, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
@@ -321,9 +321,9 @@ static void cache_reap(struct work_struct *unused);
static int slab_early_init = 1;
#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
#define INDEX_L3 kmalloc_index(sizeof(struct kmem_cache_node))
#define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
static void kmem_list3_init(struct kmem_cache_node *parent)
static void kmem_cache_node_init(struct kmem_cache_node *parent)
{
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
@@ -538,15 +538,15 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
int q)
{
struct array_cache **alc;
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
int r;
l3 = cachep->node[q];
if (!l3)
n = cachep->node[q];
if (!n)
return;
lockdep_set_class(&l3->list_lock, l3_key);
alc = l3->alien;
lockdep_set_class(&n->list_lock, l3_key);
alc = n->alien;
/*
* FIXME: This check for BAD_ALIEN_MAGIC
* should go away when common slab code is taught to
@@ -583,14 +583,14 @@ static void init_node_lock_keys(int q)
return;
for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
struct kmem_cache *cache = kmalloc_caches[i];
if (!cache)
continue;
l3 = cache->node[q];
if (!l3 || OFF_SLAB(cache))
n = cache->node[q];
if (!n || OFF_SLAB(cache))
continue;
slab_set_lock_classes(cache, &on_slab_l3_key,
@@ -857,29 +857,29 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
struct array_cache *ac)
{
struct kmem_cache_node *l3 = cachep->node[numa_mem_id()];
struct kmem_cache_node *n = cachep->node[numa_mem_id()];
struct slab *slabp;
unsigned long flags;
if (!pfmemalloc_active)
return;
spin_lock_irqsave(&l3->list_lock, flags);
list_for_each_entry(slabp, &l3->slabs_full, list)
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &n->slabs_full, list)
if (is_slab_pfmemalloc(slabp))
goto out;
list_for_each_entry(slabp, &l3->slabs_partial, list)
list_for_each_entry(slabp, &n->slabs_partial, list)
if (is_slab_pfmemalloc(slabp))
goto out;
list_for_each_entry(slabp, &l3->slabs_free, list)
list_for_each_entry(slabp, &n->slabs_free, list)
if (is_slab_pfmemalloc(slabp))
goto out;
pfmemalloc_active = false;
out:
spin_unlock_irqrestore(&l3->list_lock, flags);
spin_unlock_irqrestore(&n->list_lock, flags);
}
static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
@@ -890,7 +890,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
if (unlikely(is_obj_pfmemalloc(objp))) {
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
if (gfp_pfmemalloc_allowed(flags)) {
clear_obj_pfmemalloc(&objp);
@@ -912,8 +912,8 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
* If there are empty slabs on the slabs_free list and we are
* being forced to refill the cache, mark this one !pfmemalloc.
*/
l3 = cachep->node[numa_mem_id()];
if (!list_empty(&l3->slabs_free) && force_refill) {
n = cachep->node[numa_mem_id()];
if (!list_empty(&n->slabs_free) && force_refill) {
struct slab *slabp = virt_to_slab(objp);
ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
clear_obj_pfmemalloc(&objp);
@@ -990,7 +990,7 @@ static int transfer_objects(struct array_cache *to,
#ifndef CONFIG_NUMA
#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)
#define reap_alien(cachep, n) do { } while (0)
static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
@@ -1062,33 +1062,33 @@ static void free_alien_cache(struct array_cache **ac_ptr)
static void __drain_alien_cache(struct kmem_cache *cachep,
struct array_cache *ac, int node)
{
struct kmem_cache_node *rl3 = cachep->node[node];
struct kmem_cache_node *n = cachep->node[node];
if (ac->avail) {
spin_lock(&rl3->list_lock);
spin_lock(&n->list_lock);
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
* into the free lists and getting them back later.
*/
if (rl3->shared)
transfer_objects(rl3->shared, ac, ac->limit);
if (n->shared)
transfer_objects(n->shared, ac, ac->limit);
free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
spin_unlock(&rl3->list_lock);
spin_unlock(&n->list_lock);
}
}
/*
* Called from cache_reap() to regularly drain alien caches round robin.
*/
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *l3)
static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
{
int node = __this_cpu_read(slab_reap_node);
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
if (n->alien) {
struct array_cache *ac = n->alien[node];
if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
__drain_alien_cache(cachep, ac, node);
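
The kernel comment in the hunk above explains why __drain_alien_cache() tries the remote node's shared array before falling back to free_block(): moving objects into the shared array avoids the cost of pushing them onto the slab lists and pulling them back later. The following userspace sketch illustrates that idea only; obj_queue, drain_alien_queue and the stand-in free_block() are assumed simplifications, not the kernel implementation (no locking, no real slab lists).

#include <stdio.h>

#define QUEUE_CAP 8

struct obj_queue {                      /* stand-in for struct array_cache */
	int avail;                      /* objects currently queued */
	int limit;                      /* capacity in use (<= QUEUE_CAP) */
	void *entry[QUEUE_CAP];
};

/* Move objects from the tail of 'from' into 'to' until 'to' reaches 'max'. */
static int transfer_objects(struct obj_queue *to, struct obj_queue *from, int max)
{
	int nr = from->avail;

	if (nr > max - to->avail)
		nr = max - to->avail;
	if (nr <= 0)
		return 0;
	for (int i = 0; i < nr; i++)
		to->entry[to->avail + i] = from->entry[from->avail - nr + i];
	to->avail += nr;
	from->avail -= nr;
	return nr;
}

/* Stand-in for free_block(): pretend to hand objects back to the slab lists. */
static void free_block(void **entry, int len)
{
	(void)entry;
	printf("free_block: returning %d objects to the node's slab lists\n", len);
}

/* Drain an "alien" queue: remote node's shared array first, slow path last. */
static void drain_alien_queue(struct obj_queue *alien, struct obj_queue *shared)
{
	if (!alien->avail)
		return;
	if (shared)
		transfer_objects(shared, alien, shared->limit);
	if (alien->avail)
		free_block(alien->entry, alien->avail);  /* whatever did not fit */
	alien->avail = 0;
}

int main(void)
{
	static int objs[6];
	struct obj_queue alien = { .avail = 6, .limit = QUEUE_CAP };
	struct obj_queue shared = { .avail = 0, .limit = 4 };

	for (int i = 0; i < 6; i++)
		alien.entry[i] = &objs[i];

	drain_alien_queue(&alien, &shared);
	printf("shared now holds %d objects\n", shared.avail);
	return 0;
}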
@@ -1118,7 +1118,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
struct slab *slabp = virt_to_slab(objp);
int nodeid = slabp->nodeid;
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
struct array_cache *alien = NULL;
int node;
@@ -1131,10 +1131,10 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
if (likely(slabp->nodeid == node))
return 0;
l3 = cachep->node[node];
n = cachep->node[node];
STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) {
alien = l3->alien[nodeid];
if (n->alien && n->alien[nodeid]) {
alien = n->alien[nodeid];
spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit)) {
STATS_INC_ACOVERFLOW(cachep);
@@ -1153,7 +1153,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
/*
* Allocates and initializes node for a node on each slab cache, used for
* either memory or cpu hotplug. If memory is being hot-added, the kmem_list3
* either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
* will be allocated off-node since memory is not yet online for the new node.
* When hotplugging memory or a cpu, existing node are not replaced if
* already in use.
@@ -1163,7 +1163,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
static int init_cache_node_node(int node)
{
struct kmem_cache *cachep;
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
const int memsize = sizeof(struct kmem_cache_node);
list_for_each_entry(cachep, &slab_caches, list) {
@@ -1173,11 +1173,11 @@ static int init_cache_node_node(int node)
* node has not already allocated this
*/
if (!cachep->node[node]) {
l3 = kmalloc_node(memsize, GFP_KERNEL, node);
if (!l3)
n = kmalloc_node(memsize, GFP_KERNEL, node);
if (!n)
return -ENOMEM;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
kmem_cache_node_init(n);
n->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
/*
@@ -1185,7 +1185,7 @@ static int init_cache_node_node(int node)
* go. slab_mutex is sufficient
* protection here.
*/
cachep->node[node] = l3;
cachep->node[node] = n;
}
spin_lock_irq(&cachep->node[node]->list_lock);
@@ -1200,7 +1200,7 @@ static int init_cache_node_node(int node)
static void __cpuinit cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *l3 = NULL;
struct kmem_cache_node *n = NULL;
int node = cpu_to_mem(cpu);
const struct cpumask *mask = cpumask_of_node(node);
@@ -1212,34 +1212,34 @@ static void __cpuinit cpuup_canceled(long cpu)
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
l3 = cachep->node[node];
n = cachep->node[node];
if (!l3)
if (!n)
goto free_array_cache;
spin_lock_irq(&l3->list_lock);
spin_lock_irq(&n->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
if (!cpumask_empty(mask)) {
spin_unlock_irq(&l3->list_lock);
spin_unlock_irq(&n->list_lock);
goto free_array_cache;
}
shared = l3->shared;
shared = n->shared;
if (shared) {
free_block(cachep, shared->entry,
shared->avail, node);
l3->shared = NULL;
n->shared = NULL;
}
alien = l3->alien;
l3->alien = NULL;
alien = n->alien;
n->alien = NULL;
spin_unlock_irq(&l3->list_lock);
spin_unlock_irq(&n->list_lock);
kfree(shared);
if (alien) {
@@ -1255,17 +1255,17 @@ free_array_cache:
* shrink each nodelist to its limit.
*/
list_for_each_entry(cachep, &slab_caches, list) {
l3 = cachep->node[node];
if (!l3)
n = cachep->node[node];
if (!n)
continue;
drain_freelist(cachep, l3, l3->free_objects);
drain_freelist(cachep, n, n->free_objects);
}
}
static int __cpuinit cpuup_prepare(long cpu)
{
struct kmem_cache *cachep;
struct kmem_cache_node *l3 = NULL;
struct kmem_cache_node *n = NULL;
int node = cpu_to_mem(cpu);
int err;
@@ -1273,7 +1273,7 @@ static int __cpuinit cpuup_prepare(long cpu)
* We need to do this right in the beginning since
* alloc_arraycache's are going to use this list.
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
* kmem_cache_node and not this cpu's kmem_cache_node
*/
err = init_cache_node_node(node);
if (err < 0)
@@ -1310,25 +1310,25 @@ static int __cpuinit cpuup_prepare(long cpu)
}
}
cachep->array[cpu] = nc;
l3 = cachep->node[node];
BUG_ON(!l3);
n = cachep->node[node];
BUG_ON(!n);
spin_lock_irq(&l3->list_lock);
if (!l3->shared) {
spin_lock_irq(&n->list_lock);
if (!n->shared) {
/*
* We are serialised from CPU_DEAD or
* CPU_UP_CANCELLED by the cpucontrol lock
*/
l3->shared = shared;
n->shared = shared;
shared = NULL;
}
#ifdef CONFIG_NUMA
if (!l3->alien) {
l3->alien = alien;
if (!n->alien) {
n->alien = alien;
alien = NULL;
}
#endif
spin_unlock_irq(&l3->list_lock);
spin_unlock_irq(&n->list_lock);
kfree(shared);
free_alien_cache(alien);
if (cachep->flags & SLAB_DEBUG_OBJECTS)
@@ -1383,9 +1383,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
case CPU_DEAD_FROZEN:
/*
* Even if all the cpus of a node are down, we don't free the
* kmem_list3 of any cache. This to avoid a race between
* kmem_cache_node of any cache. This to avoid a race between
* cpu_down, and a kmalloc allocation from another cpu for
* memory from the node of the cpu going down. The list3
* memory from the node of the cpu going down. The node
* structure is usually allocated from kmem_cache_create() and
* gets destroyed at kmem_cache_destroy().
*/
@@ -1419,16 +1419,16 @@ static int __meminit drain_cache_node_node(int node)
int ret = 0;
list_for_each_entry(cachep, &slab_caches, list) {
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
l3 = cachep->node[node];
if (!l3)
n = cachep->node[node];
if (!n)
continue;
drain_freelist(cachep, l3, l3->free_objects);
drain_freelist(cachep, n, n->free_objects);
if (!list_empty(&l3->slabs_full) ||
!list_empty(&l3->slabs_partial)) {
if (!list_empty(&n->slabs_full) ||
!list_empty(&n->slabs_partial)) {
ret = -EBUSY;
break;
}
@@ -1470,7 +1470,7 @@ out:
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
/*
* swap the static kmem_list3 with kmalloced memory
* swap the static kmem_cache_node with kmalloced memory
*/
static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
int nodeid)
@@ -1491,15 +1491,15 @@ static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *
}
/*
* For setting up all the kmem_list3s for cache whose buffer_size is same as
* size of kmem_list3.
* For setting up all the kmem_cache_node for cache whose buffer_size is same as
* size of kmem_cache_node.
*/
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
static void __init set_up_node(struct kmem_cache *cachep, int index)
{
int node;
for_each_online_node(node) {
cachep->node[node] = &initkmem_list3[index + node];
cachep->node[node] = &init_kmem_cache_node[index + node];
cachep->node[node]->next_reap = jiffies +
REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
@@ -1530,9 +1530,9 @@ void __init kmem_cache_init(void)
use_alien_caches = 0;
for (i = 0; i < NUM_INIT_LISTS; i++)
kmem_list3_init(&initkmem_list3[i]);
kmem_cache_node_init(&init_kmem_cache_node[i]);
set_up_list3s(kmem_cache, CACHE_CACHE);
set_up_node(kmem_cache, CACHE_CACHE);
/*
* Fragmentation resistance on low memory - only use bigger
@@ -1548,7 +1548,7 @@ void __init kmem_cache_init(void)
* kmem_cache structures of all caches, except kmem_cache itself:
* kmem_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* kmem_cache_node structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
* 2) Create the first kmalloc cache.
* The struct kmem_cache for the new cache is allocated normally.
@@ -1557,7 +1557,7 @@ void __init kmem_cache_init(void)
* head arrays.
* 4) Replace the __init data head arrays for kmem_cache and the first
* kmalloc cache with kmalloc allocated arrays.
* 5) Replace the __init data for kmem_list3 for kmem_cache and
* 5) Replace the __init data for kmem_cache_node for kmem_cache and
* the other cache's with kmalloc allocated memory.
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
@@ -1577,17 +1577,17 @@ void __init kmem_cache_init(void)
/*
* Initialize the caches that provide memory for the array cache and the
* kmem_list3 structures first. Without this, further allocations will
* kmem_cache_node structures first. Without this, further allocations will
* bug.
*/
kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
if (INDEX_AC != INDEX_L3)
kmalloc_caches[INDEX_L3] =
create_kmalloc_cache("kmalloc-l3",
kmalloc_size(INDEX_L3), ARCH_KMALLOC_FLAGS);
if (INDEX_AC != INDEX_NODE)
kmalloc_caches[INDEX_NODE] =
create_kmalloc_cache("kmalloc-node",
kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
slab_early_init = 0;
@@ -1619,19 +1619,19 @@ void __init kmem_cache_init(void)
kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
}
/* 5) Replace the bootstrap kmem_list3's */
/* 5) Replace the bootstrap kmem_cache_node */
{
int nid;
for_each_online_node(nid) {
init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
init_list(kmalloc_caches[INDEX_AC],
&initkmem_list3[SIZE_AC + nid], nid);
&init_kmem_cache_node[SIZE_AC + nid], nid);
if (INDEX_AC != INDEX_L3) {
init_list(kmalloc_caches[INDEX_L3],
&initkmem_list3[SIZE_L3 + nid], nid);
if (INDEX_AC != INDEX_NODE) {
init_list(kmalloc_caches[INDEX_NODE],
&init_kmem_cache_node[SIZE_NODE + nid], nid);
}
}
}
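
The hunk above, together with the "swap the static kmem_cache_node with kmalloced memory" comment earlier in the diff, describes the bootstrap handover done by init_list(): early in boot every node[] slot points at a static __initdata entry, and once the allocator is usable each entry is replaced by a heap copy. A minimal sketch of that pattern follows, with assumed simplified types and without the spinlock and list-head re-initialization the real kernel code performs after the copy (a plain memcpy cannot relocate embedded list heads).

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MAX_NODES 2

struct kmem_cache_node {               /* simplified stand-in */
	unsigned long free_objects;
};

struct kmem_cache {                    /* simplified stand-in */
	struct kmem_cache_node *node[MAX_NODES];
};

/* Stand-in for the static __initdata bootstrap array. */
static struct kmem_cache_node init_kmem_cache_node[MAX_NODES];

static void init_list(struct kmem_cache *cachep,
		      struct kmem_cache_node *list, int nodeid)
{
	struct kmem_cache_node *ptr = malloc(sizeof(*ptr));

	if (!ptr)
		abort();
	memcpy(ptr, list, sizeof(*ptr));   /* carry over the bootstrap state */
	cachep->node[nodeid] = ptr;        /* static entry is no longer used */
}

int main(void)
{
	struct kmem_cache cache = { .node = { NULL } };

	/* Phase 1: point every node slot at the static bootstrap entry. */
	for (int nid = 0; nid < MAX_NODES; nid++)
		cache.node[nid] = &init_kmem_cache_node[nid];

	/* Phase 2: replace each bootstrap entry with heap memory. */
	for (int nid = 0; nid < MAX_NODES; nid++)
		init_list(&cache, &init_kmem_cache_node[nid], nid);

	printf("node[0] now lives at %p\n", (void *)cache.node[0]);
	return 0;
}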
@@ -1697,7 +1697,7 @@ __initcall(cpucache_init);
static noinline void
slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
{
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
struct slab *slabp;
unsigned long flags;
int node;
@@ -1712,24 +1712,24 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
unsigned long active_slabs = 0, num_slabs = 0;
l3 = cachep->node[node];
if (!l3)
n = cachep->node[node];
if (!n)
continue;
spin_lock_irqsave(&l3->list_lock, flags);
list_for_each_entry(slabp, &l3->slabs_full, list) {
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(slabp, &n->slabs_full, list) {
active_objs += cachep->num;
active_slabs++;
}
list_for_each_entry(slabp, &l3->slabs_partial, list) {
list_for_each_entry(slabp, &n->slabs_partial, list) {
active_objs += slabp->inuse;
active_slabs++;
}
list_for_each_entry(slabp, &l3->slabs_free, list)
list_for_each_entry(slabp, &n->slabs_free, list)
num_slabs++;
free_objects += l3->free_objects;
spin_unlock_irqrestore(&l3->list_lock, flags);
free_objects += n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -2154,7 +2154,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
if (slab_state == DOWN) {
/*
* Note: Creation of first cache (kmem_cache).
* The setup_list3s is taken care
* The setup_node is taken care
* of by the caller of __kmem_cache_create
*/
cachep->array[smp_processor_id()] = &initarray_generic.cache;
@@ -2168,13 +2168,13 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
cachep->array[smp_processor_id()] = &initarray_generic.cache;
/*
* If the cache that's used by kmalloc(sizeof(kmem_list3)) is
* the second cache, then we need to set up all its list3s,
* If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
* the second cache, then we need to set up all its node/,
* otherwise the creation of further caches will BUG().
*/
set_up_list3s(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3)
slab_state = PARTIAL_L3;
set_up_node(cachep, SIZE_AC);
if (INDEX_AC == INDEX_NODE)
slab_state = PARTIAL_NODE;
else
slab_state = PARTIAL_ARRAYCACHE;
} else {
@@ -2183,8 +2183,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
kmalloc(sizeof(struct arraycache_init), gfp);
if (slab_state == PARTIAL_ARRAYCACHE) {
set_up_list3s(cachep, SIZE_L3);
slab_state = PARTIAL_L3;
set_up_node(cachep, SIZE_NODE);
slab_state = PARTIAL_NODE;
} else {
int node;
for_each_online_node(node) {
@@ -2192,7 +2192,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
kmalloc_node(sizeof(struct kmem_cache_node),
gfp, node);
BUG_ON(!cachep->node[node]);
kmem_list3_init(cachep->node[node]);
kmem_cache_node_init(cachep->node[node]);
}
}
}
@@ -2322,7 +2322,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= kmalloc_size(INDEX_L3 + 1)
if (size >= kmalloc_size(INDEX_NODE + 1)
&& cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
size = PAGE_SIZE;
@@ -2457,7 +2457,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n,
struct array_cache *ac,
int force, int node);
@@ -2477,21 +2477,21 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_cache_node *l3;
struct kmem_cache_node *n;
int node;
on_each_cpu(do_drain, cachep, 1);
check_irq_on();
for_each_online_node(node) {
l3 = cachep->node[node];
if (l3 && l3->alien)
drain_alien_cache(cachep, l3->alien);
n = cachep->node[node];
if (n && n->alien)
drain_alien_cache(cachep, n->alien);
}
for_each_online_node(node) {
l3 = cachep->node[node];
if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
n = cachep->node[node];
if (n)
drain_array(cachep, n, n->shared, 1, node);
}
}
@@ -2502,19 +2502,19 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
* Returns the actual number of slabs released.
*/
static int drain_freelist(struct kmem_cache *cache,
struct kmem_cache_node *l3, int tofree)
struct kmem_cache_node *n, int tofree)