Commit a737b3e2 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] slab cleanup



slab.c has become a bit revolting again.  Try to repair it.

- Coding style fixes

- Don't do assignments-in-if-statements.

- Don't typecast assignments to/from void*
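
A minimal before/after sketch of the last two points (the identifiers match the
cpuup_callback() hunk further down; the cast in the "before" line is added here
only to illustrate the third point, and this is an illustration rather than the
full change):

	/* before: assignment buried inside the if(), plus a needless cast from void * */
	if (!(l3 = (struct kmem_list3 *)kmalloc_node(memsize, GFP_KERNEL, node)))
		goto bad;

	/* after: plain statement; kmalloc_node() already returns void *, so no cast */
	l3 = kmalloc_node(memsize, GFP_KERNEL, node);
	if (!l3)
		goto bad;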

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f30cf7d1
@@ -50,7 +50,7 @@
* The head array is strictly LIFO and should improve the cache hit rates.
* On SMP, it additionally reduces the spinlock operations.
*
* The c_cpuarray may not be read with enabled local interrupts -
* The c_cpuarray may not be read with enabled local interrupts -
* it's changed with a smp_call_function().
*
* SMP synchronization:
@@ -266,16 +266,17 @@ struct array_cache {
unsigned int batchcount;
unsigned int touched;
spinlock_t lock;
void *entry[0]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
* the entries.
* [0] is for gcc 2.95. It should really be [].
*/
void *entry[0]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
* the entries.
* [0] is for gcc 2.95. It should really be [].
*/
};
/* bootstrap: The caches do not work without cpuarrays anymore,
* but the cpuarrays are allocated from the generic caches...
/*
* bootstrap: The caches do not work without cpuarrays anymore, but the
* cpuarrays are allocated from the generic caches...
*/
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
@@ -310,10 +311,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define SIZE_L3 (1 + MAX_NUMNODES)
/*
* This function must be completely optimized away if
* a constant is passed to it. Mostly the same as
* what is in linux/slab.h except it returns an
* index.
* This function must be completely optimized away if a constant is passed to
* it. Mostly the same as what is in linux/slab.h except it returns an index.
*/
static __always_inline int index_of(const size_t size)
{
@@ -351,14 +350,14 @@ static void kmem_list3_init(struct kmem_list3 *parent)
parent->free_touched = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
do { \
INIT_LIST_HEAD(listp); \
list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
#define MAKE_LIST(cachep, listp, slab, nodeid) \
do { \
INIT_LIST_HEAD(listp); \
list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
} while (0)
#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
do { \
#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
do { \
MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
@@ -379,8 +378,8 @@ struct kmem_cache {
unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
spinlock_t spinlock;
/* 3) cache_grow/shrink */
@@ -390,11 +389,11 @@ struct kmem_cache {
/* force GFP flags, e.g. GFP_DMA */
gfp_t gfpflags;
size_t colour; /* cache colouring range */
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
unsigned int dflags; /* dynamic flags */
/* constructor func */
void (*ctor) (void *, struct kmem_cache *, unsigned long);
@@ -438,8 +437,9 @@ struct kmem_cache {
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
#define BATCHREFILL_LIMIT 16
/* Optimization question: fewer reaps means less
* probability for unnessary cpucache drain/refill cycles.
/*
* Optimization question: fewer reaps means less probability for unnessary
* cpucache drain/refill cycles.
*
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
@@ -453,17 +453,19 @@ struct kmem_cache {
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
#define STATS_INC_REAPED(x) ((x)->reaped++)
#define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
(x)->high_mark = (x)->num_active; \
} while (0)
#define STATS_SET_HIGH(x) \
do { \
if ((x)->num_active > (x)->high_mark) \
(x)->high_mark = (x)->num_active; \
} while (0)
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
#define STATS_SET_FREEABLE(x, i) \
do { if ((x)->max_freeable < i) \
(x)->max_freeable = i; \
} while (0)
#define STATS_SET_FREEABLE(x, i) \
do { \
if ((x)->max_freeable < i) \
(x)->max_freeable = i; \
} while (0)
#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
@@ -478,9 +480,7 @@ struct kmem_cache {
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
#define STATS_SET_FREEABLE(x, i) \
do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
@@ -488,7 +488,8 @@ struct kmem_cache {
#endif
#if DEBUG
/* Magic nums for obj red zoning.
/*
* Magic nums for obj red zoning.
* Placed in the first word before and the first word after an obj.
*/
#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
@@ -499,7 +500,8 @@ struct kmem_cache {
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */
/* memory layout of objects:
/*
* memory layout of objects:
* 0 : objp
* 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
@@ -508,7 +510,8 @@ struct kmem_cache {
* redzone word.
* cachep->obj_offset: The real object.
* cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
* cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
* cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
* [BYTES_PER_WORD long]
*/
static int obj_offset(struct kmem_cache *cachep)
{
@@ -552,8 +555,8 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
/*
* Maximum size of an obj (in 2^order pages)
* and absolute limit for the gfp order.
* Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
* order.
*/
#if defined(CONFIG_LARGE_ALLOCS)
#define MAX_OBJ_ORDER 13 /* up to 32Mb */
@@ -573,9 +576,10 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
/* Functions for storing/retrieving the cachep and or slab from the
* global 'mem_map'. These are used to find the slab an obj belongs to.
* With kfree(), these are used to find the cache which an obj belongs to.
/*
* Functions for storing/retrieving the cachep and or slab from the page
* allocator. These are used to find the slab an obj belongs to. With kfree(),
* these are used to find the cache which an obj belongs to.
*/
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
@@ -621,7 +625,9 @@ static inline unsigned int obj_to_index(struct kmem_cache *cache,
return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
}
/* These are the default caches for kmalloc. Custom caches can have other sizes. */
/*
* These are the default caches for kmalloc. Custom caches can have other sizes.
*/
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
@@ -667,8 +673,8 @@ static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
/*
* vm_enough_memory() looks at this to determine how many
* slab-allocated pages are possibly freeable under pressure
* vm_enough_memory() looks at this to determine how many slab-allocated pages
* are possibly freeable under pressure
*
* SLAB_RECLAIM_ACCOUNT turns this on per-slab
*/
@@ -687,7 +693,8 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
static int __node_shrink(struct kmem_cache *cachep, int node);
@@ -697,7 +704,8 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
return cachep->array[smp_processor_id()];
}
static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
static inline struct kmem_cache *__find_general_cachep(size_t size,
gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
@@ -732,8 +740,9 @@ static size_t slab_mgmt_size(size_t nr_objs, size_t align)
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}
/* Calculate the number of objects and left-over bytes for a given
buffer size. */
/*
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
size_t align, int flags, size_t *left_over,
unsigned int *num)
@@ -794,7 +803,8 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
static void __slab_error(const char *function, struct kmem_cache *cachep,
char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
@@ -918,10 +928,8 @@ static void free_alien_cache(struct array_cache **ac_ptr)
if (!ac_ptr)
return;
for_each_node(i)
kfree(ac_ptr[i]);
kfree(ac_ptr);
}
@@ -955,7 +963,8 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
}
}
static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
static void drain_alien_cache(struct kmem_cache *cachep,
struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
@@ -998,20 +1007,22 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
switch (action) {
case CPU_UP_PREPARE:
mutex_lock(&cache_chain_mutex);
/* we need to do this right in the beginning since
/*
* We need to do this right in the beginning since
* alloc_arraycache's are going to use this list.
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
*/
list_for_each_entry(cachep, &cache_chain, next) {
/* setup the size64 kmemlist for cpu before we can
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
if (!cachep->nodelists[node]) {
if (!(l3 = kmalloc_node(memsize,
GFP_KERNEL, node)))
l3 = kmalloc_node(memsize, GFP_KERNEL, node);
if (!l3)
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
@@ -1027,13 +1038,15 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
spin_lock_irq(&cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
/* Now we can go ahead with allocating the shared array's
& array cache's */
/*
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
@@ -1053,7 +1066,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
if (!alien)
goto bad;
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
@@ -1073,7 +1085,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
}
#endif
spin_unlock_irq(&l3->list_lock);
kfree(shared);
free_alien_cache(alien);
}
@@ -1095,7 +1106,6 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
@@ -1162,7 +1172,7 @@ free_array_cache:
#endif
}
return NOTIFY_OK;
bad:
bad:
mutex_unlock(&cache_chain_mutex);
return NOTIFY_BAD;
}
@@ -1172,7 +1182,8 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
int nodeid)
{
struct kmem_list3 *ptr;
@@ -1187,8 +1198,9 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int no
local_irq_enable();
}
/* Initialisation.
* Called after the gfp() functions have been enabled, and before smp_init().
/*
* Initialisation. Called after the page allocator have been initialised and
* before smp_init().
*/
void __init kmem_cache_init(void)
{
@@ -1213,9 +1225,9 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
* 1) initialize the cache_cache cache: it contains the struct kmem_cache
* structures of all caches, except cache_cache itself: cache_cache
* is statically allocated.
* 1) initialize the cache_cache cache: it contains the struct
* kmem_cache structures of all caches, except cache_cache itself:
* cache_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
@@ -1238,7 +1250,8 @@ void __init kmem_cache_init(void)
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
cache_line_size());
for (order = 0; order < MAX_ORDER; order++) {
cache_estimate(order, cache_cache.buffer_size,
@@ -1257,24 +1270,26 @@ void __init kmem_cache_init(void)
sizes = malloc_sizes;
names = cache_names;
/* Initialize the caches that provide memory for the array cache
* and the kmem_list3 structures first.
* Without this, further allocations will bug
/*
* Initialize the caches that provide memory for the array cache and the
* kmem_list3 structures first. Without this, further allocations will
* bug.
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_PANIC), NULL, NULL);
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
if (INDEX_AC != INDEX_L3)
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
NULL);
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
while (sizes->cs_size != ULONG_MAX) {
/*
@@ -1284,13 +1299,13 @@ void __init kmem_cache_init(void)
* Note for systems short on memory removing the alignment will
* allow tighter packing of the smaller caches.
*/
if (!sizes->cs_cachep)
if (!sizes->cs_cachep) {
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS
| SLAB_PANIC),
NULL, NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL, NULL);
}
/* Inc off-slab bufctl limit until the ceiling is hit. */
if (!(OFF_SLAB(sizes->cs_cachep))) {
@@ -1299,13 +1314,11 @@ void __init kmem_cache_init(void)
}
sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
(ARCH_KMALLOC_FLAGS |
SLAB_CACHE_DMA |
SLAB_PANIC), NULL,
NULL);
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
NULL, NULL);
sizes++;
names++;
}
@@ -1357,20 +1370,22 @@ void __init kmem_cache_init(void)
struct kmem_cache *cachep;
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
enable_cpucache(cachep);
enable_cpucache(cachep);
mutex_unlock(&cache_chain_mutex);
}
/* Done! */
g_cpucache_up = FULL;
/* Register a cpu startup notifier callback
* that initializes cpu_cache_get for all new cpus
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
/* The reap timers are started later, with a module init call:
* That part of the kernel is not yet operational.
/*
* The reap timers are started later, with a module init call: That part
* of the kernel is not yet operational.
*/
}
@@ -1378,16 +1393,13 @@ static int __init cpucache_init(void)
{
int cpu;
/*
* Register the timers that return unneeded
* pages to gfp.
/*
* Register the timers that return unneeded pages to the page allocator
*/
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
start_cpu_timer(cpu);
return 0;
}
__initcall(cpucache_init);
/*
@@ -1501,9 +1513,8 @@ static void dump_line(char *data, int offset, int limit)
{
int i;
printk(KERN_ERR "%03x:", offset);
for (i = 0; i < limit; i++) {
for (i = 0; i < limit; i++)
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
}
#endif
@@ -1517,15 +1528,15 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
*dbg_userword(cachep, objp));
*dbg_userword(cachep, objp));
print_symbol("(%s)",
(unsigned long)*dbg_userword(cachep, objp));
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
realobj = (char *)objp + obj_offset(cachep);
@@ -1558,8 +1569,8 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp)
/* Print header */
if (lines == 0) {
printk(KERN_ERR
"Slab corruption: start=%p, len=%d\n",
realobj, size);
"Slab corruption: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 0);
}
/* Hexdump the affected line */
@@ -1614,11 +1625,10 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->buffer_size % PAGE_SIZE) == 0
&& OFF_SLAB(cachep))
if (cachep->buffer_size % PAGE_SIZE == 0 &&
OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE,
1);
cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -1650,10 +1660,10 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
}
#endif
/**
/*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling the slab must have been unlinked from the cache.
* The cache-lock is not held/needed.
* Before calling the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
@@ -1674,8 +1684,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
}
}
/* For setting up all the kmem_list3s for cache whose buffer_size is same
as size of kmem_list3. */
/*
* For setting up all the kmem_list3s for cache whose buffer_size is same as
* size of kmem_list3.
*/
static void set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
@@ -1701,13 +1713,13 @@ static void set_up_list3s(struct kmem_cache *cachep, int index)
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
static inline size_t calculate_slab_order(struct kmem_cache *cachep,
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
size_t left_over = 0;
int gfporder;
for (gfporder = 0 ; gfporder <= MAX_GFP_ORDER; gfporder++) {
for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
@@ -1742,7 +1754,7 @@ static inline size_t calculate_slab_order(struct kmem_cache *cachep,
/*
* Acceptable internal fragmentation?
*/
if ((left_over * 8) <= (PAGE_SIZE << gfporder))
if (left_over * 8 <= (PAGE_SIZE << gfporder))
break;
}
return left_over;
@@ -1817,9 +1829,8 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
* and the @dtor is run before the pages are handed back.
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting
* unloaded.
*
* the module calling this has to destroy the cache before getting unloaded.
*
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -1837,7 +1848,8 @@ static void setup_cpu_cache(struct kmem_cache *cachep)
*/
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
unsigned long flags,
void (*ctor)(void*, struct kmem_cache *, unsigned long),
void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
@@ -1847,12 +1859,10 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/*
* Sanity checks... these are all serious usage bugs.
*/
if ((!name) ||
in_interrupt() ||
(size < BYT