#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size  */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
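
/*
 * Illustrative sketch (not part of the original header): looking up the
 * cache that backs a kmalloc() of a given size from allocator-internal
 * code; the size and flags used here are arbitrary:
 *
 *	struct kmem_cache *s = kmalloc_slab(64, GFP_KERNEL);
 *
 *	if (!ZERO_OR_NULL_PTR(s))
 *		pr_info("64-byte kmalloc is served by %s\n", s->name);
 */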
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)
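
/*
 * Illustrative sketch (not part of the original header): walking the
 * per-memcg children of a root cache while holding slab_mutex, as required
 * above; 'root_cache' is a hypothetical root kmem_cache pointer:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("memcg child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */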

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	int ret;

	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;

	ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
	if (ret)
		return ret;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			1 << order);
	return 0;
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;

	memcg_kmem_update_page_stat(page,
			(s->flags & SLAB_RECLAIM_ACCOUNT) ?
			MEMCG_SLAB_RECLAIMABLE : MEMCG_SLAB_UNRECLAIMABLE,
			-(1 << order));
	memcg_kmem_uncharge(page, order);
}
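
/*
 * Illustrative sketch (not part of the original header): how an allocator
 * might charge a freshly allocated slab page to the cache's memcg and back
 * out if the charge fails. 'alloc_slab_pages' stands in for the allocator's
 * real page-allocation path and is hypothetical:
 *
 *	page = alloc_slab_pages(gfp, order);
 *	if (page && memcg_charge_slab(page, gfp, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *
 * The matching memcg_uncharge_slab(page, order, s) call would then be made
 * when the slab page is freed back to the page allocator.
 */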

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
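
/*
 * Illustrative sketch (not part of the original header): how an allocator's
 * allocation path is expected to bracket the actual object allocation with
 * the two hooks above. '___do_alloc' is a hypothetical allocator-internal
 * fast path, and 's' may be replaced by a per-memcg cache by the pre hook:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ___do_alloc(s, gfpflags);
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */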

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
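
/*
 * Illustrative sketch (not part of the original header): counting how many
 * nodes of a cache have a kmem_cache_node allocated; 's' is a hypothetical
 * kmem_cache pointer:
 *
 *	struct kmem_cache_node *n;
 *	int node, nr = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr++;
 */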

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */