#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code from establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can overwrite this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented by an unsigned short.
 * This means the number of sections should be smaller than 65535 at coredump
 * time. Because the kernel adds some informative sections to the image of a
 * program when generating a coredump, we need some margin. The number of
 * extra sections is currently 1-3 and depends on the arch. We use "5" as a
 * safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is no longer a hard limit, although some userspace tools can be surprised
 * by that.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
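
/*
 * For example, with USHRT_MAX == 65535 and the 5-section margin above,
 * DEFAULT_MAX_MAP_COUNT works out to 65530, which is the usual default
 * for the vm.max_map_count sysctl.
 */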

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
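
/*
 * For example, nth_page(page, 1) yields the struct page for the pfn that
 * immediately follows 'page'; with a sparse memmap this is not necessarily
 * the same as 'page + 1', which is why nth_page() goes through the pfn.
 */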

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
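
/*
 * Illustrative example, assuming PAGE_SIZE == 4096: PAGE_ALIGN(0x1234)
 * rounds up to 0x2000, PAGE_ALIGNED(0x1234) is false and
 * PAGE_ALIGNED(0x2000) is true.
 */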

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];
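
/*
 * Illustrative sketch: the table is indexed by the low four vm_flags bits,
 * which is (roughly) what vm_get_page_prot() does:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 */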

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_fault *vmf);
	int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
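
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a minimal implementation only fills in the hooks it needs, e.g.
 *
 *	static int my_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->vma, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_vm_fault,
 *	};
 *
 * and an ->mmap() handler would install it with vma->vm_ops = &my_vm_ops.
 * my_vm_fault() and my_lookup_page() are made-up names used for illustration.
 */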

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
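
/*
 * Illustrative sketch: get_page_unless_zero() is the building block for
 * speculative page references; a lookup that finds a page whose refcount
 * has already dropped to zero simply retries, roughly:
 *
 *	if (!get_page_unless_zero(page))
 *		goto repeat;
 */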

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}
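
/*
 * For example, a compound page of order 2 covers 1 << 2 == 4 base pages,
 * i.e. 4 * PAGE_SIZE bytes.
 */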

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
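
/*
 * Illustrative sketch of typical use when a fault handler installs a new,
 * dirty PTE (roughly what the COW path in mm/memory.c does):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */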

int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);

	if (unlikely(is_zone_device_page(page)))
		put_zone_device_page(page);
}
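
/*
 * Illustrative sketch: callers pair get_page()/put_page() around any window
 * in which they use a page they do not otherwise own a reference to:
 *
 *	get_page(page);
 *	...use the page...
 *	put_page(page);
 *
 * and the final put_page() frees the page via __put_page().
 */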

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}
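
/*
 * Illustrative example: cpu_pid_to_cpupid() packs the cpu into the high bits
 * and the low pid bits into the low bits, so for any cpu/pid
 *
 *	cpupid_to_cpu(cpu_pid_to_cpupid(cpu, pid)) == (cpu & LAST__CPU_MASK)
 *	cpupid_to_pid(cpu_pid_to_cpupid(cpu, pid)) == (pid & LAST__PID_MASK)
 */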

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
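
/*
 * Illustrative sketch: memmap initialisation encodes zone/node/section with
 * set_page_links(), and the accessors above decode the same bits, e.g.
 *
 *	set_page_links(page, zone, nid, pfn);
 *	VM_BUG_ON(page_zonenum(page) != zone || page_to_nid(page) != nid);
 */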

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)