/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>
#include <linux/page_ref.h>
#include <linux/memremap.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

void init_mm_internals(void);

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

#ifndef page_to_virt
#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
#endif

#ifndef lm_alias
#define lm_alias(x)	__va(__pa_symbol(x))
#endif

/*
 * To prevent common memory management code establishing
 * a zero page mapping on a read fault.
 * This macro should be defined within <asm/pgtable.h>.
 * s390 does this to prevent multiplexing of hardware bits
 * related to the physical page in case of virtualization.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * On some architectures it is expensive to call memset() for small sizes.
 * Those architectures should provide their own implementation of "struct page"
 * zeroing by defining this macro in <asm/pgtable.h>.
 */
#ifndef mm_zero_struct_page
#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
#endif

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can overwrite this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * so it must stay below 65535 at coredump time. Because the kernel adds a few
 * informative sections to the program image while generating the coredump, we
 * need some margin. The number of extra sections is currently 1-3 and depends
 * on the arch, so "5" is used as a safe margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
 * is no longer a hard limit, although some userspace tools can be surprised by
 * exceeding it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
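/*
 * Worked example (illustrative, not part of the original header): with
 * USHRT_MAX == 65535 and the margin of 5 above, DEFAULT_MAX_MAP_COUNT
 * evaluates to 65530, keeping a coredump's section count below the 16-bit
 * ELF limit even after the extra kernel-generated sections are added.
 */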

extern int sysctl_max_map_count;

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
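/*
 * Illustrative example (not part of the original header), assuming 4 KiB
 * pages: PAGE_ALIGN(0x1001) rounds up to 0x2000 and PAGE_ALIGN(0x2000) stays
 * 0x2000, while PAGE_ALIGNED(0x2000) is true and PAGE_ALIGNED(0x2001) is not.
 */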

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 * When changing, update also include/trace/events/mmflags.h
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080
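/*
 * Illustrative check (not part of the original header) of the relationship
 * the comment above relies on: VM_MAYREAD (0x10) >> 4 == VM_READ (0x01), and
 * likewise for the write/exec bits, so the VM_* bit can be derived from the
 * corresponding VM_MAY* bit with a single shift.
 */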

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_SYNC		0x00800000	/* Synchronous page faults */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
#endif
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
# define VM_ARCH_CLEAR	VM_SPARC_ADI
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_HIGH_ARCH_4
#else
# define VM_MPX		VM_NONE
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))

/* Arch-specific flags to clear when updating VM flags on protection change */
#ifndef VM_ARCH_CLEAR
# define VM_ARCH_CLEAR	VM_NONE
#endif
#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
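/*
 * Illustrative sketch (not part of the original header): callers index this
 * table with the low four vm_flags bits, along the lines of
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * which is roughly what vm_get_page_prot() does in mm/mmap.c.
 */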

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */

#define FAULT_FLAG_TRACE \
	{ FAULT_FLAG_WRITE,		"WRITE" }, \
	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
	{ FAULT_FLAG_TRIED,		"TRIED" }, \
	{ FAULT_FLAG_USER,		"USER" }, \
	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * MM layer fills up gfp_mask for page allocations but fault handler might
 * alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	struct vm_area_struct *vma;	/* Target VMA */
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	unsigned long address;		/* Faulting virtual address */
	pmd_t *pmd;			/* Pointer to pmd entry matching
					 * the 'address' */
	pud_t *pud;			/* Pointer to pud entry matching
					 * the 'address'
					 */
	pte_t orig_pte;			/* Value of PTE at the time of fault */

	struct page *cow_page;		/* Page handler may use for COW fault */
	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* These three entries are valid only while holding ptl lock */
	pte_t *pte;			/* Pointer to pte entry matching
					 * the 'address'. NULL if the page
					 * table hasn't been allocated.
					 */
	spinlock_t *ptl;		/* Page table lock.
					 * Protects pte page table if 'pte'
					 * is not NULL, otherwise pmd.
					 */
	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
					 * vm_ops->map_pages() calls
					 * alloc_set_pte() from atomic context.
					 * do_fault_around() pre-allocates
					 * page table to avoid allocation from
					 * atomic context.
					 */
};

/* page entry size for vm->huge_fault() */
enum page_entry_size {
	PE_SIZE_PTE = 0,
	PE_SIZE_PMD,
	PE_SIZE_PUD,
};

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*split)(struct vm_area_struct * area, unsigned long addr);
	int (*mremap)(struct vm_area_struct * area);
	vm_fault_t (*fault)(struct vm_fault *vmf);
	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
			enum page_entry_size pe_size);
	void (*map_pages)(struct vm_fault *vmf,
			pgoff_t start_pgoff, pgoff_t end_pgoff);
	unsigned long (*pagesize)(struct vm_area_struct * area);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
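/*
 * Illustrative sketch (not part of the original header): a minimal driver
 * might wire up only the ->fault callback, assuming a hypothetical
 * my_lookup_page() helper that finds the backing page:
 *
 *	static vm_fault_t my_vm_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->vma, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_vm_fault,
 *	};
 *
 * The remaining hooks are optional and may be left NULL.
 */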

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline bool is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return false;
#endif
}
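/*
 * Illustrative sketch (not part of the original header): this test lets a
 * caller free a buffer that may have come from either allocator, which is
 * essentially what kvfree() below does:
 *
 *	if (is_vmalloc_addr(buf))
 *		vfree(buf);
 *	else
 *		kfree(buf);
 */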
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
static inline void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kvmalloc(n * size, flags);
}
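/*
 * Illustrative usage (not part of the original header), assuming a
 * hypothetical 'nr_items' count: these helpers fall back from kmalloc to
 * vmalloc for large requests, and kvfree() releases either kind:
 *
 *	struct item *tbl = kvmalloc_array(nr_items, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */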

extern void kvfree(const void *addr);

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}
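/*
 * Illustrative example (not part of the original header): because
 * page->_mapcount starts at -1, a page currently mapped into two processes'
 * page tables has _mapcount == 1, so page_mapcount() reports 2, while an
 * unmapped page reports 0.
 */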

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}

static inline int page_trans_huge_mapcount(struct page *page,
					   int *total_mapcount)
{
	int mapcount = page_mapcount(page);
	if (total_mapcount)
		*total_mapcount = mapcount;
	return mapcount;
}
#endif

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
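/*
 * Illustrative usage (not part of the original header): fault-handling code
 * typically builds a writable, dirty PTE along the lines of
 *
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *
 * so the write bit is only set when the VMA actually allows writes.
 */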

int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
		struct page *page);
int finish_fault(struct vm_fault *vmf);
int finish_mkwrite_fault(struct vm_fault *vmf);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(device_private_key);
#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
static inline bool is_device_private_page(const struct page *page);
static inline bool is_device_public_page(const struct page *page);
#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
static inline void put_zone_device_private_or_public_page(struct page *page)
{
}
#define IS_HMM_ENABLED 0
static inline bool is_device_private_page(const struct page *page)
{
	return false;
}
static inline bool is_device_public_page(const struct page *page)
{
	return false;
}
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_refcount.
	 */
	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
	page_ref_inc(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * For private device pages we need to catch refcount transition from
	 * 2 to 1: when the refcount reaches one it means the private device page
	 * is free and we need to inform the device driver through callback. See
	 * include/linux/memremap.h and HMM for details.
	 */
	if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
	    unlikely(is_device_public_page(page)))) {
		put_zone_device_private_or_public_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}
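/*
 * Illustrative usage (not part of the original header): a caller that needs
 * to hold a page across a blocking operation takes and drops its own
 * reference:
 *
 *	get_page(page);
 *	...				use the page
 *	put_page(page);
 */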

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	struct page *p = (struct page *)page;

	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
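/*
 * Illustrative round trip (not part of the original header): for a cpupid
 * built by cpu_pid_to_cpupid(cpu, pid), cpupid_to_cpu() recovers
 * cpu & LAST__CPU_MASK and cpupid_to_pid() recovers pid & LAST__PID_MASK,
 * i.e. both values are stored truncated to their field widths.
 */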
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

static inline pg_data_t *page_pgdat(const struct page *page)
{
	return NODE_DATA(page_to_nid(page));
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif