/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
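
/*
 * Flag bits for transparent_hugepage_flags.  These are toggled at run time
 * via the sysfs files under /sys/kernel/mm/transparent_hugepage/.
 */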
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

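/* Order and number of base pages in a PMD-sized huge page. */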
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

/*
 * To be used on VMAs which are known to support THP.
 * Use transparent_hugepage_enabled() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;
	/*
	 * For dax vmas, try to always use hugepage mappings. If the kernel does
	 * not support hugepages, fsdax mappings will fall back to PAGE_SIZE
	 * mappings, and device-dax namespaces, which try to guarantee a given
	 * mapping size, will fail to enable.
	 */
	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_enabled(struct vm_area_struct *vma);

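/*
 * transhuge_vma_suitable() checks that the huge page starting at @haddr fits
 * entirely inside @vma and that, for file-backed VMAs, the VMA start and the
 * file offset are aligned consistently modulo HPAGE_PMD_NR, so the range can
 * be mapped by a single huge PMD.
 */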
#define HPAGE_CACHE_INDEX_MASK (HPAGE_PMD_NR - 1)

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (((vma->vm_start >> PAGE_SHIFT) & HPAGE_CACHE_INDEX_MASK) !=
			(vma->vm_pgoff & HPAGE_CACHE_INDEX_MASK))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);
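
/*
 * Split the huge PMD mapped at @__address if *__pmd is transparent huge,
 * devmap or a swap (e.g. migration) entry; this is a no-op for ordinary PMDs.
 */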
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

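/*
 * Handle madvise(MADV_HUGEPAGE) and madvise(MADV_NOHUGEPAGE) for @vma by
 * updating *vm_flags.
 */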
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
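/*
 * Returns the page table lock, taken, if *pmd maps a huge page (transparent
 * huge, devmap or swap entry); returns NULL without taking the lock otherwise.
 */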
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
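
/* PUD counterpart of pmd_trans_huge_lock(); mmap_sem must likewise be held. */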
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
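
/* Number of base pages in @page: HPAGE_PMD_NR for a THP, 1 otherwise. */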
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
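
/* follow_page()/get_user_pages() helpers for huge device-memory (devmap) mappings. */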
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
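
/*
 * The huge zero page is allocated on demand and shared read-only by all
 * users; mm_get_huge_zero_page()/mm_put_huge_zero_page() manage the per-mm
 * reference that keeps it alive.
 */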
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

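/*
 * True if the architecture can migrate PMD-mapped THPs without splitting
 * them first.
 */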
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or memcg deferred list lives in the second tail page;
	 * the first word there is occupied by compound_head.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */