/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>

#include <linux/fs.h> /* only for vma_is_dax() */

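/*
 * Huge page fault and copy-at-fork entry points, implemented in
 * mm/huge_memory.c.
 */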
extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
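/*
 * Install a huge pfn mapping at a fault; the DAX paths are the main
 * users of these helpers.
 */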
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
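/*
 * Runtime THP policy bits, toggled through the sysfs files under
 * /sys/kernel/mm/transparent_hugepage/.
 */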
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

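/* HPAGE_PMD_NR is the number of base pages in one PMD-sized huge page. */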
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

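/*
 * Is THP usable for @vma?  Checks the per-VMA and per-process opt-outs
 * (VM_NOHUGEPAGE, PR_SET_THP_DISABLE) before consulting the global
 * sysfs "enabled" policy; DAX VMAs may always use huge mappings.
 */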
static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

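/*
 * Splitting turns one compound THP back into HPAGE_PMD_NR base pages.
 * split_huge_page() returns 0 on success and a negative error if the
 * page could not be split (e.g. due to extra pins).
 */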
bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

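/* Split the huge PMD mapped at @__address, if any; otherwise a no-op. */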
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
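/* Number of base pages backing @page: HPAGE_PMD_NR for a THP, 1 otherwise. */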
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

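/* The single, shared, read-only huge zero page. */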
extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

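/* True if the architecture can migrate a THP without splitting it first. */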
static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
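/*
 * CONFIG_TRANSPARENT_HUGEPAGE=n: no-op stubs, plus BUILD_BUG() for
 * constants that must never be used in this configuration.
 */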
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */