#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H
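/*
 * Transparent huge page (THP) support: declarations for PMD-level huge
 * page fault handling, splitting, locking, and the huge zero page.
 */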

extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
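/*
 * These bits live in transparent_hugepage_flags and are typically
 * toggled through sysfs (/sys/kernel/mm/transparent_hugepage/).
 */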

extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
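/*
 * Worked example: on x86-64, PMD_SHIFT is 21 and PAGE_SHIFT is 12, so
 * HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512: a single 2MB huge page
 * covers 512 4KB base pages.
 */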

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
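/*
 * The check above reads: THP applies to @__vma when it is enabled
 * system-wide, or enabled for madvise regions and the vma is marked
 * VM_HUGEPAGE; in either case the vma must not be marked VM_NOHUGEPAGE
 * and must not be a temporary stack.
 */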
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
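/*
 * Illustrative expectations (not enforced here): callers of
 * split_huge_page() hold a reference on a locked page; it returns 0 on
 * success and -EBUSY if the page could not be split.
 * deferred_split_huge_page() instead queues the page so it can be
 * split later, under memory pressure.
 */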
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
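/*
 * Minimal usage sketch (hypothetical caller): split a huge pmd back to
 * normal page table entries before working at base-page granularity:
 *
 *	split_huge_pmd(vma, pmd, addr);
 *
 * The macro is a no-op unless *pmd is actually huge (or devmap).
 */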

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				    unsigned long start,
				    unsigned long end,
				    long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
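/*
 * Typical caller pattern (illustrative): a non-NULL return means the
 * pmd is huge (or devmap) and the lock is held, so the pmd is stable
 * until the caller drops it:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */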
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);
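/*
 * get_huge_zero_page() takes a reference on the global huge zero page,
 * allocating it on first use; each successful get should be balanced
 * by a put_huge_zero_page() so the page can be reclaimed.
 */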

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
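/*
 * BUILD_BUG() turns any use of these constants into a compile-time
 * error when THP is disabled, instead of silently computing with bogus
 * values.
 */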

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void put_huge_zero_page(void)
{
	BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */