/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
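
/*
 * Usage sketch (not part of this header): an address_space whose pages
 * carry no private state can point its migratepage hook at the generic
 * migrate_page() helper declared below, which returns MIGRATEPAGE_SUCCESS
 * or a negative errno; "example_aops" is a hypothetical name.
 *
 *	static const struct address_space_operations example_aops = {
 *		.migratepage	= migrate_page,
 *	};
 */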

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CONTIG_RANGE,
	MR_TYPES
};

/* In mm/debug.c; also keep in sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];

/*
 * Allocate a destination page for migrating @page, preferring
 * @preferred_nid and constraining the allocation to @nodemask (which
 * may be NULL). Huge and transparent huge source pages get a matching
 * destination page.
 */
static inline struct page *new_page_nodemask(struct page *page,
				int preferred_nid, nodemask_t *nodemask)
{
	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
	unsigned int order = 0;
	struct page *new_page = NULL;

	if (PageHuge(page))
		return alloc_huge_page_nodemask(page_hstate(compound_head(page)),
				preferred_nid, nodemask);

	if (PageTransHuge(page)) {
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page) || (zone_idx(page_zone(page)) == ZONE_MOVABLE))
		gfp_mask |= __GFP_HIGHMEM;

	new_page = __alloc_pages_nodemask(gfp_mask, order,
				preferred_nid, nodemask);

	if (new_page && PageTransHuge(new_page))
		prep_transhuge_page(new_page);

	return new_page;
}

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason);
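
/*
 * Usage sketch: migrate_pages() takes a list of isolated pages plus a
 * new_page_t allocator. A minimal callback could lean on
 * new_page_nodemask() above and pass the target node through @private
 * ("alloc_near_node" is a hypothetical name):
 *
 *	static struct page *alloc_near_node(struct page *page,
 *					    unsigned long private)
 *	{
 *		return new_page_nodemask(page, (int)private, NULL);
 *	}
 *
 *	err = migrate_pages(&pagelist, alloc_near_node, NULL, nid,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 */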
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
extern void putback_movable_page(struct page *page);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason)
	{ return -ENOSYS; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif
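
/*
 * Usage sketch (cf. Documentation/vm/page_migration.rst): a driver that
 * supports non-LRU page migration marks its pages movable under the page
 * lock, passing an address_space whose a_ops provide isolate_page,
 * migratepage and putback_page; "driver_mapping" is a hypothetical name.
 *
 *	lock_page(page);
 *	__SetPageMovable(page, driver_mapping);
 *	unlock_page(page);
 */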

#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline bool pmd_trans_migrating(pmd_t pmd)
{
	return false;
}
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */
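
/*
 * Usage sketch, condensed from the NUMA hinting fault path (cf.
 * do_numa_page() in mm/memory.c); variable names are illustrative. A
 * non-zero return means the page now resides on @target_nid:
 *
 *	migrated = migrate_misplaced_page(page, vma, target_nid);
 *	if (migrated)
 *		page_nid = target_nid;
 */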

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where a 32-bit unsigned long might not
 * have enough bits to store both the physical address and the flags. So
 * far we have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_LOCKED	(1UL << 2)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
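
/*
 * Usage sketch (cf. Documentation/vm/hmm.rst): once migrate_vma_setup()
 * below has filled the src array, a driver encodes each freshly
 * allocated, locked destination page into the matching dst slot; "dpage"
 * and "i" are illustrative:
 *
 *	args->dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
 *
 * migrate_pfn_to_page(args->dst[i]) then recovers dpage.
 */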

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both src and dst array must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The src array must not be modified by the caller after
	 * migrate_vma_setup(), and the caller must not change the dst
	 * array after migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. If set, only device
	 * private pages with this owner are migrated. If not set,
	 * device private pages are not migrated at all.
	 */
	void			*src_owner;
};

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
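
/*
 * Typical call sequence, condensed from Documentation/vm/hmm.rst (error
 * handling and the device-specific copy step are omitted; all names are
 * illustrative):
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.start	= start,
 *		.end	= end,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EINVAL;
 *	(allocate destination pages, copy the data, and fill args.dst[]
 *	 with migrate_pfn() entries for each migrating src slot)
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */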

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */