#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/memcontrol.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	/*
	 * NOTE: the LSB of the head.next is set by
	 * mm_take_all_locks() _after_ taking the above lock. So the
	 * head must only be read/written after taking the above lock
	 * to be sure to see a valid next pointer. The LSB bit itself
	 * is serialized by a system wide lock only visible to
	 * mm_take_all_locks() (mm_all_locks_mutex).
	 */
	struct list_head head;	/* List of private "related" vmas */
};
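
/*
 * Illustrative note, not part of the original header: an anonymous page
 * reaches its anon_vma through page->mapping, with the PAGE_MAPPING_ANON
 * low bit set to tell it apart from a struct address_space pointer.
 * Roughly what mm/rmap.c does when the first pte is added:
 *
 *	page->mapping = (struct address_space *)
 *			((void *)vma->anon_vma + PAGE_MAPPING_ANON);
 */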

#ifdef CONFIG_MMU

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}
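
/*
 * Illustrative sketch, not part of the original header: a reverse-map walk
 * takes the anon_vma lock and iterates the list of related vmas.  Assuming
 * the vmas are linked through vm_area_struct.anon_vma_node, the pattern in
 * mm/rmap.c looks roughly like:
 *
 *	struct vm_area_struct *v;
 *
 *	spin_lock(&anon_vma->lock);
 *	list_for_each_entry(v, &anon_vma->head, anon_vma_node)
 *		...;		check whether the page is mapped in v
 *	spin_unlock(&anon_vma->lock);
 */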

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int  anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing pte of page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
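
/*
 * Illustrative sketch, not part of the original header: the fault and unmap
 * paths pair these calls with the pte operations.  A freshly allocated
 * anonymous page is announced before its pte is installed, and the rmap is
 * dropped again when the pte is torn down; roughly:
 *
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, pte, entry);
 *	...
 *	ptep_get_and_clear(mm, address, pte);
 *	page_remove_rmap(page);
 */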

static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked,
			struct mem_cgroup *cnt, unsigned long *vm_flags);
enum ttu_flags {
	TTU_UNMAP = 0,			/* unmap mode */
	TTU_MIGRATION = 1,		/* migration mode */
	TTU_MUNLOCK = 2,		/* munlock mode */
	TTU_ACTION_MASK = 0xff,

	TTU_IGNORE_MLOCK = (1 << 8),	/* ignore mlock */
	TTU_IGNORE_ACCESS = (1 << 9),	/* don't age */
	TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
};
#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)

int try_to_unmap(struct page *, enum ttu_flags flags);
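
/*
 * Illustrative sketch, not part of the original header: an action is or'ed
 * with modifier bits by the caller, e.g. page migration unmaps with
 *
 *	try_to_unmap(page, TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
 *
 * and the unmap code recovers the requested action with TTU_ACTION(flags).
 */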

/*
 * Called from mm/filemap_xip.c to unmap empty zero page
 */
pte_t *page_check_address(struct page *, struct mm_struct *,
				unsigned long, spinlock_t **, int);
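
/*
 * Illustrative sketch, not part of the original header: page_check_address()
 * returns the mapped pte with its page table lock held, or NULL if the page
 * is not mapped at that address, so callers typically do:
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (pte) {
 *		...		inspect or modify the pte
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */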

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

/*
 * Cleans the PTEs of shared mappings.
 * (and since clean PTEs should also be readonly, write protects them too)
 *
 * returns the number of cleaned PTEs.
 */
int page_mkclean(struct page *);
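
/*
 * Illustrative sketch, not part of the original header: writeback uses the
 * return value to re-dirty the page when any pte had to be cleaned, roughly
 * as mm/page-writeback.c does:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);
 */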

/*
 * called in munlock()/munmap() path to check for other vmas holding
 * the page mlocked.
 */
int try_to_munlock(struct page *);

/*
 * Called by memory-failure.c to kill processes.
 */
struct anon_vma *page_lock_anon_vma(struct page *page);
void page_unlock_anon_vma(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
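
/*
 * Illustrative sketch, not part of the original header: page_lock_anon_vma()
 * can return NULL if the page is no longer anonymous, so it is paired with
 * page_unlock_anon_vma() under a NULL check:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (anon_vma) {
 *		...		walk anon_vma->head
 *		page_unlock_anon_vma(anon_vma);
 *	}
 */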

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

static inline int page_referenced(struct page *page, int is_locked,
				  struct mem_cgroup *cnt,
				  unsigned long *vm_flags)
{
	*vm_flags = 0;
	return TestClearPageReferenced(page);
}

#define try_to_unmap(page, refs) SWAP_FAIL

static inline int page_mkclean(struct page *page)
{
	return 0;
}


#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2
#define SWAP_MLOCK	3
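
/*
 * Illustrative sketch, not part of the original header: vmscan interprets
 * the try_to_unmap() result roughly as
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_SUCCESS:	fully unmapped, the page can be reclaimed
 *	case SWAP_AGAIN:	keep the page and retry later
 *	case SWAP_FAIL:		give up on this page
 *	case SWAP_MLOCK:	page is mlocked, move it to the unevictable list
 *	}
 */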

#endif	/* _LINUX_RMAP_H */