/* internal.h: mm/ internal definitions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __MM_INTERNAL_H
#define __MM_INTERNAL_H

#include <linux/mm.h>

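/*
 * in mm/memory.c: free the page tables covering an unmapped vma range;
 * floor/ceiling bound how far freeing may extend into neighbouring areas.
 */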
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
}

/*
 * Turn a non-refcounted page (->_count == 0) into a refcounted page
 * with a count of one.
 */
static inline void set_page_refcounted(struct page *page)
{
	VM_BUG_ON(PageTail(page));
	VM_BUG_ON(atomic_read(&page->_count));
	set_page_count(page, 1);
}

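/*
 * Decrement the page reference count without checking for the final put;
 * the caller is responsible for any resulting freeing.
 */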
static inline void __put_page(struct page *page)
{
	atomic_dec(&page->_count);
}

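/* highest pfn for which a memmap entry exists; used for pfn sanity checks */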
extern unsigned long highest_memmap_pfn;

/*
 * in mm/vmscan.c:
 */
extern int isolate_lru_page(struct page *page);
extern void putback_lru_page(struct page *page);

/*
 * in mm/page_alloc.c
 */
extern void __free_pages_bootmem(struct page *page, unsigned int order);
extern void prep_compound_page(struct page *page, unsigned long order);
#ifdef CONFIG_MEMORY_FAILURE
extern bool is_free_buddy_page(struct page *page);
#endif


/*
 * Function for dealing with a page's order in the buddy system.
 * zone->lock is already held when this is used, so atomic
 * page->flags operations are not needed here.
 */
static inline unsigned long page_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

#ifdef CONFIG_MMU
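/*
 * mlock/munlock a range of pages in a VM_LOCKED vma; see mm/mlock.c.
 */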
extern long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
extern void munlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end);
static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
{
	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
}

/*
 * Called only in the fault path, via page_evictable(), for a new page
 * to determine if it is being mapped into a VM_LOCKED vma.
 * If so, mark the page as mlocked.
 */
static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
{
	VM_BUG_ON(PageLRU(page));

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
		return 0;

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	return 1;
}

/*
 * must be called with vma's mmap_sem held for read or write, and page locked.
 */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/*
 * Clear the page's PageMlocked().  This can be useful in a situation where
 * we want to unconditionally remove a page from the pagecache -- e.g.,
 * on truncation or freeing.
 *
 * It is legal to call this function for any page, mlocked or not.
 * If called for a page that is still mapped by mlocked vmas, all we do
 * is revert to lazy LRU behaviour -- semantics are not broken.
 */
extern void __clear_page_mlock(struct page *page);
static inline void clear_page_mlock(struct page *page)
{
	if (unlikely(TestClearPageMlocked(page)))
		__clear_page_mlock(page);
}

/*
 * mlock_migrate_page - called only from migrate_page_copy() to
 * migrate the Mlocked page flag; update statistics.
 */
static inline void mlock_migrate_page(struct page *newpage, struct page *page)
{
	if (TestClearPageMlocked(page)) {
		unsigned long flags;

		local_irq_save(flags);
		__dec_zone_page_state(page, NR_MLOCK);
		SetPageMlocked(newpage);
		__inc_zone_page_state(newpage, NR_MLOCK);
		local_irq_restore(flags);
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
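/* user virtual address of @page within @vma (defined in mm/rmap.c) */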
extern unsigned long vma_address(struct page *page,
				 struct vm_area_struct *vma);
#endif
#else /* !CONFIG_MMU */
static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
{
	return 0;
}
static inline void clear_page_mlock(struct page *page) { }
static inline void mlock_vma_page(struct page *page) { }
static inline void mlock_migrate_page(struct page *new, struct page *old) { }

#endif /* !CONFIG_MMU */

/*
 * Return the mem_map entry representing the 'offset' subpage within
 * the maximally aligned gigantic page 'base'.  Handle any discontiguity
 * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
 */
static inline struct page *mem_map_offset(struct page *base, int offset)
{
	if (unlikely(offset >= MAX_ORDER_NR_PAGES))
		return pfn_to_page(page_to_pfn(base) + offset);
	return base + offset;
}

/*
 * Iterator over all subpages within the maximally aligned gigantic
 * page 'base'.  Handle any discontiguity in the mem_map.
 */
static inline struct page *mem_map_next(struct page *iter,
						struct page *base, int offset)
{
	if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
		unsigned long pfn = page_to_pfn(base) + offset;
		if (!pfn_valid(pfn))
			return NULL;
		return pfn_to_page(pfn);
	}
	return iter + 1;
}
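
/*
 * Typical caller pattern (sketch, mirroring the huge-page clear/copy
 * helpers; nr_pages and do_something() are placeholders): the iterator
 * is re-derived from the pfn whenever a MAX_ORDER_NR_PAGES boundary is
 * crossed:
 *
 *	p = base;
 *	for (i = 0; i < nr_pages; i++, p = mem_map_next(p, base, i))
 *		do_something(p);
 */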

/*
 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
 * so all functions starting at paging_init should be marked __init
 * in those cases. SPARSEMEM, however, allows for memory hotplug,
 * and alloc_bootmem_node is not used.
 */
#ifdef CONFIG_SPARSEMEM
#define __paginginit __meminit
#else
#define __paginginit __init
#endif

/* Memory initialisation debug and verification */
enum mminit_level {
	MMINIT_WARNING,
	MMINIT_VERIFY,
	MMINIT_TRACE
};

#ifdef CONFIG_DEBUG_MEMORY_INIT

extern int mminit_loglevel;

#define mminit_dprintk(level, prefix, fmt, arg...) \
do { \
	if (level < mminit_loglevel) { \
		printk(level <= MMINIT_WARNING ? KERN_WARNING : KERN_DEBUG); \
		printk(KERN_CONT "mminit::" prefix " " fmt, ##arg); \
	} \
} while (0)
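
/*
 * Example use (sketch; the prefix string is arbitrary): only messages
 * whose level is below the current mminit_loglevel are printed, e.g.
 *	mminit_dprintk(MMINIT_TRACE, "memmap_init", "pfn %lu\n", pfn);
 */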

extern void mminit_verify_pageflags_layout(void);
extern void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn);
extern void mminit_verify_zonelist(void);

#else

static inline void mminit_dprintk(enum mminit_level level,
				const char *prefix, const char *fmt, ...)
{
}

static inline void mminit_verify_pageflags_layout(void)
{
}

static inline void mminit_verify_page_links(struct page *page,
		enum zone_type zone, unsigned long nid, unsigned long pfn)
{
}

static inline void mminit_verify_zonelist(void)
{
}
#endif /* CONFIG_DEBUG_MEMORY_INIT */

/* mminit_validate_memmodel_limits is independent of CONFIG_DEBUG_MEMORY_INIT */
#if defined(CONFIG_SPARSEMEM)
extern void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn);
#else
static inline void mminit_validate_memmodel_limits(unsigned long *start_pfn,
				unsigned long *end_pfn)
{
}
#endif /* CONFIG_SPARSEMEM */

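/* possible outcomes of zone reclaim (see zone_reclaim() in mm/vmscan.c) */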
#define ZONE_RECLAIM_NOSCAN	-2
#define ZONE_RECLAIM_FULL	-1
#define ZONE_RECLAIM_SOME	0
#define ZONE_RECLAIM_SUCCESS	1

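/*
 * Filters for the memory-failure (hwpoison) code; hwpoison_filter()
 * returns non-zero when handling should skip the page.  The knobs below
 * are exposed by the hwpoison injector for testing.
 */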
extern int hwpoison_filter(struct page *p);

extern u32 hwpoison_filter_dev_major;
extern u32 hwpoison_filter_dev_minor;
extern u64 hwpoison_filter_flags_mask;
extern u64 hwpoison_filter_flags_value;
extern u64 hwpoison_filter_memcg;
extern u32 hwpoison_filter_enable;

#endif	/* __MM_INTERNAL_H */