#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
/*
 * No-op fallback for architectures that do not need to flush an
 * anonymous page before the kernel accesses it.  Architectures that
 * do (e.g. ones with aliasing caches) define ARCH_HAS_FLUSH_ANON_PAGE
 * and provide a real implementation.
 */
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
/*
 * No-op fallback; architectures that must flush the kernel D-cache
 * for a page define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE themselves.
 */
static inline void flush_kernel_dcache_page(struct page *page)
{
}
#endif

#ifdef CONFIG_HIGHMEM

#include <asm/highmem.h>

/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern unsigned long totalhigh_pages;

void kmap_flush_unused(void);

#else /* CONFIG_HIGHMEM */

static inline unsigned int nr_free_highpages(void) { return 0; }

#define totalhigh_pages 0

#ifndef ARCH_HAS_KMAP
/*
 * Fallback kmap family for !CONFIG_HIGHMEM: every page is permanently
 * mapped, so "mapping" a page is just a page_address() lookup.  The
 * atomic variants still call pagefault_disable()/pagefault_enable()
 * so that code relying on kmap_atomic's implicit pagefault rules
 * behaves the same with and without highmem.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page) do { (void) (page); } while (0)

#include <asm/kmap_types.h>

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)

#define kunmap_atomic(addr, idx)	do { pagefault_enable(); } while (0)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)

#define kmap_flush_unused()	do {} while(0)
#endif

#endif /* CONFIG_HIGHMEM */

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr;

	/* Map, clear for userspace at @vaddr, then drop the mapping. */
	kaddr = kmap_atomic(page, KM_USER0);
	clear_user_page(kaddr, vaddr, page);
	kunmap_atomic(kaddr, KM_USER0);
}

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
74 75 76 77 78 79 80 81 82 83 84 85 86 87
/**
 * __alloc_zeroed_user_highpage - Allocate a zeroed HIGHMEM page for a VMA with caller-specified movable GFP flags
 * @movableflags: The GFP flags related to the pages future ability to move like __GFP_MOVABLE
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA but the caller is expected
 * to specify via movableflags whether the page will be movable in the
 * future or not
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE and providing their own
 * implementation.
 */
L
Linus Torvalds 已提交
88
static inline struct page *
89 90 91
__alloc_zeroed_user_highpage(gfp_t movableflags,
			struct vm_area_struct *vma,
			unsigned long vaddr)
L
Linus Torvalds 已提交
92
{
93 94
	struct page *page = alloc_page_vma(GFP_HIGHUSER | movableflags,
			vma, vaddr);
L
Linus Torvalds 已提交
95 96 97 98 99 100 101 102

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or reclaimed
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
					unsigned long vaddr)
{
	/* __GFP_MOVABLE marks the page as migratable/reclaimable later */
	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
}

/* Zero an entire (possibly highmem) page through a temporary mapping. */
static inline void clear_highpage(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);

	clear_page(addr);
	kunmap_atomic(addr, KM_USER0);
}

/*
 * Zero up to two independent byte ranges, [start1, end1) and
 * [start2, end2), of @page through a temporary atomic mapping, then
 * flush the data cache for the page.  An empty range (end <= start)
 * is skipped; ends past PAGE_SIZE are a bug.
 */
static inline void zero_user_segments(struct page *page,
	unsigned start1, unsigned end1,
	unsigned start2, unsigned end2)
{
	void *addr = kmap_atomic(page, KM_USER0);

	BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);

	if (start1 < end1)
		memset(addr + start1, 0, end1 - start1);
	if (start2 < end2)
		memset(addr + start2, 0, end2 - start2);

	kunmap_atomic(addr, KM_USER0);
	flush_dcache_page(page);
}

/* Zero the single byte range [start, end) of @page. */
static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

/* Zero @size bytes of @page beginning at byte offset @start. */
static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}
/* Deprecated alias for zero_user(); callers should use zero_user() directly. */
static inline void __deprecated memclear_highpage_flush(struct page *page,
			unsigned int offset, unsigned int size)
{
	zero_user(page, offset, size);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

163 164
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
L
Linus Torvalds 已提交
165 166 167 168 169 170 171 172 173 174
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from, KM_USER0);
	vto = kmap_atomic(to, KM_USER1);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}

175 176
#endif

/* Copy one full page of data from @from to @to via atomic kmaps. */
static inline void copy_highpage(struct page *to, struct page *from)
{
	char *src, *dst;

	src = kmap_atomic(from, KM_USER0);
	dst = kmap_atomic(to, KM_USER1);
	copy_page(dst, src);
	kunmap_atomic(src, KM_USER0);
	kunmap_atomic(dst, KM_USER1);
}

#endif /* _LINUX_HIGHMEM_H */