hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* Step to the 4K subpage of the huge page that @address falls in. */
	page = &pte_page(*pte)[(address / PAGE_SIZE) % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

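/*
 * pud_huge() returns 1 if @pud maps a hugetlb page at the PUD level,
 * i.e. a 1GB mapping with the _PAGE_PSE bit set.  Otherwise, returns 0.
 */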
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
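/*
 * Bottom-up search: scan upward from the legacy mmap base for a free,
 * huge-page-aligned range of @len bytes below TASK_SIZE.
 */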
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
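	/*
	 * align_mask covers the in-hugepage offset bits, so vm_unmapped_area()
	 * returns a huge-page-aligned address.
	 */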
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

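	/* First try a top-down search below the current mmap base. */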
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

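	/*
	 * For MAP_FIXED, only validate that the requested range is properly
	 * huge-page aligned; the caller's address is used as-is.
	 */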
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

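	/* Try the caller's hint first, rounded up to a huge page boundary. */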
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
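	/* Otherwise follow the mm's layout: legacy bottom-up or top-down. */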
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
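/*
 * Parse the hugepagesz= boot parameter.  PMD-sized (2MB) pages are always
 * supported on x86-64; PUD-sized (1GB) pages additionally require the
 * CPU's gbpages (pdpe1gb) feature.
 */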
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
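/*
 * Example kernel command line, assuming a CPU with 1GB page support:
 *   hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 */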

#ifdef CONFIG_CMA
static __init int gigantic_pages_init(void)
{
	/* With CMA we can allocate gigantic pages at runtime */
	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif