/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
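/* The low and high halves of the sparc64 virtual address space are
 * separated by a hole that cannot be mapped; these bounds keep huge
 * page mappings (plus a 4GB guard on either side) out of that region.
 */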
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
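		/* If the candidate range would run into the excluded
		 * VA hole, restart the search just above it.
		 */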
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* Start from the cached hint, aligned to a huge page boundary */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

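	/* If the caller supplied a hint, round it up to a huge page
	 * boundary and use it when the hole there is large enough.
	 */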
	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
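	/* Search in the same direction as the normal mmap() allocator
	 * for this mm.
	 */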
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

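/* A huge page on sparc64 is backed by a run of ordinary PTEs, so
 * allocating the "huge" PTE simply means walking (and allocating)
 * the normal pgd/pud/pmd levels and returning the first PTE slot.
 */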
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

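/* Lookup-only counterpart of huge_pte_alloc(): returns NULL if any
 * page table level covering the address is missing.
 */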
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

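/* sparc64 never shares huge page page tables, so there is nothing
 * to unshare here.
 */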
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

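/* Install a huge mapping by writing every sub-PTE covering the
 * HPAGE_SIZE range, stepping the physical address by PAGE_SIZE for
 * each one, and keep the per-mm count of huge PTEs up to date.
 */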
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

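/* Tear down a huge mapping: clear every sub-PTE in the range and
 * return the original first PTE so the caller sees the old state.
 */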
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

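/* Make sure this address space is set up for huge pages: grow the
 * huge page TSB if it does not exist yet and, on UltraSPARC-III+ and
 * later, program the context register page size fields to match.
 */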
void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}