/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, addr);
#ifdef CONFIG_PPC_64K_PAGES
			/* Currently, we use the normal PTE offset within full
			 * size PTE pages, thus our huge PTEs are scattered in
			 * the PTE page and we do waste some. We may change
			 * that in the future, but the current mechanism keeps
			 * things much simpler
			 */
			if (!pmd_none(*pm)) {
				/* Note: pte_offset_* are all equivalent on
				 * ppc64 as we don't have HIGHMEM
				 */
				pt = pte_offset_kernel(pm, addr);
				return pt;
			}
#else /* CONFIG_PPC_64K_PAGES */
			/* On 4k pages, we put huge PTEs in the PMD page */
			pt = (pte_t *)pm;
			return pt;
#endif /* CONFIG_PPC_64K_PAGES */
		}
	}

	return NULL;
}

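/* Walk the page tables for @addr, allocating any missing intermediate
 * levels, and return a pointer to the PTE slot that will map the huge
 * page, or NULL if an allocation fails. */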
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = pmd_alloc(mm, pu, addr);
		if (pm) {
#ifdef CONFIG_PPC_64K_PAGES
			/* See comment in huge_pte_offset. Note that if we ever
			 * want to put the page size in the PMD, we would have
			 * to open code our own pte_alloc* function in order
			 * to populate and set the size atomically
			 */
			pt = pte_alloc_map(mm, pm, addr);
#else /* CONFIG_PPC_64K_PAGES */
			pt = (pte_t *)pm;
#endif /* CONFIG_PPC_64K_PAGES */
			return pt;
		}
	}

	return NULL;
}

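/* Install a huge PTE, first tearing down any existing mapping at
 * @ptep (including its hash table entry) so no stale HPTE survives. */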
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_update (huge / !huge)
		 */
		unsigned long old = pte_update(ptep, ~0UL);
		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

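/* Atomically clear the huge PTE at @ptep, invalidating any hash table
 * entry it had, and return the old PTE value. */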
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
	*ptep = __pte(0);

	return __pte(old);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)) )
		return -EINVAL;
	return 0;
}

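/* Argument block passed to the per-CPU SLB flush handlers below when
 * new hugepage areas are opened in an mm. */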
struct slb_flush_info {
	struct mm_struct *mm;
	u16 newareas;
};

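/* on_each_cpu() callback: on CPUs running the affected mm, refresh the
 * paca copy of the context and slbie each newly opened low segment. */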
static void flush_low_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);

	if (current->active_mm != fi->mm)
		return;

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}

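/* As flush_low_segments(), but slbies every SID-sized segment within
 * each newly opened high area. */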
static void flush_high_segments(void *parm)
{
	struct slb_flush_info *fi = parm;
	unsigned long i, j;

	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);

	if (current->active_mm != fi->mm)
		return;

	/* Only need to do anything if this CPU is working in the same
	 * mm as the one which has changed */

	/* update the paca copy of the context struct */
	get_paca()->context = current->active_mm->context;

	asm volatile("isync" : : : "memory");
	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (fi->newareas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     :: "r" (((i << HTLB_AREA_SHIFT)
					      + (j << SID_SHIFT)) | SLBIE_C));
	}
	asm volatile("isync" : : : "memory");
}

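/* Fail with -EBUSY if any VMA already lies within the given low area,
 * since an in-use area cannot be converted to hugepage use. */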
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

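/* High-area counterpart of prepare_low_area_for_htlb(). */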
static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

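/* Open the low areas in @newareas for hugepage use: check that they
 * are empty, mark them in the context, then flush stale SLB entries
 * on every CPU. */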
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;
	struct slb_flush_info fi;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_low_segments, &fi, 0, 1);

	return 0;
}

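/* High-area counterpart of open_low_hpage_areas(). */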
static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	struct slb_flush_info fi;
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();

	fi.mm = mm;
	fi.newareas = newareas;
	on_each_cpu(flush_high_segments, &fi, 0, 1);

	return 0;
}

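/* Open every low/high area touched by [addr, addr+len) so the range
 * can be mapped with huge pages; fails if any such area is in use. */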
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	int err = 0;

	if ((addr + len) < addr)
		return -EINVAL;

	if (addr < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					  LOW_ESID_MASK(addr, len));
	if ((addr + len) > 0x100000000UL)
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr+len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr,len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
	        start_addr = addr = mm->free_area_cache;
	} else {
	        start_addr = addr = TASK_UNMAPPED_BASE;
	        mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start)
				&& !is_hugepage_only_range(mm, addr,len))
			return addr;
	}

	if (len <= largest_hole) {
	        largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
 	 	if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		          (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
		        mm->cached_hole_size = largest_hole;
		        return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
		        if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
		        largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

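/* Check whether a hinted mapping at [addr, addr+len) would collide
 * with an existing VMA. */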
static int htlb_check_hinted_area(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	if (!vma || ((addr + len) <= vma->vm_start))
		return 0;

	return -ENOMEM;
}

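/* Search below 4GB for a free range of @len bytes lying entirely
 * within the low areas enabled in @segmask. */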
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

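/* As htlb_get_low_area(), but searches from 4GB up to
 * TASK_SIZE_USER64 within the high areas enabled in @areamask. */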
static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

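/* Find space for a hugepage mapping: try the hint address first, then
 * the already-open areas, and finally open progressively more areas
 * until the request fits. */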
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;
	struct vm_area_struct *vma;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	/* Paranoia, caller should have dealt with this */
	BUG_ON((addr + len) < addr);

	if (test_thread_flag(TIF_32BIT)) {
		/* Paranoia, caller should have dealt with this */
		BUG_ON((addr + len) > 0x100000000UL);

		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can use the hint address */
		if (addr && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = LOW_ESID_MASK(addr, len);
			if (open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can use the hint address */
		/* We discourage 64-bit processes from doing hugepage
		 * mappings below 4GB (must use MAP_FIXED) */
		if ((addr >= 0x100000000UL)
		    && (htlb_check_hinted_area(addr, len) == 0)) {
			areamask = HTLB_AREA_MASK(addr, len);
			if (open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}

		/* Next see if we can map in the existing high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		/* Finally go looking for areas to open */
		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}

/*
 * Called by asm hashtable.S for doing lazy icache flush
 */
static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
						  pte_t pte, int trap)
{
	struct page *page;
	int i;

	if (!pfn_valid(pte_pfn(pte)))
		return rflags;

	page = pte_page(pte);

	/* page is dirty */
	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
		if (trap == 0x400) {
			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
				__flush_dcache_icache(page_address(page+i));
			set_bit(PG_arch_1, &page->flags);
		} else {
			rflags |= HPTE_R_N;
		}
	}
	return rflags;
}

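/* Hash-fault handling for huge pages: build or update the HPTE for
 * @ea.  Returns 0 on success, or 1 to send the fault up to
 * do_page_fault(). */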
int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local,
		   unsigned long trap)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/* 
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is 
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY. 
	 */

	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no-execute, so we
		 * don't need to worry about that case */
		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
						       trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
					 local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in the new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			new_pte |= _PAGE_F_SECOND;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL; 
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
                        }
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & _PAGE_F_GIX;
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}