/*
 * PPC64 (POWER4) Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>

#define NUM_LOW_AREAS	(0x100000000UL >> SID_SHIFT)
#define NUM_HIGH_AREAS	(PGTABLE_RANGE >> HTLB_AREA_SHIFT)
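
/*
 * The 4GB of low address space is divided into 256MB segments
 * (SID_SHIFT == 28, so NUM_LOW_AREAS == 16); the space above 4GB is
 * divided into NUM_HIGH_AREAS larger areas.  Each mm tracks which
 * areas are open for hugepages with two 16-bit bitmaps in its context
 * (low_htlb_areas/high_htlb_areas); an area must be opened, and its
 * stale SLB entries flushed on all CPUs, before hugepages can be
 * mapped into it.
 */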

/* Modelled after find_linux_pte() */
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, addr);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, addr);
#ifdef CONFIG_PPC_64K_PAGES
			/* Currently, we use the normal PTE offset within
			 * full-size PTE pages, so our huge PTEs are scattered
			 * in the PTE page and we do waste some of it.  We may
			 * change that in the future, but the current mechanism
			 * keeps things much simpler.
			 */
			if (!pmd_none(*pm)) {
				/* Note: pte_offset_* are all equivalent on
				 * ppc64 as we don't have HIGHMEM
				 */
				pt = pte_offset_kernel(pm, addr);
				return pt;
			}
#else /* CONFIG_PPC_64K_PAGES */
			/* On 4k pages, we put huge PTEs in the PMD page */
			pt = (pte_t *)pm;
			return pt;
#endif /* CONFIG_PPC_64K_PAGES */
		}
	}

	return NULL;
}

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	addr &= HPAGE_MASK;

	pg = pgd_offset(mm, addr);
	pu = pud_alloc(mm, pg, addr);

	if (pu) {
		pm = pmd_alloc(mm, pu, addr);
		if (pm) {
#ifdef CONFIG_PPC_64K_PAGES
			/* See comment in huge_pte_offset.  Note that if we
			 * ever want to put the page size in the PMD, we would
			 * have to open-code our own pte_alloc* function in
			 * order to populate and set the size atomically.
			 */
			pt = pte_alloc_map(mm, pm, addr);
#else /* CONFIG_PPC_64K_PAGES */
			pt = (pte_t *)pm;
#endif /* CONFIG_PPC_64K_PAGES */
			return pt;
		}
	}

	return NULL;
}

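/* Install a huge PTE: if a previous translation is present, tear it
 * down (including any hash table entry) before writing the new value. */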
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep)) {
		/* We open-code pte_clear because we need to pass the right
		 * argument to hpte_update (huge / !huge)
		 */
		unsigned long old = pte_update(ptep, ~0UL);
		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
		flush_tlb_pending();
	}
	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}

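/* Atomically clear a huge PTE, evicting any hash table entry for it,
 * and return the old PTE value. */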
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long old = pte_update(ptep, ~0UL);

	if (old & _PAGE_HASHPTE)
		hpte_update(mm, addr & HPAGE_MASK, ptep, old, 1);
	*ptep = __pte(0);

	return __pte(old);
}

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (! (within_hugepage_low_range(addr, len)
	       || within_hugepage_high_range(addr, len)) )
		return -EINVAL;
	return 0;
}

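/* IPI handler: invalidate the SLB entry for each newly-opened low
 * (256MB) segment so that subsequent accesses refault and pick up the
 * segment's new hugepage attributes. */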
static void flush_low_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);

	for (i = 0; i < NUM_LOW_AREAS; i++) {
		if (! (areas & (1U << i)))
			continue;
		asm volatile("slbie %0"
			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

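/* As flush_low_segments, but each high area spans multiple 256MB
 * segments, so every SID within an opened area gets its own slbie. */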
static void flush_high_segments(void *parm)
{
	u16 areas = (unsigned long) parm;
	unsigned long i, j;

	asm volatile("isync" : : : "memory");

	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);

	for (i = 0; i < NUM_HIGH_AREAS; i++) {
		if (! (areas & (1U << i)))
			continue;
		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
			asm volatile("slbie %0"
				     :: "r" (((i << HTLB_AREA_SHIFT)
					     + (j << SID_SHIFT)) | SLBIE_C));
	}

	asm volatile("isync" : : : "memory");
}

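/* An area may be opened for hugepages only if no normal-page VMAs
 * already live inside it; these helpers check for that. */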
static int prepare_low_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << SID_SHIFT;
	unsigned long end = (area+1) << SID_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_LOW_AREAS);

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
{
	unsigned long start = area << HTLB_AREA_SHIFT;
	unsigned long end = (area+1) << HTLB_AREA_SHIFT;
	struct vm_area_struct *vma;

	BUG_ON(area >= NUM_HIGH_AREAS);

	/* Hack: so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = 0x100000000UL;

	/* Check no VMAs are in the region */
	vma = find_vma(mm, start);
	if (vma && (vma->vm_start < end))
		return -EBUSY;

	return 0;
}

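/* Open the requested low areas: record them in the context, propagate
 * the context to this CPU's PACA, then force an SLB flush on all CPUs. */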
static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);

	newareas &= ~(mm->context.low_htlb_areas);
	if (! newareas)
		return 0; /* The segments we want are already open */

	for (i = 0; i < NUM_LOW_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_low_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.low_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
{
	unsigned long i;

	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
	BUILD_BUG_ON((sizeof(mm->context.high_htlb_areas)*8)
		     != NUM_HIGH_AREAS);

	newareas &= ~(mm->context.high_htlb_areas);
	if (! newareas)
		return 0; /* The areas we want are already open */

	for (i = 0; i < NUM_HIGH_AREAS; i++)
		if ((1 << i) & newareas)
			if (prepare_high_area_for_htlb(mm, i) != 0)
				return -EBUSY;

	mm->context.high_htlb_areas |= newareas;

	/* update the paca copy of the context struct */
	get_paca()->context = mm->context;

	/* the context change must make it to memory before the flush,
	 * so that further SLB misses do the right thing. */
	mb();
	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);

	return 0;
}

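/* Open every low or high hugepage area touched by [addr, addr+len).
 * LOW_ESID_MASK() and HTLB_AREA_MASK() return a bitmap with one bit
 * per area the range overlaps. */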
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	int err;

	if ((addr + len) < addr)
		return -EINVAL;

	if ((addr + len) < 0x100000000UL)
		err = open_low_hpage_areas(current->mm,
					  LOW_ESID_MASK(addr, len));
	else
		err = open_high_hpage_areas(current->mm,
					    HTLB_AREA_MASK(addr, len));
	if (err) {
		printk(KERN_DEBUG "prepare_hugepage_range(%lx, %lx)"
		       " failed (lowmask: 0x%04hx, highmask: 0x%04hx)\n",
		       addr, len,
		       LOW_ESID_MASK(addr, len), HTLB_AREA_MASK(addr, len));
		return err;
	}

	return 0;
}

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	pte_t *ptep;
	struct page *page;

	if (! in_hugepage_area(mm->context, address))
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, address);
	page = pte_page(*ptep);
	if (page)
		page += (address % HPAGE_SIZE) / PAGE_SIZE;

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

/* Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions. */
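/* The scan below therefore skips a candidate range forward to the next
 * segment or area boundary whenever it would touch a hugepage area. */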
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (((TASK_SIZE - len) >= addr)
		    && (!vma || (addr + len) <= vma->vm_start)
		    && !is_hugepage_only_range(mm, addr, len))
			return addr;
	}
	if (len > mm->cached_hole_size) {
	        start_addr = addr = mm->free_area_cache;
	} else {
	        start_addr = addr = TASK_UNMAPPED_BASE;
	        mm->cached_hole_size = 0;
	}

full_search:
	vma = find_vma(mm, addr);
	while (TASK_SIZE - len >= addr) {
		BUG_ON(vma && (addr >= vma->vm_end));

		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (touches_hugepage_high_range(mm, addr, len)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(mm, addr);
			continue;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		vma = vma->vm_next;
	}

	/* Make sure we didn't miss any holes */
	if (start_addr != TASK_UNMAPPED_BASE) {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
		goto full_search;
	}
	return -ENOMEM;
}

/*
 * This mmap-allocator allocates new areas top-down from below the
 * stack's low limit (the base):
 *
 * Because we have an exclusive hugepage region which lies within the
 * normal user address space, we have to take special measures to make
 * non-huge mmap()s evade the hugepage reserved regions.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start)
				&& !is_hugepage_only_range(mm, addr, len))
			return addr;
	}

	if (len <= largest_hole) {
	        largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & PAGE_MASK;
	do {
hugepage_recheck:
		if (touches_hugepage_low_range(mm, addr, len)) {
			addr = (addr & ((~0) << SID_SHIFT)) - len;
			goto hugepage_recheck;
		} else if (touches_hugepage_high_range(mm, addr, len)) {
			addr = (addr & ((~0UL) << HTLB_AREA_SHIFT)) - len;
			goto hugepage_recheck;
		}

		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr+len <= vma->vm_start &&
		          (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
		        mm->cached_hole_size = largest_hole;
		        return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
		        if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
		        largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = vma->vm_start-len;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

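/* Search below 4GB for a free range of the given length that lies
 * entirely within low segments permitted by segmask. */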
static unsigned long htlb_get_low_area(unsigned long len, u16 segmask)
{
	unsigned long addr = 0;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= 0x100000000UL) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_low_range(addr, len, segmask)) {
			addr = ALIGN(addr+1, 1<<SID_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on segmask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

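/* As htlb_get_low_area, but for the high areas permitted by areamask. */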
static unsigned long htlb_get_high_area(unsigned long len, u16 areamask)
{
	unsigned long addr = 0x100000000UL;
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, addr);
	while (addr + len <= TASK_SIZE_USER64) {
		BUG_ON(vma && (addr >= vma->vm_end)); /* invariant */

		if (! __within_hugepage_high_range(addr, len, areamask)) {
			addr = ALIGN(addr+1, 1UL<<HTLB_AREA_SHIFT);
			vma = find_vma(current->mm, addr);
			continue;
		}

		if (!vma || (addr + len) <= vma->vm_start)
			return addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
		/* Depending on areamask this might not be a confirmed
		 * hugepage region, so the ALIGN could have skipped
		 * some VMAs */
		vma = find_vma(current->mm, addr);
	}

	return -ENOMEM;
}

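/* Pick an address for a hugepage mapping: first try the areas this
 * context already has open; failing that, grow the candidate mask one
 * area at a time, working down from the highest usable address, and
 * open the extra areas once a fit is found. */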
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	int lastshift;
	u16 areamask, curareas;

	if (HPAGE_SHIFT == 0)
		return -EINVAL;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
		return -EINVAL;

	if (test_thread_flag(TIF_32BIT)) {
		curareas = current->mm->context.low_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * low areas */
		addr = htlb_get_low_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		lastshift = 0;
		for (areamask = LOW_ESID_MASK(0x100000000UL-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_low_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_low_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	} else {
		curareas = current->mm->context.high_htlb_areas;

		/* First see if we can do the mapping in the existing
		 * high areas */
		addr = htlb_get_high_area(len, curareas);
		if (addr != -ENOMEM)
			return addr;

		lastshift = 0;
		for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len);
		     ! lastshift; areamask >>=1) {
			if (areamask & 1)
				lastshift = 1;

			addr = htlb_get_high_area(len, curareas | areamask);
			if ((addr != -ENOMEM)
			    && open_high_hpage_areas(current->mm, areamask) == 0)
				return addr;
		}
	}
	printk(KERN_DEBUG "hugetlb_get_unmapped_area() unable to open"
	       " enough areas\n");
	return -ENOMEM;
}

int hash_huge_page(struct mm_struct *mm, unsigned long access,
		   unsigned long ea, unsigned long vsid, int local)
{
	pte_t *ptep;
	unsigned long old_pte, new_pte;
	unsigned long va, rflags, pa;
	long slot;
	int err = 1;

	ptep = huge_pte_offset(mm, ea);

	/* Search the Linux page table for a match with va */
	va = (vsid << 28) | (ea & 0x0fffffff);

	/*
	 * If no pte found or not present, send the problem up to
	 * do_page_fault
	 */
	if (unlikely(!ptep || pte_none(*ptep)))
		goto out;

	/* 
	 * Check the user's access rights to the page.  If access should be
	 * prevented then send the problem up to do_page_fault.
	 */
	if (unlikely(access & ~pte_val(*ptep)))
		goto out;
	/*
	 * At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is 
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY. 
	 */

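	/* Atomically set _PAGE_BUSY (a per-PTE lock bit) together with
	 * ACCESSED and HASHPTE.  If another CPU already holds the PTE
	 * busy, back out and let the fault be retried. */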
	do {
		old_pte = pte_val(*ptep);
		if (old_pte & _PAGE_BUSY)
			goto out;
		new_pte = old_pte | _PAGE_BUSY |
			_PAGE_ACCESSED | _PAGE_HASHPTE;
	} while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

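	/* hash PTE pp bits: 0x2 = user read/write, 0x3 = user read-only */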
	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HPTE_R_N (no-execute) since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(va, HPAGE_SHIFT);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(va, HPAGE_SHIFT);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot information in new PTE */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;

		/* Add in WIMG bits */
		/* XXX We should store these in the pte */
		/* --BenH: I think they are ... */
		rflags |= _PAGE_COHERENT;

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
					  mmu_huge_psize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			new_pte |= _PAGE_F_SECOND;
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL; 
			slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_huge_psize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		if (unlikely(slot == -2))
			panic("hash_huge_page: pte_insert failed\n");

		new_pte |= (slot << 12) & _PAGE_F_GIX;
	}

	/*
	 * No need to use ldarx/stdcx here because all who
	 * might be updating the pte will hold the
	 * page_table_lock
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);

	err = 0;

 out:
	return err;
}