/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[SLICE_NUM_LOW + 3 + SLICE_NUM_HIGH + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		if (test_bit(i, mask.high_slices))
			*(p++) = '1';
		else
			*(p++) = '0';
	}
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

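/*
 * Build the slice_mask covering the address range [start, start + len):
 * one bit per low slice (below SLICE_LOW_TOP) in low_slices, one bit per
 * high slice in the high_slices bitmap.
 */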
static void slice_range_to_mask(unsigned long start, unsigned long len,
				struct slice_mask *ret)
{
	unsigned long end = start + len - 1;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

		ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret->high_slices, start_index, count);
	}
}

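/*
 * Check that no VMA overlaps [addr, addr + len) and that the range lies
 * below mm->task_size.
 */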
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps; the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

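/* Build the mask of all slices, low and high, that contain no VMA. */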
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
{
	unsigned long i;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret->low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret->high_slices);
}

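/*
 * Build the mask of slices whose current page size is 'psize'. Each
 * slice's page size index takes 4 bits: low slices are packed into the
 * u64 low_slices_psize, high slices two per byte in high_slices_psize.
 */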
static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
{
	unsigned char *hpsizes;
	int index, mask_index;
	unsigned long i;
	u64 lpsizes;

	ret->low_slices = 0;
	bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret->low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret->high_slices);
	}
}

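/* Return nonzero if every slice set in 'mask' is also set in 'available'. */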
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	bitmap_and(result, mask.high_slices,
		   available.high_slices, SLICE_NUM_HIGH);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		bitmap_equal(result, mask.high_slices, SLICE_NUM_HIGH);
}

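/*
 * IPI handler: on CPUs where this mm is active, refresh the paca's copy
 * of the context and flush/re-bolt the SLB so that stale segment entries
 * for converted slices are dropped.
 */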
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(current->active_mm);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

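/*
 * Re-label every slice selected in 'mask' with page size 'psize' in the
 * context's slice arrays. The CPU SLBs are left to the caller to flush;
 * only coprocessor SLBs are flushed here.
 */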
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	u64 lpsizes;
208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	/* Write the new slice psize bits */
	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return a boolean indicating whether the slice is marked as available in
 * the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}

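/*
 * Bottom-up search: walk upwards from TASK_UNMAPPED_BASE, and for each
 * maximal run of available slices ask vm_unmapped_area() for a fit.
 */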
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = TASK_UNMAPPED_BASE;
	while (addr < TASK_SIZE) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= TASK_SIZE)
			addr = TASK_SIZE;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

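/*
 * Top-down search: same idea, but walk downwards from mm->mmap_base, and
 * fall back to the bottom-up search if nothing is found.
 */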
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

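/* dst |= src, over both the low bit field and the high bitmap. */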
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices |= src->low_slices;
	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

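/* dst &= ~src, over both the low bit field and the high bitmap. */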
static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices &= ~src->low_slices;

	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

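/*
 * The kernel's base page size: slice conversions to a psize above this
 * trigger the slice_flush_segments() IPI in slice_get_unmapped_area().
 */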
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/*
	 * init different masks
	 */
	mask.low_slices = 0;
	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

	/* Silence a spurious "may be used uninitialized" warning */
	potential_mask.low_slices = 0;
	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

	compat_mask.low_slices = 0;
	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	slice_mask_for_size(mm, psize, &good_mask);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */

#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif

	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		slice_range_to_mask(addr, len, &mask);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	slice_mask_for_free(mm, &potential_mask);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	slice_range_to_mask(addr, len, &mask);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

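/* Return the MMU page size index in force at 'addr' in this mm. */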
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slice, but can get enabled along with MMU_SLICE
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 *
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());
	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

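/* Convert every slice covering [start, start + len) to page size 'psize'. */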
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask;

	VM_BUG_ON(radix_enabled());

	slice_range_to_mask(start, len, &mask);
	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is OK
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	if (radix_enabled())
		return 0;

	slice_range_to_mask(addr, len, &mask);
	slice_mask_for_size(mm, psize, &available);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}
#endif