/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, struct slice_mask mask)
{
	char	*p, buf[SLICE_NUM_LOW + 3 + SLICE_NUM_HIGH + 1];
	int	i;

	if (!_slice_debug)
		return;
	p = buf;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		*(p++) = (mask.low_slices & (1 << i)) ? '1' : '0';
	*(p++) = ' ';
	*(p++) = '-';
	*(p++) = ' ';
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		if (test_bit(i, mask.high_slices))
			*(p++) = '1';
		else
			*(p++) = '0';
	}
	*(p++) = 0;

	printk(KERN_DEBUG "%s:%s\n", label, buf);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_debug(fmt); } while(0)

#else

static void slice_print_mask(const char *label, struct slice_mask mask) {}
#define slice_dbg(fmt...)

#endif

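/*
 * Build the bitmask of slices (low and high) that overlap the
 * address range [start, start + len).
 */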
static struct slice_mask slice_range_to_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret;

	ret.low_slices = 0;
	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);

	if (start < SLICE_LOW_TOP) {
		unsigned long mend = min(end, (SLICE_LOW_TOP - 1));

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}

	if ((start + len) > SLICE_LOW_TOP) {
		unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
		unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
		unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

		bitmap_set(ret.high_slices, start_index, count);
	}

	return ret;
}

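/*
 * Return non-zero if [addr, addr + len) lies within the task size
 * limit and does not overlap any existing VMA.
 */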
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
			      unsigned long len)
{
	struct vm_area_struct *vma;

	if ((mm->task_size - len) < addr)
		return 0;
	vma = find_vma(mm, addr);
	return (!vma || (addr + len) <= vma->vm_start);
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
	return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
				   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
	unsigned long start = slice << SLICE_HIGH_SHIFT;
	unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);

	/* Hack, so that each address is controlled by exactly one
	 * of the high or low area bitmaps, the first high area starts
	 * at 4GB, not 0 */
	if (start == 0)
		start = SLICE_LOW_TOP;

	return !slice_area_is_free(mm, start, end - start);
}

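/*
 * Build the mask of slices that contain no VMAs at all and are
 * therefore candidates for conversion to a new page size.
 */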
static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
{
	struct slice_mask ret;
	unsigned long i;

	ret.low_slices = 0;
	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);

	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (!slice_low_has_vma(mm, i))
			ret.low_slices |= 1u << i;

	if (mm->task_size <= SLICE_LOW_TOP)
		return ret;

	for (i = 0; i < SLICE_NUM_HIGH; i++)
		if (!slice_high_has_vma(mm, i))
			__set_bit(i, ret.high_slices);

	return ret;
}

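/*
 * Build the mask of slices whose current page size is already psize.
 * Low slice sizes are packed four bits per slice in a u64; high slice
 * sizes are packed two per byte in the high_slices_psize array.
 */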
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
	unsigned char *hpsizes;
	int index, mask_index;
	struct slice_mask ret;
	unsigned long i;
	u64 lpsizes;

	ret.low_slices = 0;
	bitmap_zero(ret.high_slices, SLICE_NUM_HIGH);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == psize)
			ret.low_slices |= 1u << i;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
			__set_bit(i, ret.high_slices);
	}

	return ret;
}

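/*
 * Return non-zero if every slice set in 'mask' is also set in
 * 'available'.
 */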
static int slice_check_fit(struct slice_mask mask, struct slice_mask available)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	bitmap_and(result, mask.high_slices,
		   available.high_slices, SLICE_NUM_HIGH);

	return (mask.low_slices & available.low_slices) == mask.low_slices &&
		bitmap_equal(result, mask.high_slices, SLICE_NUM_HIGH);
}

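/*
 * IPI handler: if this CPU is currently running 'parm', resync the
 * paca copy of the context and flush/rebolt the SLB so that new slice
 * page sizes take effect.
 */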
static void slice_flush_segments(void *parm)
{
	struct mm_struct *mm = parm;
	unsigned long flags;

	if (mm != current->active_mm)
		return;

	copy_mm_to_paca(&current->active_mm->context);

	local_irq_save(flags);
	slb_flush_and_rebolt();
	local_irq_restore(flags);
}

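/*
 * Change the page size of every slice set in 'mask' to 'psize',
 * serialized against concurrent conversions by slice_convert_lock.
 */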
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
	int index, mask_index;
	/* Write the new slice psize bits */
	unsigned char *hpsizes;
	u64 lpsizes;
	unsigned long i, flags;

	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
	slice_print_mask(" mask", mask);

	/* We need to use a spinlock here to protect against
	 * concurrent 64k -> 4k demotion ...
	 */
	spin_lock_irqsave(&slice_convert_lock, flags);

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (mask.low_slices & (1u << i))
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));

	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (test_bit(i, mask.high_slices))
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}

	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

	spin_unlock_irqrestore(&slice_convert_lock, flags);

	copro_flush_all_slbs(mm);
}

/*
 * Compute which slice addr is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on 'end' parameter);
 * return boolean indicating if the slice is marked as available in the
 * 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
				 struct slice_mask available,
				 int end,
				 unsigned long *boundary_addr)
{
	unsigned long slice;
	if (addr < SLICE_LOW_TOP) {
		slice = GET_LOW_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
		return !!(available.low_slices & (1u << slice));
	} else {
		slice = GET_HIGH_SLICE_INDEX(addr);
		*boundary_addr = (slice + end) ?
			((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
		return !!test_bit(slice, available.high_slices);
	}
}

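/*
 * Scan upward from TASK_UNMAPPED_BASE, extending each candidate window
 * across consecutive available slices and asking vm_unmapped_area()
 * for a fit inside it.
 */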
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
					      unsigned long len,
					      struct slice_mask available,
					      int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, next_end;
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;
	addr = TASK_UNMAPPED_BASE;
	while (addr < TASK_SIZE) {
		info.low_limit = addr;
		if (!slice_scan_available(addr, available, 1, &addr))
			continue;

 next_slice:
		/*
		 * At this point [info.low_limit; addr) covers
		 * available slices only and ends at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the next available slice.
		 */
		if (addr >= TASK_SIZE)
			addr = TASK_SIZE;
		else if (slice_scan_available(addr, available, 1, &next_end)) {
			addr = next_end;
			goto next_slice;
		}
		info.high_limit = addr;

		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	return -ENOMEM;
}

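/*
 * Scan downward from mm->mmap_base, mirroring the bottom-up search,
 * and fall back to the bottom-up allocator if nothing fits.
 */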
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
					     unsigned long len,
					     struct slice_mask available,
					     int psize)
{
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	unsigned long addr, found, prev;
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
	info.align_offset = 0;

	addr = mm->mmap_base;
	while (addr > PAGE_SIZE) {
		info.high_limit = addr;
		if (!slice_scan_available(addr - 1, available, 0, &addr))
			continue;

 prev_slice:
		/*
		 * At this point [addr; info.high_limit) covers
		 * available slices only and starts at a slice boundary.
		 * Check if we need to reduce the range, or if we can
		 * extend it to cover the previous available slice.
		 */
		if (addr < PAGE_SIZE)
			addr = PAGE_SIZE;
		else if (slice_scan_available(addr - 1, available, 0, &prev)) {
			addr = prev;
			goto prev_slice;
		}
		info.low_limit = addr;
		found = vm_unmapped_area(&info);
		if (!(found & ~PAGE_MASK))
			return found;
	}

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return slice_find_area_bottomup(mm, len, available, psize);
}


static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
				     struct slice_mask mask, int psize,
				     int topdown)
{
	if (topdown)
		return slice_find_area_topdown(mm, len, mask, psize);
	else
		return slice_find_area_bottomup(mm, len, mask, psize);
}

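/*
 * Helpers implementing dst |= src and dst &= ~src over both the
 * low-slice bitfield and the high-slice bitmap.
 */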
static inline void slice_or_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);
	dst->low_slices |= src->low_slices;
	bitmap_or(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst, struct slice_mask *src)
{
	DECLARE_BITMAP(result, SLICE_NUM_HIGH);

	dst->low_slices &= ~src->low_slices;

	bitmap_andnot(result, dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
	bitmap_copy(dst->high_slices, result, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE	MMU_PAGE_64K
#else
#define MMU_PAGE_BASE	MMU_PAGE_4K
#endif

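/*
 * Core slice-aware allocator: find (and if necessary convert) a range
 * of address space with the requested page size. Backs the arch
 * get_unmapped_area hooks below.
 */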
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
				      unsigned long flags, unsigned int psize,
				      int topdown)
{
	struct slice_mask mask;
	struct slice_mask good_mask;
	struct slice_mask potential_mask;
	struct slice_mask compat_mask;
	int fixed = (flags & MAP_FIXED);
	int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
	struct mm_struct *mm = current->mm;
	unsigned long newaddr;

	/*
	 * init different masks
	 */
	mask.low_slices = 0;
	bitmap_zero(mask.high_slices, SLICE_NUM_HIGH);

	/* silence stupid warning */;
	potential_mask.low_slices = 0;
	bitmap_zero(potential_mask.high_slices, SLICE_NUM_HIGH);

	compat_mask.low_slices = 0;
	bitmap_zero(compat_mask.high_slices, SLICE_NUM_HIGH);

	/* Sanity checks */
	BUG_ON(mm->task_size == 0);
	VM_BUG_ON(radix_enabled());

	slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
	slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
		  addr, len, flags, topdown);

	if (len > mm->task_size)
		return -ENOMEM;
	if (len & ((1ul << pshift) - 1))
		return -EINVAL;
	if (fixed && (addr & ((1ul << pshift) - 1)))
		return -EINVAL;
	if (fixed && addr > (mm->task_size - len))
		return -ENOMEM;

	/* If hint, make sure it matches our alignment restrictions */
	if (!fixed && addr) {
		addr = _ALIGN_UP(addr, 1ul << pshift);
		slice_dbg(" aligned addr=%lx\n", addr);
		/* Ignore hint if it's too large or overlaps a VMA */
		if (addr > mm->task_size - len ||
		    !slice_area_is_free(mm, addr, len))
			addr = 0;
	}

	/* First make up a "good" mask of slices that have the right size
	 * already
	 */
	good_mask = slice_mask_for_size(mm, psize);
	slice_print_mask(" good_mask", good_mask);

	/*
	 * Here "good" means slices that are already the right page size,
	 * "compat" means slices that have a compatible page size (i.e.
	 * 4k in a 64k pagesize kernel), and "free" means slices without
	 * any VMAs.
	 *
	 * If MAP_FIXED:
	 *	check if fits in good | compat => OK
	 *	check if fits in good | compat | free => convert free
	 *	else bad
	 * If have hint:
	 *	check if hint fits in good => OK
	 *	check if hint fits in good | free => convert free
	 * Otherwise:
	 *	search in good, found => OK
	 *	search in good | free, found => convert free
	 *	search in good | compat | free, found => convert free.
	 */
#ifdef CONFIG_PPC_64K_PAGES
	/* If we support combo pages, we can allow 64k pages in 4k slices */
	if (psize == MMU_PAGE_64K) {
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		if (fixed)
			slice_or_mask(&good_mask, &compat_mask);
	}
#endif
	/* First check hint if it's valid or if we have MAP_FIXED */
	if (addr != 0 || fixed) {
		/* Build a mask for the requested range */
		mask = slice_range_to_mask(addr, len);
		slice_print_mask(" mask", mask);

		/* Check if we fit in the good mask. If we do, we just return,
		 * nothing else to do
		 */
		if (slice_check_fit(mask, good_mask)) {
			slice_dbg(" fits good !\n");
			return addr;
		}
	} else {
		/* Now let's see if we can find something in the existing
		 * slices for that size
		 */
		newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (newaddr != -ENOMEM) {
			/* Found within the good mask, we don't have to setup,
			 * we thus return directly
			 */
			slice_dbg(" found area at 0x%lx\n", newaddr);
			return newaddr;
		}
	}

	/* We don't fit in the good mask, check what other slices are
	 * empty and thus can be converted
	 */
	potential_mask = slice_mask_for_free(mm);
	slice_or_mask(&potential_mask, &good_mask);
	slice_print_mask(" potential", potential_mask);

	if ((addr != 0 || fixed) && slice_check_fit(mask, potential_mask)) {
		slice_dbg(" fits potential !\n");
		goto convert;
	}

	/* If we have MAP_FIXED and failed the above steps, then error out */
	if (fixed)
		return -EBUSY;

	slice_dbg(" search...\n");

	/* If we had a hint that didn't work out, see if we can fit
	 * anywhere in the good area.
	 */
	if (addr) {
		addr = slice_find_area(mm, len, good_mask, psize, topdown);
		if (addr != -ENOMEM) {
			slice_dbg(" found area at 0x%lx\n", addr);
			return addr;
		}
	}

	/* Now let's see if we can find something in the existing slices
	 * for that size plus free slices
	 */
	addr = slice_find_area(mm, len, potential_mask, psize, topdown);

#ifdef CONFIG_PPC_64K_PAGES
	if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
		/* retry the search with 4k-page slices included */
		slice_or_mask(&potential_mask, &compat_mask);
		addr = slice_find_area(mm, len, potential_mask, psize,
				       topdown);
	}
#endif

	if (addr == -ENOMEM)
		return -ENOMEM;

	mask = slice_range_to_mask(addr, len);
	slice_dbg(" found potential area at 0x%lx\n", addr);
	slice_print_mask(" mask", mask);

 convert:
	slice_andnot_mask(&mask, &good_mask);
	slice_andnot_mask(&mask, &compat_mask);
	if (mask.low_slices || !bitmap_empty(mask.high_slices, SLICE_NUM_HIGH)) {
		slice_convert(mm, mask, psize);
		if (psize > MMU_PAGE_BASE)
			on_each_cpu(slice_flush_segments, mm, 1);
	}
	return addr;

}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

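/*
 * Generic mmap hooks: both simply funnel into the slice-aware
 * allocator above with the mm's current base page size.
 */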
unsigned long arch_get_unmapped_area(struct file *filp,
				     unsigned long addr,
				     unsigned long len,
				     unsigned long pgoff,
				     unsigned long flags)
{
	return slice_get_unmapped_area(addr, len, flags,
				       current->mm->context.user_psize, 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
					     const unsigned long addr0,
					     const unsigned long len,
					     const unsigned long pgoff,
					     const unsigned long flags)
{
	return slice_get_unmapped_area(addr0, len, flags,
				       current->mm->context.user_psize, 1);
}

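/*
 * Return the MMU_PAGE_* index of the slice containing addr.
 */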
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
	unsigned char *hpsizes;
	int index, mask_index;

	/*
	 * Radix doesn't use slice, but can get enabled along with MMU_SLICE
	 */
	if (radix_enabled()) {
#ifdef CONFIG_PPC_64K_PAGES
		return MMU_PAGE_64K;
#else
		return MMU_PAGE_4K;
#endif
	}
	if (addr < SLICE_LOW_TOP) {
		u64 lpsizes;
		lpsizes = mm->context.low_slices_psize;
		index = GET_LOW_SLICE_INDEX(addr);
		return (lpsizes >> (index * 4)) & 0xf;
	}
	hpsizes = mm->context.high_slices_psize;
	index = GET_HIGH_SLICE_INDEX(addr);
	mask_index = index & 0x1;
	return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

/*
 * This is called by hash_page when it needs to do a lazy conversion of
 * an address space from real 64K pages to combo 4K pages (typically
 * when hitting a non cacheable mapping on a processor or hypervisor
 * that won't allow them for 64K pages).
 *
 * This is also called in init_new_context() to change back the user
 * psize from whatever the parent context had it set to.
 * N.B. This may be called before mm->context.id has been set.
 *
 * This function will only change the content of the {low,high}_slice_psize
 * masks, it will not flush SLBs as this shall be handled lazily by the
 * caller.
 */
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
	int index, mask_index;
	unsigned char *hpsizes;
	unsigned long flags, lpsizes;
	unsigned int old_psize;
	int i;

	slice_dbg("slice_set_user_psize(mm=%p, psize=%d)\n", mm, psize);

	VM_BUG_ON(radix_enabled());
	spin_lock_irqsave(&slice_convert_lock, flags);

	old_psize = mm->context.user_psize;
	slice_dbg(" old_psize=%d\n", old_psize);
	if (old_psize == psize)
		goto bail;

	mm->context.user_psize = psize;
	wmb();

	lpsizes = mm->context.low_slices_psize;
	for (i = 0; i < SLICE_NUM_LOW; i++)
		if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
			lpsizes = (lpsizes & ~(0xful << (i * 4))) |
				(((unsigned long)psize) << (i * 4));
	/* Assign the value back */
	mm->context.low_slices_psize = lpsizes;

	hpsizes = mm->context.high_slices_psize;
	for (i = 0; i < SLICE_NUM_HIGH; i++) {
		mask_index = i & 0x1;
		index = i >> 1;
		if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
			hpsizes[index] = (hpsizes[index] &
					  ~(0xf << (mask_index * 4))) |
				(((unsigned long)psize) << (mask_index * 4));
	}
	slice_dbg(" lsps=%lx, hsps=%lx\n",
		  mm->context.low_slices_psize,
		  mm->context.high_slices_psize);

 bail:
	spin_unlock_irqrestore(&slice_convert_lock, flags);
}

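/*
 * Explicitly set the page size of every slice intersecting
 * [start, start + len).
 */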
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
			   unsigned long len, unsigned int psize)
{
	struct slice_mask mask = slice_range_to_mask(start, len);

	VM_BUG_ON(radix_enabled());
	slice_convert(mm, mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, MAP_FIXED mappings cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is ok
 * for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len)
{
	struct slice_mask mask, available;
	unsigned int psize = mm->context.user_psize;

	if (radix_enabled())
		return 0;

	mask = slice_range_to_mask(addr, len);
	available = slice_mask_for_size(mm, psize);
#ifdef CONFIG_PPC_64K_PAGES
	/* We need to account for 4k slices too */
	if (psize == MMU_PAGE_64K) {
		struct slice_mask compat_mask;
		compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
		slice_or_mask(&available, &compat_mask);
	}
#endif

#if 0 /* too verbose */
	slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
		 mm, addr, len);
	slice_print_mask(" mask", mask);
	slice_print_mask(" available", available);
#endif
	return !slice_check_fit(mask, available);
}
#endif