/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

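/*
 * Find a free slot in the PMB entry bitmap. Returns the slot index on
 * success or -ENOSPC if every hardware entry is already in use. Called
 * with pmb_rwlock held for writing.
 */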
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

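/*
 * Allocate the software state for a single PMB mapping. With
 * entry == PMB_NO_ENTRY the first free slot is used, otherwise the
 * requested slot is claimed if it is still available. The hardware
 * entry itself is programmed later via __set_pmb_entry().
 */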
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

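/*
 * Return a slot claimed by pmb_alloc() to the pool. Called with
 * pmb_rwlock held for writing.
 */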
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

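/*
 * Knock the V-bit out of both the address and data arrays, invalidating
 * the hardware entry while leaving the software state intact.
 */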
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

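/*
 * Establish a bolted (permanent) kernel mapping, splitting the request
 * across as many PMB entries as needed and always trying the largest
 * page size that still fits. The entries are chained through ->link so
 * the whole mapping can be torn down as one unit later.
 */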
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

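/*
 * Route a large ioremap() request through the PMB instead of the TLB.
 * This path is only taken when "pmb=iomap" was given on the command
 * line and the request is at least 16MB; anything smaller is left to
 * the page tables. Conceptually a caller does something like the
 * sketch below (illustrative only, the real call site lives in the
 * ioremap code outside this file):
 *
 *	mapped = pmb_remap_caller(phys, size, prot, caller);
 *	if (mapped && !IS_ERR(mapped))
 *		return mapped;
 *	... otherwise fall back to a page table based mapping ...
 */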
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

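/*
 * Tear down the mapping whose virtual base address matches 'addr',
 * unwinding the whole chain of linked entries that was built up when
 * the mapping was established.
 */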
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

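/*
 * Invalidate and free up to 'depth' linked entries starting at 'pmbe'.
 * Called with pmb_rwlock held for writing.
 */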
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

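/*
 * Try to fold a chain of linked entries into a single entry with a
 * larger page size. The merge only happens when the combined span is
 * itself a valid PMB page size.
 */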
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

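/*
 * Walk all in-use entries and attempt to merge the compound mappings
 * discovered by pmb_synchronize().
 */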
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
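/*
 * If the boot loader established the uncached mapping, shrink it down
 * to the 16MB the kernel actually needs and resize the uncached
 * window to match.
 */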
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

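/*
 * Command line parsing: "pmb=iomap" opts in to routing large ioremaps
 * through the PMB (see pmb_remap_caller() above).
 */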
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

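/*
 * Report whether the CPU is still in legacy 29-bit physical addressing
 * mode, i.e. the SE bit in PASCR is clear.
 */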
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif