/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

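/*
 * Two entries can only be folded together when they describe virtually
 * and physically contiguous regions with identical attributes.
 */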
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

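/*
 * Check whether the requested range is already covered by an existing
 * mapping, walking any compound (linked) entries as necessary.
 */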
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

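/*
 * Mapping requests are restricted to the fixed PMB page sizes, to the
 * P1/P2 fixed segments, and to kernel-only (non-user) protections.
 */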
static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

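/*
 * Claim the first free slot in the hardware PMB.
 */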
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

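/*
 * Allocate a software PMB entry, either in the specified slot or in the
 * first free one when PMB_NO_ENTRY is passed. The hardware entry itself
 * is programmed later via __set_pmb_entry().
 */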
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

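/*
 * Return an entry's slot to the free pool and unlink it from any
 * compound mapping it was part of.
 */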
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

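/*
 * Invalidate a hardware entry by clearing the V bit in both the address
 * and data arrays.
 */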
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

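/*
 * Establish a bolted kernel mapping of [vaddr, vaddr + size) to phys,
 * using the largest page sizes that fit and linking the resulting
 * entries together for later teardown.
 */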
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				spin_lock(&pmbp->lock);
				pmbp->link = pmbe;
				spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

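/*
 * Back an ioremap() request with PMB entries. Only used when PMB
 * iomapping has been enabled via "pmb=iomap" and the request is large
 * enough (>= 16MB) to warrant a PMB page.
 */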
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

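/*
 * Tear down the compound mapping that begins at the given virtual
 * address.
 */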
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

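/*
 * Tear down up to 'depth' entries along the link chain. Callers must
 * hold pmb_rwlock for writing.
 */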
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

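/*
 * Dump the boot-time PMB mappings to the kernel log.
 */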
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * set up the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

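/*
 * Try to fold a chain of contiguous entries into a single larger one,
 * provided the combined size is itself a valid PMB page size.
 */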
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

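/*
 * Walk the compound mappings set up at synchronization time and merge
 * the ones that can be expressed with a larger page size.
 */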
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
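/*
 * Shrink the uncached mapping inherited from the boot loader down to
 * the 16MB that the kernel uses.
 */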
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

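/*
 * "pmb=iomap" on the kernel command line enables PMB-backed ioremap().
 */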
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif