/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

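/*
 * Offsets into the PMB address and data register arrays for a given
 * entry slot.
 */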
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

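/*
 * Claim the first free slot in the PMB map. Returns the slot number,
 * or -ENOSPC if every entry is in use. Called with pmb_rwlock held
 * for writing.
 */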
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

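/*
 * Allocate and initialize the software state for an entry, either at a
 * caller-specified slot or at the first free one (PMB_NO_ENTRY). The
 * hardware entry itself is not written here.
 */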
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

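/*
 * Release the software state for an entry; the hardware copy is not
 * touched here.
 */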
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

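/*
 * Invalidate a hardware entry by clearing the V-bit in both the
 * address and data arrays.
 */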
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

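/*
 * Locked wrapper around __set_pmb_entry() for callers that do not
 * already hold the entry lock.
 */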
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

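/*
 * Establish a bolted mapping of [phys, phys + size) at vaddr, greedily
 * using the largest entry sizes that fit and linking the resulting
 * entries so that they can be torn down as a unit.
 */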
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;

	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

again:
	for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return PTR_ERR(pmbe);
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys	+= pmbe->size;
		vaddr	+= pmbe->size;
		size	-= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return 0;
}

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (ret != 0)
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)orig_addr);
}

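/*
 * Tear down the compound mapping whose leading entry matches the given
 * virtual address.
 */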
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

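/*
 * Tear down up to 'depth' linked entries starting at pmbe. Called with
 * pmb_rwlock held for writing.
 */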
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

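/*
 * Log the bolted mappings that are currently in place.
 */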
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

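/*
 * Fold as many linked entries as possible into the head entry, provided
 * the combined span matches one of the supported PMB sizes.
 */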
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

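/*
 * Walk the compound (multi-entry) mappings and attempt to merge each
 * one into fewer, larger entries.
 */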
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
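/*
 * Trim the uncached mapping inherited from the boot loader down to the
 * 16MB that the kernel itself uses.
 */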
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

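/*
 * debugfs dump of the raw hardware PMB entries.
 */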
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif