/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

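/*
 * Find a free slot in the PMB allocation bitmap. Assumes the caller
 * holds pmb_rwlock for writing.
 */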
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

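/*
 * Allocate a software PMB entry, either in the first free slot or in
 * the specific slot requested via @entry. Returns an ERR_PTR() value
 * on failure.
 */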
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

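/*
 * Return an entry slot to the allocation bitmap. Any hardware state
 * for the entry is expected to have been torn down already.
 */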
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	/* Set V-bit */
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, mk_pmb_data(pmbe->entry));
	__raw_writel(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
}

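/*
 * Invalidate an entry by knocking down the V-bit in both the address
 * and data arrays. As with __set_pmb_entry(), this has to be done
 * through uncached writes.
 */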
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

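/*
 * Bolted mappings are not handled here yet; this stub unconditionally
 * reports success.
 */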
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	return 0;
}

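/*
 * Map [phys, phys + size) through the PMB, splitting the range across
 * as many entries as needed and returning the corresponding virtual
 * address, with any sub-page offset added back in.
 */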
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long pmb_flags;
	int i, mapped;
	unsigned long orig_addr, vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	pmbp = NULL;
	pmb_flags = pgprot_to_pmb_flags(prot);
	mapped = 0;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	area = __get_vm_area_caller(aligned, VM_IOREMAP, uncached_end,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	orig_addr = vaddr = (unsigned long)area->addr;

	if (!pmb_addr_valid(vaddr, aligned))
		return ERR_PTR(-EFAULT);

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			pmb_unmap_entry(pmbp, mapped);
			return pmbe;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = pmb_sizes[i].size;

		__set_pmb_entry(pmbe);

		phys	+= pmbe->size;
		vaddr	+= pmbe->size;
		size	-= pmbe->size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
		mapped++;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return (void __iomem *)(offset + (char *)orig_addr);
}

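/*
 * Tear down the mapping previously established at @addr, along with
 * any entries linked to it.
 */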
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

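/*
 * Log the established boot mappings.
 */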
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

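/*
 * Attempt to grow @head to the largest valid PMB size spanned by its
 * linked run of entries, then tear down the entries that were folded
 * into it.
 */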
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

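/*
 * Walk the compound (linked) mappings and try to merge each one into
 * a single larger entry.
 */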
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
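/*
 * Shrink the boot loader's uncached mapping down to the 16MB minimum
 * page size.
 */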
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

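/*
 * We are in 29-bit legacy mode when the 32-bit space extension bit is
 * clear.
 */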
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

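/*
 * debugfs dump of the hardware PMB state, one line per entry, read
 * straight from the address/data arrays.
 */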
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
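/*
 * On resume from hibernation, rewrite each allocated hardware entry
 * from its software copy.
 */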
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif