/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <asm/sizes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

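/*
 * Grab a free slot in the PMB entry map. The caller is expected to
 * hold pmb_rwlock for writing. Returns the slot index, or -ENOSPC if
 * every entry is in use.
 */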
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

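/*
 * Allocate a software PMB entry, either at the slot specified by
 * 'entry' or at the first free one when PMB_NO_ENTRY is passed in.
 * Returns an ERR_PTR() on failure.
 */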
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

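/*
 * Return an entry's slot to the pool. The caller is expected to hold
 * pmb_rwlock for writing.
 */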
static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
	writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
			mk_pmb_data(pmbe->entry));
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	spin_unlock_irqrestore(&pmbe->lock, flags);
}

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

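/*
 * Establish a PMB mapping of 'size' bytes from 'phys' at 'vaddr',
 * greedily using the largest entry sizes that fit. Returns the number
 * of bytes actually mapped, or a negative error code.
 */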
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;
	u64 flags;

	flags = pgprot_val(prot);

	pmb_flags = PMB_WT | PMB_UB;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		pmb_flags |= PMB_C;

		if ((flags & _PAGE_WT) == 0)
			pmb_flags &= ~(PMB_WT | PMB_UB);
	}

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		unsigned long flags;

		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		spin_lock_irqsave(&pmbe->lock, flags);

		__set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		pmbe->size = pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp)) {
			spin_lock(&pmbp->lock);
			pmbp->link = pmbe;
			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	if (size >= SZ_16M)
		goto again;

	return wanted - size;

out:
	pmb_unmap_entry(pmbp, NR_PMB_ENTRIES);

	return err;
}

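/*
 * Tear down the mapping whose leading entry begins at the given
 * virtual address, along with any linked entries that make it up.
 */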
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
		}
	}

	read_unlock(&pmb_rwlock);

	pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
}

static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

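/*
 * Dump the surviving boot mappings to the kernel log.
 */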
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		spin_lock_irqsave(&pmbe->lock, irqflags);

482 483 484 485
		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

P
			spin_lock(&pmbp->lock);

			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;

			spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

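/*
 * Attempt to fold a chain of linked entries into a single larger
 * mapping, provided the combined size is one the PMB supports.
 */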
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

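/*
 * Scan the established mappings and merge any compound (linked) ones
 * that can be expressed with a larger entry size.
 */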
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size = SZ_16M;
		pmbe->flags &= ~PMB_SZ_MASK;
		pmbe->flags |= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

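/*
 * Import the boot mappings, optimize them where possible, and flush
 * out the TLB so that the new state takes effect.
 */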
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	__raw_writel(__raw_readl(MMUCR) | MMUCR_TI, MMUCR);
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;

		read_lock(&pmb_rwlock);

		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}

		read_unlock(&pmb_rwlock);
	}

	prev_state = state;

	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif