/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2010  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#define NR_PMB_ENTRIES	16

static void __pmb_unmap(struct pmb_entry *);

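/*
 * Software copies of the hardware PMB slots, along with a bitmap
 * tracking which of the NR_PMB_ENTRIES slots are in use.
 */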
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static unsigned long pmb_map;

static inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

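/*
 * Claim the first free slot in the PMB bitmap. test_and_set_bit()
 * makes the claim atomic, so retry if somebody grabs the slot between
 * the scan and the claim.
 */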
static int pmb_alloc_entry(void)
{
	unsigned int pos;

repeat:
	pos = find_first_zero_bit(&pmb_map, NR_PMB_ENTRIES);

	if (unlikely(pos >= NR_PMB_ENTRIES))
		return -ENOSPC;

	if (test_and_set_bit(pos, &pmb_map))
		goto repeat;

	return pos;
}

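/*
 * Allocate a software PMB entry, either in the first free slot or,
 * when a specific slot is requested, in that slot.
 */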
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	int pos;

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (pos < 0)
			return ERR_PTR(pos);
	} else {
		if (test_and_set_bit(entry, &pmb_map))
			return ERR_PTR(-ENOSPC);
		pos = entry;
	}

	pmbe = &pmb_entry_list[pos];

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;
	pmbe->link	= NULL;

	return pmbe;
}

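/*
 * Scrub the software copy of an entry and release its slot in the
 * PMB bitmap.
 */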
static void pmb_free(struct pmb_entry *pmbe)
{
	int pos = pmbe->entry;

	pmbe->vpn	= 0;
	pmbe->ppn	= 0;
	pmbe->flags	= 0;
	pmbe->entry	= 0;

	clear_bit(pos, &pmb_map);
}

/*
 * Must be in P2 for __set_pmb_entry()
 */
static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
			    unsigned long flags, int pos)
{
	__raw_writel(vpn | PMB_V, mk_pmb_addr(pos));

#ifdef CONFIG_CACHE_WRITETHROUGH
	/*
	 * When we are in 32-bit address extended mode, CCR.CB becomes
	 * invalid, so care must be taken to manually adjust cacheable
	 * translations.
	 */
	if (likely(flags & PMB_C))
		flags |= PMB_WT;
#endif

	__raw_writel(ppn | flags | PMB_V, mk_pmb_data(pos));
}

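/*
 * Program an entry into the hardware PMB. The register writes have to
 * run uncached, hence the jump_to_uncached()/back_to_cached() pair.
 */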
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	jump_to_uncached();
	__set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
	back_to_cached();
}

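/*
 * Invalidate a hardware slot by clearing the V bit in both the
 * address and data arrays.
 */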
static void clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned int entry = pmbe->entry;
	unsigned long addr;

	if (unlikely(entry >= NR_PMB_ENTRIES))
		return;

	jump_to_uncached();

	/* Clear V-bit */
	addr = mk_pmb_addr(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	addr = mk_pmb_data(entry);
	__raw_writel(__raw_readl(addr) & ~PMB_V, addr);

	back_to_cached();
}

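/*
 * Supported PMB entry sizes, largest first, so that pmb_remap() can
 * greedily pick the biggest entry that still fits.
 */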
static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size	= 0x20000000, .flag = PMB_SZ_512M, },
	{ .size = 0x08000000, .flag = PMB_SZ_128M, },
	{ .size = 0x04000000, .flag = PMB_SZ_64M,  },
	{ .size = 0x01000000, .flag = PMB_SZ_16M,  },
};

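/*
 * Map [phys, phys + size) at vaddr, using the largest entry size that
 * fits at each step. Returns the number of bytes actually mapped, or
 * a negative error code.
 */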
long pmb_remap(unsigned long vaddr, unsigned long phys,
	       unsigned long size, unsigned long flags)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long wanted;
	int pmb_flags, i;
	long err;

	/* Convert typical pgprot value to the PMB equivalent */
	if (flags & _PAGE_CACHABLE) {
		if (flags & _PAGE_WT)
			pmb_flags = PMB_WT;
		else
			pmb_flags = PMB_C;
	} else
		pmb_flags = PMB_WT | PMB_UB;

	pmbp = NULL;
	wanted = size;

again:
	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
		if (size < pmb_sizes[i].size)
			continue;

		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag,
				 PMB_NO_ENTRY);
		if (IS_ERR(pmbe)) {
			err = PTR_ERR(pmbe);
			goto out;
		}

		set_pmb_entry(pmbe);

		phys	+= pmb_sizes[i].size;
		vaddr	+= pmb_sizes[i].size;
		size	-= pmb_sizes[i].size;

		/*
		 * Link adjacent entries that span multiple PMB entries
		 * for easier tear-down.
		 */
		if (likely(pmbp))
			pmbp->link = pmbe;

		pmbp = pmbe;

		/*
		 * Instead of trying smaller sizes on every iteration
		 * (even if we succeed in allocating space), try using
		 * pmb_sizes[i].size again.
		 */
		i--;
	}

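	/* Retry while at least the smallest entry size (16MB) is left. */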
	if (size >= 0x1000000)
		goto again;

	return wanted - size;

out:
	if (pmbp)
		__pmb_unmap(pmbp);

	return err;
}

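/*
 * Tear down the mapping whose virtual address begins at addr,
 * following the link chain so that mappings spanning multiple PMB
 * entries are released in full.
 */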
void pmb_unmap(unsigned long addr)
{
	struct pmb_entry *pmbe = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, &pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == addr)
				break;
		}
	}

	if (unlikely(!pmbe))
		return;

	__pmb_unmap(pmbe);
}

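/*
 * Release a chain of linked entries, invalidating each hardware slot
 * and freeing the software copies as we go.
 */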
static void __pmb_unmap(struct pmb_entry *pmbe)
{
	BUG_ON(!test_bit(pmbe->entry, &pmb_map));

	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		clear_pmb_entry(pmbe);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe);
}

#ifdef CONFIG_PMB_LEGACY
static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __MEMORY_START && ppn < __MEMORY_START + __MEMORY_SIZE;
}

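/*
 * Walk the hardware PMB, preserving the boot loader's in-range kernel
 * mappings and invalidating everything else. Returns 0 if at least
 * one mapping was preserved.
 */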
static int pmb_apply_legacy_mappings(void)
{
	unsigned int applied = 0;
	int i;

	pr_info("PMB: Preserving legacy mappings:\n");

	/*
	 * The following entries are setup by the bootloader.
	 *
	 * Entry       VPN	   PPN	    V	SZ	C	UB
	 * --------------------------------------------------------
	 *   0      0xA0000000 0x00000000   1   64MB    0       0
	 *   1      0xA4000000 0x04000000   1   16MB    0       0
	 *   2      0xA6000000 0x08000000   1   16MB    0       0
	 *   9      0x88000000 0x48000000   1  128MB    1       1
	 *  10      0x90000000 0x50000000   1  128MB    1       1
	 *  11      0x98000000 0x58000000   1  128MB    1       1
	 *  13      0xA8000000 0x48000000   1  128MB    0       0
	 *  14      0xB0000000 0x50000000   1  128MB    0       0
	 *  15      0xB8000000 0x58000000   1  128MB    0       0
	 *
	 * The only entries we need are the ones that map the kernel
	 * at the cached and uncached addresses.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (pmb_ppn_in_range(ppn)) {
			unsigned int size;
			char *sz_str = NULL;

			size = data_val & PMB_SZ_MASK;

			sz_str = (size == PMB_SZ_16M)  ? " 16MB":
				 (size == PMB_SZ_64M)  ? " 64MB":
				 (size == PMB_SZ_128M) ? "128MB":
							 "512MB";

			pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
				vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
				(data_val & PMB_C) ? "" : "un");

			applied++;
		} else {
			/*
			 * Invalidate anything out of bounds.
			 */
			__raw_writel(addr_val & ~PMB_V, addr);
			__raw_writel(data_val & ~PMB_V, data);
		}
	}

	return (applied == 0);
}
#else
static inline int pmb_apply_legacy_mappings(void)
{
	return 1;
}
#endif

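/*
 * Early PMB setup: either inherit the legacy boot mappings wholesale,
 * or synchronize the software state with whatever the boot loader and
 * early kernel code left in the hardware PMB.
 */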
int pmb_init(void)
{
	int i;
	unsigned long addr, data;
	unsigned long ret;

	jump_to_uncached();

	/*
	 * Attempt to apply the legacy boot mappings if configured. If
	 * this is successful then we simply carry on with those and
	 * don't bother establishing additional memory mappings. Dynamic
	 * device mappings through pmb_remap() can still be bolted on
	 * after this.
	 */
	ret = pmb_apply_legacy_mappings();
	if (ret == 0) {
		back_to_cached();
		return 0;
	}

	/*
	 * Sync our software copy of the PMB mappings with those in
	 * hardware. The mappings in the hardware PMB were either set up
	 * by the bootloader or very early on by the kernel.
	 */
	for (i = 0; i < PMB_ENTRY_MAX; i++) {
		struct pmb_entry *pmbe;
		unsigned long vpn, ppn, flags;

		addr = PMB_DATA + (i << PMB_E_SHIFT);
		data = __raw_readl(addr);
		if (!(data & PMB_V))
			continue;

		if (data & PMB_C) {
#if defined(CONFIG_CACHE_WRITETHROUGH)
			data |= PMB_WT;
#elif defined(CONFIG_CACHE_WRITEBACK)
			data &= ~PMB_WT;
#else
			data &= ~(PMB_C | PMB_WT);
#endif
		}
		__raw_writel(data, addr);

		ppn = data & PMB_PFN_MASK;

		flags = data & (PMB_C | PMB_WT | PMB_UB);
		flags |= data & PMB_SZ_MASK;

		addr = PMB_ADDR + (i << PMB_E_SHIFT);
		data = __raw_readl(addr);

		vpn = data & PMB_PFN_MASK;

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		WARN_ON(IS_ERR(pmbe));
	}

	__raw_writel(0, PMB_IRMCR);

	/* Flush out the TLB */
	i = __raw_readl(MMUCR);
	i |= MMUCR_TI;
	__raw_writel(i, MMUCR);

	back_to_cached();

	return 0;
}

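/*
 * With the SE (space extension) bit in PASCR clear, the CPU is still
 * running in legacy 29-bit physical address mode.
 */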
bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

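/*
 * debugfs interface: dump the current state of each hardware PMB
 * slot, one line per entry.
 */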
static int pmb_seq_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
					         "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
	.owner		= THIS_MODULE,
	.open		= pmb_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init pmb_debugfs_init(void)
{
	struct dentry *dentry;

	dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
				     sh_debugfs_root, NULL, &pmb_debugfs_fops);
	if (!dentry)
		return -ENOMEM;
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	return 0;
}
postcore_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
{
	static pm_message_t prev_state;
	int i;

	/* Restore the PMB after a resume from hibernation */
	if (state.event == PM_EVENT_ON &&
	    prev_state.event == PM_EVENT_FREEZE) {
		struct pmb_entry *pmbe;
		for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
			if (test_bit(i, &pmb_map)) {
				pmbe = &pmb_entry_list[i];
				set_pmb_entry(pmbe);
			}
		}
	}
	prev_state = state;
	return 0;
}

static int pmb_sysdev_resume(struct sys_device *dev)
{
	return pmb_sysdev_suspend(dev, PMSG_ON);
}

static struct sysdev_driver pmb_sysdev_driver = {
	.suspend = pmb_sysdev_suspend,
	.resume = pmb_sysdev_resume,
};

static int __init pmb_sysdev_init(void)
{
	return sysdev_driver_register(&cpu_sysdev_class, &pmb_sysdev_driver);
}
subsys_initcall(pmb_sysdev_init);
#endif