/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

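/*
 * Drain the controller's buffers. This is normally a write to the Cache
 * Sync register; with CONFIG_PL310_ERRATA_753970 the write is redirected
 * to an unmapped dummy register instead (see the sync_reg_offset switch
 * in l2x0_init()).
 */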
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

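/*
 * Errata 588369 and 727915 require some maintenance operations to be
 * performed with the PL310 Debug Control register set; debug_writel()
 * wraps that access and compiles to a no-op when neither erratum
 * workaround is enabled.
 */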
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

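/*
 * Erratum 588369: on affected PL310 revisions Clean & Invalidate by PA
 * does not behave correctly, so the workaround below emulates a flush as
 * a Clean by PA followed by an Invalidate by PA (callers bracket it with
 * debug_writel()).
 */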
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

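/*
 * Clean and invalidate all ways. The debug register is asserted around
 * the background operation (the erratum 727915 workaround); the caller
 * must already hold l2x0_lock.
 */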
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
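	/*
	 * Partial cache lines at either end of the range are flushed
	 * (clean + invalidate) rather than just invalidated, so data
	 * that shares those lines is not silently discarded.
	 */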
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

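	/*
	 * Process the range in 4K blocks, releasing and retaking the
	 * lock between blocks to keep interrupt latency bounded.
	 */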
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

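	/* Ranges covering the whole cache are cheaper done by way. */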
	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

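/*
 * Flush everything and turn the controller off. Done under l2x0_lock so
 * no concurrent maintenance operation can race with the disable.
 */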
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

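/*
 * Clear any way lockdown left behind by the boot loader. The PL310 has
 * eight D/I lockdown register pairs (one per possible master); earlier
 * L210/L220 controllers have a single pair.
 */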
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

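/*
 * Probe the controller type and geometry, merge the platform's aux_val/
 * aux_mask into the Auxiliary Control register, and register the outer
 * cache operations. The controller is only configured, invalidated and
 * enabled here if it is not already running (e.g. set up by secure
 * firmware).
 */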
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
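	/* AUX_CTRL[19:17] encodes the way size as 2^(n + 3) KB */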
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting in non-secure mode, accessing
	 * the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
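/*
 * Convert the optional "arm,tag-latency", "arm,data-latency" and
 * "arm,dirty-latency" device-tree properties (in cycles) into L210/L220
 * Auxiliary Control register latency fields.
 */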
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
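	/*
	 * "arm,filter-ranges" = <base size>: program the PL310 address
	 * filter so accesses inside the 1MB-aligned window are routed
	 * to the controller's second master port.
	 */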
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

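/*
 * Save the PL310 configuration registers that need to be reprogrammed
 * when the controller comes back from a power-down (see pl310_resume()).
 */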
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0 onwards there is a Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0 onwards there is a Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	.setup	= pl310_of_setup,
	.save	= pl310_save,
	.resume	= pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	.setup	= l2x0_of_setup,
	.save	= NULL,
	.resume	= l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

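/*
 * Device-tree entry point: find a matching cache controller node, map
 * its registers, apply any DT-provided configuration while the cache is
 * still disabled, then fall through to the common l2x0_init().
 */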
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif