/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register to work around PL310 erratum 753970 */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
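	/*
	 * debug_writel(0x03) sets the disable-write-back and disable-linefill
	 * bits in the debug register for the duration of the clean+invalidate;
	 * it is a no-op unless a PL310 erratum workaround is configured.
	 */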
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean and invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when the L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
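	/*
	 * Partially covered lines at either end of the range are cleaned and
	 * invalidated rather than just invalidated, so dirty data outside the
	 * range is not discarded.
	 */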
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

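	/*
	 * Invalidate in 4K chunks, briefly dropping the lock between chunks
	 * so that interrupts are not held off for the whole range.
	 */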
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2x0_unlock(__u32 cache_id)
{
	int lockregs;
	int i;

	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
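	/*
	 * The AUX_CTRL way-size field encodes (1 << (field + 3)) KB per way,
	 * i.e. 1 => 16KB, 2 => 32KB and so on.
	 */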
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If it is, leave it alone: when booting in non-secure mode,
	 * writing to the registers below would fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
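		/*
		 * Round the filter window out to 1MB granularity: the end
		 * address is rounded up, the start rounded down, and the
		 * filter enable bit set.
		 */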
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = pl310_of_setup },
	{ .compatible = "arm,l220-cache", .data = l2x0_of_setup },
	{ .compatible = "arm,l210-cache", .data = l2x0_of_setup },
	{}
};

int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
	struct device_node *np;
	void (*l2_setup)(const struct device_node *np,
		__u32 *aux_val, __u32 *aux_mask);

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;
	l2x0_base = of_iomap(np, 0);
	if (!l2x0_base)
		return -ENOMEM;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		l2_setup = of_match_node(l2x0_ids, np)->data;
		if (l2_setup)
			l2_setup(np, &aux_val, &aux_mask);
	}
	l2x0_init(l2x0_base, aux_val, aux_mask);
	return 0;
}
#endif