/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
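/*
 * These errata require the offending maintenance operations to run with
 * write-back and cache linefills temporarily disabled through the PL310
 * debug control register: debug_writel(0x03) sets both disable bits,
 * debug_writel(0x00) restores normal operation.
 */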
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
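	/*
	 * Partial cache lines at either end of the range are flushed
	 * (clean + invalidate) rather than just invalidated, so that
	 * unrelated data sharing those lines is not lost.  The work is
	 * done in 4K blocks, dropping and retaking the lock between
	 * blocks to bound the time spent with interrupts disabled.
	 */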
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
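	/*
	 * A range covering the whole cache is handled as a flush of all
	 * ways; smaller ranges are flushed line by line in 4K blocks,
	 * with debug_writel() bracketing each block for the PL310
	 * errata workarounds.
	 */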
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

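/*
 * Clear the data and instruction lockdown-by-way registers so that all
 * ways are available for allocation before the cache is enabled.  The
 * PL310 has eight lockdown register pairs; the L210 and unknown parts
 * are assumed to have one.
 */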
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

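/*
 * l2x0_init - configure and enable the L2 cache controller
 * @base:	virtual address of the controller's register window
 * @aux_val:	bits to set in the auxiliary control register
 * @aux_mask:	mask of bits to preserve from the current auxiliary value
 *
 * Illustrative call from board code (the base address and aux values
 * below are hypothetical and platform specific):
 *
 *	void __iomem *l2c_base = ioremap(0xfe00a000, SZ_4K);
 *	l2x0_init(l2c_base, 0x30000000, 0xc20f0fff);
 */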
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
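	/* e.g. a way-size field of 0x3 gives 1 << (3 + 3) = 64 KB per way */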
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * When booting from non-secure mode, accessing the
	 * registers below would fault, so leave it as it is.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
		.set_debug   = pl310_set_debug,
	},
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save  = NULL,
	.outer_cache = {
		.resume      = l2x0_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

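/*
 * Probe and set up the L2 controller from the device tree.  An
 * illustrative node (the address and latencies are hypothetical):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */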
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	of_init = true;
	l2x0_init(l2x0_base, aux_val, aux_mask);

	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));

	return 0;
}
#endif