/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader routine,
 * signal handler code and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long icacheaddr;
	unsigned long start, end;
	unsigned long v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(args);
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
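			/* Write the dcache line back first so that the
			   icache refetch below sees the new instructions. */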
			__ocbwb(v);

			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
				(v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
				i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
	}
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

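	/*
	 * If the page has no user mappings yet, defer the flush: mark the
	 * page dirty here and let update_mmu_cache() do the write-back
	 * once a user mapping is actually established.
	 */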
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long ccr;

	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
}

static inline void flush_dcache_all(void)
{
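	/*
	 * Passing one full way's extent makes the segment function (which
	 * walks all ways in parallel) write back and invalidate the whole
	 * dcache.
	 */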
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

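	/* One bit per dcache colour, e.g. n_aliases == 4 gives 0xf. */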
	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

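		/*
		 * Walk the PTEs until the range ends or the pte pointer
		 * wraps to a page boundary, i.e. the end of this PTE table.
		 */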
		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

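			/*
			 * If the virtual and physical addresses differ in
			 * the alias bits, both cache colours can hold a
			 * copy of this page; mark each one for flushing
			 * below.
			 */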
			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long address, pfn, phys;
	unsigned int alias_mask;

	vma = data->vma;
	address = data->addr1;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Flush the D-cache colour selected by the virtual address */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Flush the D-cache colour selected by the physical address */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
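			/*
			 * Writing 'phys' through the memory-mapped cache
			 * address array writes back and invalidates any
			 * line whose tag matches; the caller sets the 'A'
			 * (associative) bit in 'addr' via SH_CACHE_ASSOC.
			 */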
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
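	/*
	 * Bit 28 of SR is BL: blocking exceptions around each
	 * movca.l/ocbi pair below ensures nothing can touch the line
	 * that movca.l allocates before ocbi invalidates it.
	 */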
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		panic("unknown number of cache ways\n");
		break;
	}

	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}