Commit ba5187db, authored by Thiemo Seufer, committed by Ralf Baechle

Better interface to run uncached cache setup code.

Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent: 7de8d232
arch/mips/lib/Makefile

@@ -2,8 +2,8 @@
 # Makefile for MIPS-specific library files..
 #
 
-lib-y += csum_partial_copy.o memcpy.o promlib.o \
-        strlen_user.o strncpy_user.o strnlen_user.o
+lib-y += csum_partial_copy.o memcpy.o promlib.o strlen_user.o strncpy_user.o \
+        strnlen_user.o uncached.o
 
 obj-y += iomap.o
arch/mips/lib/uncached.c (new file)

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 Thiemo Seufer
 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
 * Author: Maciej W. Rozycki <macro@mips.com>
 */
#include <linux/init.h>

#include <asm/addrspace.h>
#include <asm/bug.h>

#ifndef CKSEG2
#define CKSEG2 CKSSEG
#endif
#ifndef TO_PHYS_MASK
#define TO_PHYS_MASK -1
#endif

/*
 * FUNC is executed in one of the uncached segments, depending on its
 * original address as follows:
 *
 * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
 *    segment used is CKSEG1.
 * 2. If the original address is in XKPHYS, then the uncached segment
 *    used is XKPHYS(2).
 * 3. Otherwise it's a bug.
 *
 * The same remapping is done with the stack pointer. Stack handling
 * works because we don't handle stack arguments or more complex return
 * values, so we can avoid sharing the same stack area between a cached
 * and the uncached mode.
 */
unsigned long __init run_uncached(void *func)
{
        register long sp __asm__("$sp");
        register long ret __asm__("$2");
        long lfunc = (long)func, ufunc;
        long usp;

        /* Compute the uncached alias of the current stack pointer. */
        if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
                usp = CKSEG1ADDR(sp);
        else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
                 (long long)sp < (long long)PHYS_TO_XKPHYS(8LL, 0))
                usp = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
                                     XKPHYS_TO_PHYS((long long)sp));
        else {
                BUG();
                usp = sp;
        }

        /* Compute the uncached alias of the function address. */
        if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
                ufunc = CKSEG1ADDR(lfunc);
        else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0LL, 0) &&
                 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8LL, 0))
                ufunc = PHYS_TO_XKPHYS((long long)K_CALG_UNCACHED,
                                       XKPHYS_TO_PHYS((long long)lfunc));
        else {
                BUG();
                ufunc = lfunc;
        }

        __asm__ __volatile__ (
        "       move    $16, $sp\n"     /* save the cached stack pointer */
        "       move    $sp, %1\n"      /* switch to the uncached stack */
        "       jalr    %2\n"           /* call func through its uncached alias */
        "       move    $sp, $16"       /* restore the cached stack pointer */
        : "=r" (ret)
        : "r" (usp), "r" (ufunc)
        : "$16", "$31");

        return ret;
}
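For readers unfamiliar with the MIPS segment layout, here is a minimal standalone sketch of the 32-bit remapping rule described in the comment above. The macro values are restated locally (in the kernel they come from <asm/addrspace.h>) so the snippet compiles as a plain user-space program; it demonstrates only the address arithmetic, not an actual uncached call:

#include <stdio.h>

/* Local stand-ins for the kernel's <asm/addrspace.h> macros (32-bit view).
 * CKSEG0 is the cached unmapped segment, CKSEG1 its uncached alias; both
 * window the same low 512MB of physical memory. */
#define CKSEG0          0x80000000UL
#define CKSEG1          0xa0000000UL
#define CKSEG2          0xc0000000UL
#define CPHYSADDR(a)    ((a) & 0x1fffffffUL)    /* strip the segment bits */
#define CKSEG1ADDR(a)   (CPHYSADDR(a) | CKSEG1) /* rebase into CKSEG1 */

int main(void)
{
        unsigned long cached = 0x801234a0UL;    /* a hypothetical CKSEG0 address */

        if (cached >= CKSEG0 && cached < CKSEG2)
                /* Same physical memory, but accesses bypass the caches:
                 * prints "uncached alias: 0xa01234a0". */
                printf("uncached alias: 0x%08lx\n", CKSEG1ADDR(cached));

        return 0;
}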
arch/mips/mm/c-r4k.c

@@ -26,6 +26,7 @@
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/war.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
 
 static unsigned long icache_size, dcache_size, scache_size;
 
@@ -1119,7 +1120,6 @@ static int __init probe_scache(void)
         return 1;
 }
 
-typedef int (*probe_func_t)(unsigned long);
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 
@@ -1127,7 +1127,6 @@ static void __init setup_scache(void)
 {
         struct cpuinfo_mips *c = &current_cpu_data;
         unsigned int config = read_c0_config();
-        probe_func_t probe_scache_kseg1;
         int sc_present = 0;
 
         /*
@@ -1140,8 +1139,7 @@ static void __init setup_scache(void)
         case CPU_R4000MC:
         case CPU_R4400SC:
         case CPU_R4400MC:
-                probe_scache_kseg1 = (probe_func_t) (CKSEG1ADDR(&probe_scache));
-                sc_present = probe_scache_kseg1(config);
+                sc_present = run_uncached(probe_scache);
                 if (sc_present)
                         c->options |= MIPS_CPU_CACHE_CDEX_S;
                 break;
arch/mips/mm/sc-rm7k.c

@@ -15,6 +15,7 @@
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
+#include <asm/cacheflush.h> /* for run_uncached() */
 
 /* Primary cache parameters. */
 #define sc_lsize 32
@@ -96,25 +97,13 @@ static void rm7k_sc_inv(unsigned long addr, unsigned long size)
 }
 
 /*
- * This function is executed in the uncached segment CKSEG1.
- * It must not touch the stack, because the stack pointer still points
- * into CKSEG0.
- *
- * Three options:
- * - Write it in assembly and guarantee that we don't use the stack.
- * - Disable caching for CKSEG0 before calling it.
- * - Pray that GCC doesn't randomly start using the stack.
- *
- * This being Linux, we obviously take the least sane of those options -
- * following DaveM's lead in c-r4k.c
- *
- * It seems we get our kicks from relying on unguaranteed behaviour in GCC
+ * This function is executed in uncached address space.
  */
 static __init void __rm7k_sc_enable(void)
 {
         int i;
 
-        set_c0_config(1 << 3);          /* CONF_SE */
+        set_c0_config(R7K_CONF_SE);
 
         write_c0_taglo(0);
         write_c0_taghi(0);
@@ -127,24 +116,22 @@ static __init void __rm7k_sc_enable(void)
         ".set mips0\n\t"
         ".set reorder"
         :
-        : "r" (KSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
+        : "r" (CKSEG0ADDR(i)), "i" (Index_Store_Tag_SD));
         }
 }
 
 static __init void rm7k_sc_enable(void)
 {
-        void (*func)(void) = (void *) KSEG1ADDR(&__rm7k_sc_enable);
-
-        if (read_c0_config() & 0x08)    /* CONF_SE */
+        if (read_c0_config() & R7K_CONF_SE)
                 return;
 
         printk(KERN_INFO "Enabling secondary cache...");
-        func();
+        run_uncached(__rm7k_sc_enable);
 }
 
 static void rm7k_sc_disable(void)
 {
-        clear_c0_config(1<<3);          /* CONF_SE */
+        clear_c0_config(R7K_CONF_SE);
 }
 
 struct bcache_ops rm7k_sc_ops = {
@@ -164,7 +151,7 @@ void __init rm7k_sc_init(void)
         printk(KERN_INFO "Secondary cache size %dK, linesize %d bytes.\n",
                (scache_size >> 10), sc_lsize);
 
-        if (!((config >> 3) & 1))       /* CONF_SE */
+        if (!(config & R7K_CONF_SE))
                 rm7k_sc_enable();
 
         /*
include/asm-mips/cacheflush.h

@@ -90,4 +90,7 @@ extern void (*flush_data_cache_page)(unsigned long addr);
 #define ClearPageDcacheDirty(page) \
         clear_bit(PG_dcache_dirty, &(page)->flags)
 
+/* Run kernel code uncached, useful for cache probing functions. */
+unsigned long __init run_uncached(void *func);
+
 #endif /* _ASM_CACHEFLUSH_H */
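As a usage sketch of the newly declared interface (the probe function, its name, and its printout are invented for illustration; only run_uncached() itself is part of this patch), a cache-setup caller now makes a single call instead of hand-casting a function pointer into CKSEG1, as the c-r4k.c and sc-rm7k.c hunks above do:

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/cacheflush.h>     /* for run_uncached() */

/* Hypothetical probe. Per the contract in uncached.c it must not take
 * stack arguments or return anything wider than a register, since only
 * the stack pointer itself is remapped into the uncached segment. */
static int __init my_scache_probe(void)
{
        /* ... inspect cache tags while all accesses bypass the caches ... */
        return 1;
}

static void __init my_scache_setup(void)
{
        if (run_uncached(my_scache_probe))
                printk(KERN_INFO "secondary cache detected\n");
}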
include/asm-mips/mipsregs.h

@@ -433,6 +433,9 @@
 #define R5K_CONF_SE             (_ULCAST_(1) << 12)
 #define R5K_CONF_SS             (_ULCAST_(3) << 20)
 
+/* Bits specific to the RM7000. */
+#define R7K_CONF_SE             (_ULCAST_(1) << 3)
+
 /* Bits specific to the R10000. */
 #define R10K_CONF_DN            (_ULCAST_(3) << 3)
 #define R10K_CONF_CT            (_ULCAST_(1) << 5)