Commit 14bd8c08 authored by Ralf Baechle

MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.

It was ugly.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent 7b784c63
arch/mips/include/asm/cacheops.h
@@ -20,11 +20,7 @@
 #define Index_Load_Tag_D	0x05
 #define Index_Store_Tag_I	0x08
 #define Index_Store_Tag_D	0x09
-#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I	0x00
-#else
 #define Hit_Invalidate_I	0x10
-#endif
 #define Hit_Invalidate_D	0x11
 #define Hit_Writeback_Inv_D	0x15
@@ -84,4 +80,9 @@
 #define Index_Store_Data_D	0x1d
 #define Index_Store_Data_S	0x1f
 
+/*
+ * Loongson2-specific cacheops
+ */
+#define Hit_Invalidate_I_Loongson23	0x00
+
 #endif /* __ASM_CACHEOPS_H */
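For context: these opcodes feed the cache_op() macro in asm/r4kcache.h, which emits a single MIPS cache instruction with the opcode as an immediate operand. A rough sketch of that macro (paraphrased from the kernel header; exact assembler directives may differ):

	/* Sketch of cache_op(), paraphrased from asm/r4kcache.h. */
	#define cache_op(op, addr)					\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noreorder			\n"	\
		"	.set	mips3				\n"	\
		"	cache	%0, %1				\n"	\
		"	.set	pop				\n"	\
		:							\
		: "i" (op), "R" (*(unsigned char *)(addr)))

Note the "i" (immediate) constraint: the opcode must be a compile-time constant at each call site. The runtime switches introduced below still satisfy this, because every case passes a literal opcode.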
arch/mips/include/asm/r4kcache.h
@@ -15,6 +15,7 @@
 #include <asm/asm.h>
 #include <asm/cacheops.h>
 #include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
 
 /*
@@ -162,7 +163,15 @@ static inline void flush_scache_line_indexed(unsigned long addr)
 static inline void flush_icache_line(unsigned long addr)
 {
 	__iflush_prologue
-	cache_op(Hit_Invalidate_I, addr);
+	switch (boot_cpu_type()) {
+	case CPU_LOONGSON2:
+		cache_op(Hit_Invalidate_I_Loongson23, addr);
+		break;
+
+	default:
+		cache_op(Hit_Invalidate_I, addr);
+		break;
+	}
 	__iflush_epilogue
 }
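The runtime switch need not cost anything over the old #ifdef: the newly included asm/cpu-type.h is designed so that boot_cpu_type() can resolve to a compile-time constant when the kernel is configured for a single CPU type, letting the compiler discard the dead case. A standalone sketch of the effect (hypothetical stand-ins, not kernel code):

	#include <stdio.h>

	enum cpu_type { CPU_LOONGSON2, CPU_OTHER };

	/*
	 * Stand-in for boot_cpu_type(): in a kernel built for exactly one
	 * CPU type this is a constant, so the switch below folds away and
	 * the generated code matches what the old #ifdef produced.
	 */
	static inline enum cpu_type boot_cpu_type(void)
	{
		return CPU_LOONGSON2;
	}

	static void flush_icache_line(unsigned long addr)
	{
		switch (boot_cpu_type()) {
		case CPU_LOONGSON2:
			/* would be: cache_op(Hit_Invalidate_I_Loongson23, addr) */
			printf("cache 0x00, (%#lx)\n", addr);
			break;
		default:
			/* would be: cache_op(Hit_Invalidate_I, addr) */
			printf("cache 0x10, (%#lx)\n", addr);
			break;
		}
	}

	int main(void)
	{
		flush_icache_line(0x80001000UL);
		return 0;
	}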
@@ -208,7 +217,15 @@ static inline void flush_scache_line(unsigned long addr)
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
-	protected_cache_op(Hit_Invalidate_I, addr);
+	switch (boot_cpu_type()) {
+	case CPU_LOONGSON2:
+		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+		break;
+
+	default:
+		protected_cache_op(Hit_Invalidate_I, addr);
+		break;
+	}
 }
 
 /*
@@ -412,8 +429,8 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
-static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
+static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end) \
 { \
 	unsigned long lsize = cpu_##desc##_line_size(); \
@@ -432,13 +449,15 @@ static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 	__##pfx##flush_epilogue \
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
+	protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
 
 #endif /* _ASM_R4KCACHE_H */
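For readers tracing the new `extra` parameter: instantiating the macro as __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, protected_, loongson23_) generates protected_loongson23_blast_icache_range(), which c-r4k.c uses below. An approximate expansion, with the __iflush_prologue/__iflush_epilogue bookkeeping elided:

	/* Approximate expansion of the loongson23_ instantiation. */
	static inline void protected_loongson23_blast_icache_range(unsigned long start,
								    unsigned long end)
	{
		unsigned long lsize = cpu_icache_line_size();
		unsigned long addr = start & ~(lsize - 1);
		unsigned long aend = (end - 1) & ~(lsize - 1);

		while (1) {
			protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
			if (addr == aend)
				break;
			addr += lsize;
		}
	}

The empty `extra` argument in the other instantiations keeps all existing function names unchanged.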
arch/mips/mm/c-r4k.c
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
 
 static inline void local_r4k___flush_cache_all(void * args)
 {
-#if defined(CONFIG_CPU_LOONGSON2)
-	r4k_blast_scache();
-	return;
-#endif
-	r4k_blast_dcache();
-	r4k_blast_icache();
-
 	switch (current_cpu_type()) {
+	case CPU_LOONGSON2:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_R14000:
+		/*
+		 * These caches are inclusive caches, that is, if something
+		 * is not cached in the S-cache, we know it also won't be
+		 * in one of the primary caches.
+		 */
 		r4k_blast_scache();
+		break;
+
+	default:
+		r4k_blast_dcache();
+		r4k_blast_icache();
+		break;
 	}
 }
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 	if (end - start > icache_size)
 		r4k_blast_icache();
-	else
-		protected_blast_icache_range(start, end);
+	else {
+		switch (boot_cpu_type()) {
+		case CPU_LOONGSON2:
+			protected_loongson23_blast_icache_range(start, end);
+			break;
+
+		default:
+			protected_blast_icache_range(start, end);
+			break;
+		}
+	}
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
 	case CPU_ALCHEMY:
 		c->icache.flags |= MIPS_CACHE_IC_F_DC;
 		break;
-	}
 
-#ifdef CONFIG_CPU_LOONGSON2
-	/*
-	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
-	 * one op will act on all 4 ways
-	 */
-	c->icache.ways = 1;
-#endif
+	case CPU_LOONGSON2:
+		/*
+		 * LOONGSON2 has 4 way icache, but when using indexed cache op,
+		 * one op will act on all 4 ways
+		 */
+		c->icache.ways = 1;
+	}
 
 	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
 	       icache_size >> 10,
@@ -1193,7 +1206,6 @@ static int probe_scache(void)
 	return 1;
 }
 
-#if defined(CONFIG_CPU_LOONGSON2)
 static void __init loongson2_sc_init(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
 
 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
-#endif
 
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
 #endif
 		return;
 
-#if defined(CONFIG_CPU_LOONGSON2)
 	case CPU_LOONGSON2:
 		loongson2_sc_init();
 		return;
-#endif
+
 	case CPU_XLP:
 		/* don't need to worry about L2, fully coherent */
 		return;
arch/mips/mm/tlb-r4k.c
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#if defined(CONFIG_CPU_LOONGSON2)
 /*
  * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
  * unfortunately, itlb is not totally transparent to software.
  */
-#define FLUSH_ITLB write_c0_diag(4);
-
-#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }
-
-#else
-
-#define FLUSH_ITLB
-#define FLUSH_ITLB_VM(vma)
-
-#endif
+static inline void flush_itlb(void)
+{
+	switch (current_cpu_type()) {
+	case CPU_LOONGSON2:
+		write_c0_diag(4);
+		break;
+	default:
+		break;
+	}
+}
+
+static inline void flush_itlb_vm(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_EXEC)
+		flush_itlb();
+}
 
 void local_flush_tlb_all(void)
 {
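Replacing the function-like macros with static inline functions is more than cosmetic: the old brace-block form of FLUSH_ITLB_VM() is fragile in if/else contexts, and the inline functions also type-check their vma argument. A standalone illustration of the classic pitfall (hypothetical example, not kernel code):

	#include <stdio.h>

	/* Old style: a brace block hidden behind a macro. */
	#define FLUSH_BAD()	{ puts("flush"); }

	/* New style: a real function. */
	static inline void flush_good(void) { puts("flush"); }

	int main(void)
	{
		int exec = 0;

		/*
		 * With the macro, the natural "FLUSH_BAD();" would not
		 * compile here: the block plus the stray ';' terminates
		 * the if, leaving the "else" dangling.
		 */
		if (exec)
			FLUSH_BAD()	/* must omit the ';' -- easy to get wrong */
		else
			puts("skip");

		if (exec)
			flush_good();	/* reads like any other call */
		else
			puts("skip");

		return 0;
	}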
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
 EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		FLUSH_ITLB;
+		flush_itlb();
 		EXIT_CRITICAL(flags);
 	}
 }
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	} else {
 		local_flush_tlb_all();
 	}
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	finish:
 		write_c0_entryhi(oldpid);
-		FLUSH_ITLB_VM(vma);
+		flush_itlb_vm(vma);
 		EXIT_CRITICAL(flags);
 	}
 }
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
 		tlbw_use_hazard();
 	}
 	write_c0_entryhi(oldpid);
-	FLUSH_ITLB;
+	flush_itlb();
 	EXIT_CRITICAL(flags);
 }
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 		tlb_write_indexed();
 	}
 	tlbw_use_hazard();
-	FLUSH_ITLB_VM(vma);
+	flush_itlb_vm(vma);
 	EXIT_CRITICAL(flags);
 }
arch/mips/mm/tlbex.c
@@ -1311,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
 	 * need three, with the second nop'ed and the third being
 	 * unused.
 	 */
-	/* Loongson2 ebase is different than r4k, we have more space */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-	if ((p - tlb_handler) > 64)
-		panic("TLB refill handler space exceeded");
-#else
-	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
-	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
-		&& uasm_insn_has_bdelay(relocs,
-					tlb_handler + MIPS64_REFILL_INSNS - 3)))
-		panic("TLB refill handler space exceeded");
-#endif
-
-	/*
-	 * Now fold the handler in the TLB refill handler space.
-	 */
-#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
-	f = final_handler;
-	/* Simplest case, just copy the handler. */
-	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-	final_len = p - tlb_handler;
-#else /* CONFIG_64BIT */
-	f = final_handler + MIPS64_REFILL_INSNS;
-	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
-		/* Just copy the handler. */
-		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
-		final_len = p - tlb_handler;
-	} else {
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-		const enum label_id ls = label_tlb_huge_update;
-#else
-		const enum label_id ls = label_vmalloc;
-#endif
-		u32 *split;
-		int ov = 0;
-		int i;
-
-		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
-			;
-		BUG_ON(i == ARRAY_SIZE(labels));
-		split = labels[i].addr;
-
-		/*
-		 * See if we have overflown one way or the other.
-		 */
-		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
-		    split < p - MIPS64_REFILL_INSNS)
-			ov = 1;
-
-		if (ov) {
-			/*
-			 * Split two instructions before the end. One
-			 * for the branch and one for the instruction
-			 * in the delay slot.
-			 */
-			split = tlb_handler + MIPS64_REFILL_INSNS - 2;
-
-			/*
-			 * If the branch would fall in a delay slot,
-			 * we must back up an additional instruction
-			 * so that it is no longer in a delay slot.
-			 */
-			if (uasm_insn_has_bdelay(relocs, split - 1))
-				split--;
-		}
-		/* Copy first part of the handler. */
-		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
-		f += split - tlb_handler;
-
-		if (ov) {
-			/* Insert branch. */
-			uasm_l_split(&l, final_handler);
-			uasm_il_b(&f, &r, label_split);
-			if (uasm_insn_has_bdelay(relocs, split))
-				uasm_i_nop(&f);
-			else {
-				uasm_copy_handler(relocs, labels,
-						  split, split + 1, f);
-				uasm_move_labels(labels, f, f + 1, -1);
-				f++;
-				split++;
-			}
-		}
-
-		/* Copy the rest of the handler. */
-		uasm_copy_handler(relocs, labels, split, p, final_handler);
-		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
-			    (p - split);
+	switch (boot_cpu_type()) {
+	default:
+		if (sizeof(long) == 4) {
+	case CPU_LOONGSON2:
+		/* Loongson2 ebase is different than r4k, we have more space */
+			if ((p - tlb_handler) > 64)
+				panic("TLB refill handler space exceeded");
+			/*
+			 * Now fold the handler in the TLB refill handler space.
+			 */
+			f = final_handler;
+			/* Simplest case, just copy the handler. */
+			uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+			final_len = p - tlb_handler;
+			break;
+		} else {
+			if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+			    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+				&& uasm_insn_has_bdelay(relocs,
+							tlb_handler + MIPS64_REFILL_INSNS - 3)))
+				panic("TLB refill handler space exceeded");
+
+			/*
+			 * Now fold the handler in the TLB refill handler space.
+			 */
+			f = final_handler + MIPS64_REFILL_INSNS;
+			if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
+				/* Just copy the handler. */
+				uasm_copy_handler(relocs, labels, tlb_handler, p, f);
+				final_len = p - tlb_handler;
+			} else {
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+				const enum label_id ls = label_tlb_huge_update;
+#else
+				const enum label_id ls = label_vmalloc;
+#endif
+				u32 *split;
+				int ov = 0;
+				int i;
+
+				for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+					;
+				BUG_ON(i == ARRAY_SIZE(labels));
+				split = labels[i].addr;
+
+				/*
+				 * See if we have overflown one way or the other.
+				 */
+				if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+				    split < p - MIPS64_REFILL_INSNS)
+					ov = 1;
+
+				if (ov) {
+					/*
+					 * Split two instructions before the end. One
+					 * for the branch and one for the instruction
+					 * in the delay slot.
+					 */
+					split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+					/*
+					 * If the branch would fall in a delay slot,
+					 * we must back up an additional instruction
+					 * so that it is no longer in a delay slot.
+					 */
+					if (uasm_insn_has_bdelay(relocs, split - 1))
+						split--;
+				}
+				/* Copy first part of the handler. */
+				uasm_copy_handler(relocs, labels, tlb_handler, split, f);
+				f += split - tlb_handler;
+
+				if (ov) {
+					/* Insert branch. */
+					uasm_l_split(&l, final_handler);
+					uasm_il_b(&f, &r, label_split);
+					if (uasm_insn_has_bdelay(relocs, split))
+						uasm_i_nop(&f);
+					else {
+						uasm_copy_handler(relocs, labels,
+								  split, split + 1, f);
+						uasm_move_labels(labels, f, f + 1, -1);
+						f++;
+						split++;
+					}
+				}
+
+				/* Copy the rest of the handler. */
+				uasm_copy_handler(relocs, labels, split, p, final_handler);
+				final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+					    (p - split);
+			}
+		}
+		break;
 	}
-#endif /* CONFIG_64BIT */
 
 	uasm_resolve_relocs(relocs, labels);
 	pr_debug("Wrote TLB refill handler (%u instructions).\n",
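The reworked sizing code above uses an unusual but legal C construct: a case label placed inside a nested block of the switch body. CPU_LOONGSON2 jumps straight into the 32-bit sizing path, while the default case first tests sizeof(long) == 4, a constant the compiler folds at build time. A minimal standalone demonstration of the control flow (hypothetical values, not kernel code):

	#include <stdio.h>

	enum cpu_type { CPU_OTHER, CPU_LOONGSON2 };

	static void size_handler(enum cpu_type cpu)
	{
		switch (cpu) {
		default:
			if (sizeof(long) == 4) {
		case CPU_LOONGSON2:
				/* Loongson2 lands here on 32- and 64-bit builds alike. */
				puts("simple handler, 64-instruction budget");
				break;
			} else {
				puts("folded 64-bit handler, MIPS64_REFILL_INSNS budget");
				break;
			}
		}
	}

	int main(void)
	{
		size_handler(CPU_OTHER);	/* path depends on sizeof(long) */
		size_handler(CPU_LOONGSON2);	/* always the simple path */
		return 0;
	}

Jumping to a case label inside the if's then-block is well defined here because no variable-length arrays are skipped; the effect is the same dead-code elimination the old #if/#else achieved, without any preprocessor conditionals.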