Commit 37443ef3 authored by Paul Mundt

sh: Migrate SH-4 cacheflush ops to function pointers.

This paves the way for allowing individual CPUs to overload the
individual flushing routines that they care about without having to
depend on weak aliases. SH-4 is converted over initially, as it wires
up pretty much everything. The majority of the other CPUs will simply use
the default no-op implementation with their own region flushers wired up.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent: 916e9783
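Before the diff itself, a minimal userspace model of the dispatch scheme this commit introduces may help. The names mirror the kernel's, but the bodies are stand-ins, and cpu_is_sh4 is a hypothetical stand-in for the kernel's CPU-family probe:

#include <stdio.h>

/* The ops become global function pointers (cf. cpu/cacheflush.h below). */
void (*flush_cache_all)(void);
void (*flush_icache_range)(unsigned long start, unsigned long end);

/* Safe no-op defaults, as in arch/sh/mm/cache.c. */
static void noop_flush_cache_all(void) { }
static void noop_flush_icache_range(unsigned long start, unsigned long end) { }

/* A CPU-specific implementation; the real one lives in cache-sh4.c. */
static void sh4_flush_cache_all(void)
{
	puts("sh4: write back D-cache, invalidate I-cache");
}

static void cpu_cache_init(int cpu_is_sh4)
{
	/* Every pointer gets a valid no-op first... */
	flush_cache_all = noop_flush_cache_all;
	flush_icache_range = noop_flush_icache_range;

	/* ...then the detected CPU overloads only what it cares about. */
	if (cpu_is_sh4)
		flush_cache_all = sh4_flush_cache_all;
}

int main(void)
{
	cpu_cache_init(1);
	flush_cache_all();		/* dispatches to sh4_flush_cache_all() */
	flush_icache_range(0, 0);	/* still the harmless default */
	return 0;
}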
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
+#include <linux/mm.h>
+
 #ifdef __KERNEL__
 
-#ifdef CONFIG_CACHE_OFF
-/*
- * Nothing to do when the cache is disabled, initial flush and explicit
- * disabling is handled at CPU init time.
- *
- * See arch/sh/kernel/cpu/init.c:cache_init().
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
-#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
-#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
-#else
 #include <cpu/cacheflush.h>
 
 /*
  * Consistent DMA requires that the __flush_xxx() primitives must be set
  * for any of the enabled non-coherent caches (most of the UP CPUs),
  * regardless of PIPT or VIPT cache configurations.
  */
 
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-#endif
 
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
......
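The "Consistent DMA" comment above is why the __flush_xxx() region primitives must always resolve to something callable: non-coherent DMA paths invoke them unconditionally. A toy model of such a caller, with a hypothetical dma_publish() helper and the no-op default wired in by hand:

#include <string.h>

/* Region primitive as a pointer; harmless until a CPU init path sets it. */
void (*__flush_wback_region)(void *start, int size);

static void noop__flush_region(void *start, int size) { }

/* Hypothetical non-coherent DMA setup: CPU-written data must be pushed
 * out of the cache before the device reads it from memory. */
static void dma_publish(void *buf, int len)
{
	__flush_wback_region(buf, len);
}

int main(void)
{
	static char desc[64];

	__flush_wback_region = noop__flush_region;	/* default wiring */
	memset(desc, 0, sizeof(desc));
	dma_publish(desc, sizeof(desc));
	return 0;
}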
 /*
- * include/asm-sh/cpu-sh2/cacheflush.h
- *
  * Copyright (C) 2003 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
-#ifndef __ASM_CPU_SH2_CACHEFLUSH_H
-#define __ASM_CPU_SH2_CACHEFLUSH_H
+#ifndef __ASM_CPU_SH_CACHEFLUSH_H
+#define __ASM_CPU_SH_CACHEFLUSH_H
 
 /*
  * Cache flushing:
@@ -22,18 +20,23 @@
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
  * - flush_icache_range(start, end) flushes(invalidates) a range for icache
  * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
- *
- * Caches are indexed (effectively) by physical address on SH-2, so
- * we don't need them.
+ * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
+extern void (*flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *mm);
+extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
+extern void (*flush_cache_page)(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn);
+extern void (*flush_cache_range)(struct vm_area_struct *vma,
+				 unsigned long start, unsigned long end);
+extern void (*flush_dcache_page)(struct page *page);
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*flush_icache_page)(struct vm_area_struct *vma,
+				 struct page *page);
+extern void (*flush_cache_sigtramp)(unsigned long address);
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
 
-#endif /* __ASM_CPU_SH2_CACHEFLUSH_H */
+#endif /* __ASM_CPU_SH_CACHEFLUSH_H */
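Note what the switch from do-nothing macros to extern function pointers does not require: any change to callers. C invokes a function pointer with ordinary call syntax, so every existing flush_dcache_page(page) call site compiles unchanged. A minimal sketch, with a hypothetical real_flush() implementation:

#include <stdio.h>

static void real_flush(unsigned long start, unsigned long end)
{
	printf("flush %#lx..%#lx\n", start, end);
}

/* A pointer, yet call sites look exactly like a direct function call. */
void (*flush_icache_range)(unsigned long, unsigned long) = real_flush;

int main(void)
{
	flush_icache_range(0x1000, 0x2000);	/* unchanged caller syntax */
	return 0;
}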
-/*
- * include/asm-sh/cpu-sh4/cacheflush.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
-#define __ASM_CPU_SH4_CACHEFLUSH_H
-
-/*
- * Caches are broken on SH-4 (unless we use write-through
- * caching; in which case they're only semi-broken),
- * so we need them.
- */
-void flush_cache_all(void);
-void flush_cache_mm(struct mm_struct *mm);
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
-		      unsigned long pfn);
-void flush_dcache_page(struct page *pg);
-void flush_icache_range(unsigned long start, unsigned long end);
-#define flush_icache_page(vma,pg) do { } while (0)
-
-#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
@@ -3,10 +3,6 @@
 
 #ifndef __ASSEMBLY__
 
-struct vm_area_struct;
-struct page;
-struct mm_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_sigtramp(unsigned long vaddr);
@@ -16,10 +12,14 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, uns
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 
+/* XXX .. */
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
 
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
 #define flush_icache_page(vma, page)	do { } while (0)
 
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
@@ -26,13 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -44,39 +37,13 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
-/*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
-	}
-}
-
 /*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
 {
 	int icacheaddr;
 	unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -280,7 +247,7 @@ static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
  *         ADDR: Virtual Address (U0 address)
  *         PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long address, unsigned long pfn)
 {
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	switch (boot_cpu_data.dcache.ways) {
+	case 1:
+		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+		break;
+	case 2:
+		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+		break;
+	case 4:
+		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+		break;
+	default:
+		panic("unknown number of cache ways\n");
+		break;
+	}
+
+	flush_icache_range = sh4_flush_icache_range;
+	flush_dcache_page = sh4_flush_dcache_page;
+	flush_cache_all = sh4_flush_cache_all;
+	flush_cache_mm = sh4_flush_cache_mm;
+	flush_cache_dup_mm = sh4_flush_cache_mm;
+	flush_cache_page = sh4_flush_cache_page;
+	flush_cache_range = sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
@@ -20,6 +20,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
+extern void __weak sh4__flush_region_init(void);
+
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
@@ -27,6 +29,8 @@ void __init cpu_cache_init(void)
 {
 	/* Reserve a slot for dcache colouring in the DTLB */
 	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+	sh4__flush_region_init();
 }
 
 void __init kmap_coherent_init(void)
......
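The hunk above (evidently the SH-5 cpu_cache_init(), given sh64_get_wired_dtlb_entry()) shows the payoff for code sharing: another CPU family adopts the SH-4 region flushers wholesale by calling the same registration helper, rather than carrying its own copies; the __weak extern, on my reading, keeps the link intact in configurations that omit the SH-4 flush code. A sketch of the sharing pattern, with a hypothetical sh5_cpu_cache_init():

#include <stdio.h>

void (*__flush_wback_region)(void *start, int size);

/* One family provides the implementation... */
static void sh4__flush_wback_region(void *start, int size)
{
	printf("wback %p, %d bytes\n", start, size);
}

void sh4__flush_region_init(void)
{
	__flush_wback_region = sh4__flush_wback_region;
}

/* ...and any other family's init path can adopt it wholesale. */
static void sh5_cpu_cache_init(void)
{
	sh4__flush_region_init();
}

int main(void)
{
	static char buf[64];

	sh5_cpu_cache_init();
	__flush_wback_region(buf, sizeof(buf));
	return 0;
}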
@@ -15,6 +15,62 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+			 unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+			  struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+					   unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+					  struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
 
+	flush_cache_all = noop_flush_cache_all;
+	flush_cache_mm = noop_flush_cache_mm;
+	flush_cache_dup_mm = noop_flush_cache_mm;
+	flush_cache_page = noop_flush_cache_page;
+	flush_cache_range = noop_flush_cache_range;
+	flush_dcache_page = noop_flush_dcache_page;
+	flush_icache_range = noop_flush_icache_range;
+	flush_icache_page = noop_flush_icache_page;
+	flush_cache_sigtramp = noop_flush_cache_sigtramp;
+	__flush_wback_region = noop__flush_region;
+	__flush_purge_region = noop__flush_region;
+	__flush_invalidate_region = noop__flush_region;
+
 	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
......
@@ -8,7 +8,7 @@
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_wback_region(void *start, int size)
+static void sh4__flush_wback_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -51,7 +51,7 @@ void __weak __flush_wback_region(void *start, int size)
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_purge_region(void *start, int size)
+static void sh4__flush_purge_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -90,7 +90,7 @@ void __weak __flush_purge_region(void *start, int size)
 /*
  * No write back please
  */
-void __weak __flush_invalidate_region(void *start, int size)
+static void sh4__flush_invalidate_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -126,3 +126,10 @@ void __weak __flush_invalidate_region(void *start, int size)
 		cnt--;
 	}
 }
+
+void __init sh4__flush_region_init(void)
+{
+	__flush_wback_region = sh4__flush_wback_region;
+	__flush_invalidate_region = sh4__flush_invalidate_region;
+	__flush_purge_region = sh4__flush_purge_region;
+}
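The void __weak definitions being dropped here were the old override mechanism: a weak symbol that a CPU-specific object file could replace at link time, which (as the commit message notes) the pointer scheme makes unnecessary. For contrast, a minimal sketch of the retired link-time scheme, assuming the GCC/Clang __attribute__((weak)) spelling behind the kernel's __weak macro:

#include <stdio.h>

/* Weak default: any non-weak definition of the same symbol in another
 * object file silently wins at link time. */
__attribute__((weak)) void __flush_wback_region(void *start, int size)
{
	/* default: nothing to do */
}

int main(void)
{
	/* With no strong override linked in, the weak body runs. */
	__flush_wback_region(NULL, 0);
	puts("weak default used");
	return 0;
}

The limitation this migration removes: a weak alias selects one implementation per kernel image at link time, whereas function pointers let the implementation be chosen per boot, per detected CPU.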
@@ -210,6 +210,9 @@ void __init mem_init(void)
 		high_memory = node_high_memory;
 	}
 
+	/* Set this up early, so we can take care of the zero page */
+	cpu_cache_init();
+
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
@@ -230,8 +233,6 @@
 			datasize >> 10,
 			initsize >> 10);
 
-	cpu_cache_init();
-
 	/* Initialize the vDSO */
 	vsyscall_init();
 }
......
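The mem_init() reorder above is a correctness fix that falls directly out of the migration: __flush_wback_region() used to be a real (weak) function and could be called at any point, but it is now a function pointer that cpu_cache_init() must assign before the zero page is flushed. A compressed model of the hazard and the fix:

#include <stdio.h>
#include <string.h>

static char empty_zero_page[4096];

/* Now a pointer: NULL until cpu_cache_init() runs. */
void (*__flush_wback_region)(void *start, int size);

static void noop__flush_region(void *start, int size) { }

static void cpu_cache_init(void)
{
	__flush_wback_region = noop__flush_region;
}

int main(void)
{
	/* Set this up early, so we can take care of the zero page --
	 * with the old ordering (cpu_cache_init() after the flush),
	 * the call below would jump through a NULL pointer. */
	cpu_cache_init();

	memset(empty_zero_page, 0, sizeof(empty_zero_page));
	__flush_wback_region(empty_zero_page, sizeof(empty_zero_page));
	puts("zero page flushed via initialized pointer");
	return 0;
}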