Commit d9cdc901 authored by Ralf Baechle

MIPS: cache: Provide cache flush operations for XFS

Until now flush_kernel_vmap_range() and invalidate_kernel_vmap_range() did
not exist on MIPS, resulting in heavy cache corruption on XFS filesystems.

Left for post-3.0: optimization, and making this work with highmem as well.
Since the combination of highmem and cache aliases doesn't currently work,
this isn't a regression.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Patchwork: https://patchwork.linux-mips.org/patch/2505/
Parent 2e5db86d
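
For context, a minimal sketch of how a vm_map_ram()-mapped buffer is expected to be bracketed by the two new hooks. This is not part of this commit: the example_* functions and call sites are hypothetical, and the semantics follow the generic API description in Documentation/cachetlb.txt.

#include <linux/highmem.h>

/*
 * Illustrative only: a filesystem such as XFS maps a multi-page buffer
 * with vm_map_ram() and must keep that vmap alias coherent with the
 * underlying pages around block I/O.
 */
static void example_prepare_write(void *vaddr, int size)
{
	/* Write back dirty lines of the vmap alias before the device reads the pages. */
	flush_kernel_vmap_range(vaddr, size);
	/* ... submit write I/O on the underlying pages ... */
}

static void example_complete_read(void *vaddr, int size)
{
	/* Read I/O into the underlying pages has completed. */
	/* Drop now-stale lines of the vmap alias before the CPU reads the new data. */
	invalidate_kernel_vmap_range(vaddr, size);
}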

arch/mips/include/asm/cacheflush.h
@@ -114,4 +114,28 @@ unsigned long run_uncached(void *func);
extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */

arch/mips/mm/c-octeon.c
@@ -169,6 +169,10 @@ static void octeon_flush_cache_page(struct vm_area_struct *vma,
	octeon_flush_icache_all_cores(vma);
}

static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
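	/*
	 * Should not be reached: the flush_kernel_vmap_range() and
	 * invalidate_kernel_vmap_range() wrappers only call this hook
	 * when the CPU has D-cache aliases.
	 */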
	BUG();
}

/**
* Probe Octeon's caches
@@ -273,6 +277,8 @@ void __cpuinit octeon_cache_init(void)
	flush_icache_range = octeon_flush_icache_range;
	local_flush_icache_range = local_octeon_flush_icache_range;

	__flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;

	build_clear_page();
	build_copy_page();
}

arch/mips/mm/c-r3k.c
@@ -299,6 +299,11 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
	write_c0_status(flags);
}

static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
{
	/* Catch bad driver code */
@@ -323,6 +328,8 @@ void __cpuinit r3k_cache_init(void)
	flush_icache_range = r3k_flush_icache_range;
	local_flush_icache_range = r3k_flush_icache_range;

	__flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;

	flush_cache_sigtramp = r3k_flush_cache_sigtramp;
	local_flush_data_cache_page = local_r3k_flush_data_cache_page;
	flush_data_cache_page = r3k_flush_data_cache_page;

arch/mips/mm/c-r4k.c
@@ -722,6 +722,39 @@ static void r4k_flush_icache_all(void)
	r4k_blast_icache();
}

struct flush_kernel_vmap_range_args {
	unsigned long vaddr;
	int size;
};

static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
	struct flush_kernel_vmap_range_args *vmra = args;
	unsigned long vaddr = vmra->vaddr;
	int size = vmra->size;

	/*
	 * Aliases only affect the primary caches so don't bother with
	 * S-caches or T-caches.
	 */
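	/*
	 * Once the range is at least as large as the D-cache, blasting the
	 * whole cache by index is cheaper than hitting the range line by
	 * line; this is only done where index cache ops are known to be safe.
	 */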
	if (cpu_has_safe_index_cacheops && size >= dcache_size)
		r4k_blast_dcache();
	else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(vaddr, vaddr + size);
	}
}

static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	struct flush_kernel_vmap_range_args args;

	args.vaddr = (unsigned long) vaddr;
	args.size = size;

	r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
@@ -1403,6 +1436,8 @@ void __cpuinit r4k_cache_init(void)
	flush_cache_page = r4k_flush_cache_page;
	flush_cache_range = r4k_flush_cache_range;

	__flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;

arch/mips/mm/c-tx39.c
@@ -253,6 +253,11 @@ static void tx39_flush_icache_range(unsigned long start, unsigned long end)
	}
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;
@@ -394,6 +399,8 @@ void __cpuinit tx39_cache_init(void)
		flush_icache_range = tx39_flush_icache_range;
		local_flush_icache_range = tx39_flush_icache_range;

		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
		flush_data_cache_page = tx39_flush_data_cache_page;

arch/mips/mm/cache.c
@@ -35,6 +35,11 @@ void (*local_flush_icache_range)(unsigned long start, unsigned long end);
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);