Commit 1bb77267 authored by Nicolas Pitre

[ARM] Feroceon: add highmem support to L2 cache handling code

The choice is between looping over the physical range and performing
single cache line operations, or mapping highmem pages somewhere, as
cache range operations are possible only on virtual addresses.

Because L2 range operations are much faster, we go with the latter by
factoring out the physical-to-virtual address conversion and using a
fixmap entry for it in the HIGHMEM case.

Possible future optimizations to avoid the pte setup cost:

 - do the pte setup for highmem pages only

 - determine a threshold for doing line-by-line processing on physical
   addresses when the range is small (a hypothetical sketch of this
   idea follows the commit message below)
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Parent 58edb515
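
As an aside, the second idea above can be sketched in a few lines of C. This is hypothetical code, not part of this commit: the cutoff value is an arbitrary assumption, CACHE_LINE_SIZE is the Feroceon L2 line size, and l2_clean_pa()/l2_clean_pa_range() are the helpers from the diff below.

/*
 * Hypothetical sketch of the "threshold" optimization, not part of
 * this commit.  For small ranges, skip the fixmap pte setup entirely
 * and clean line by line directly on physical addresses; for larger
 * ranges, fall back to the (much faster) range operation, which needs
 * a virtual mapping.
 */
#define CACHE_LINE_SIZE         32                      /* Feroceon L2 */
#define L2_LINE_BY_LINE_MAX     (8 * CACHE_LINE_SIZE)   /* assumed cutoff */

static void l2_clean_pa_range_opt(unsigned long start, unsigned long end)
{
        if (end - start <= L2_LINE_BY_LINE_MAX) {
                unsigned long addr;

                /* 'end' is the address of the last line, as in the range op */
                for (addr = start; addr <= end; addr += CACHE_LINE_SIZE)
                        l2_clean_pa(addr);      /* single-line op on a PA */
        } else {
                l2_clean_pa_range(start, end);  /* fixmap-based range op */
        }
}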
arch/arm/include/asm/kmap_types.h
@@ -18,6 +18,7 @@ enum km_type {
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
+	KM_L2_CACHE,
 	KM_TYPE_NR
 };
arch/arm/mm/cache-feroceon-l2.c
@@ -14,8 +14,12 @@
 #include <linux/init.h>
 #include <asm/cacheflush.h>
+#include <asm/kmap_types.h>
+#include <asm/fixmap.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
+#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -34,14 +38,36 @@
  * The range operations require two successive cp15 writes, in
  * between which we don't want to be preempted.
  */
+
+static inline unsigned long l2_start_va(unsigned long paddr)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Let's do our own fixmap stuff in a minimal way here.
+	 * Because range ops can't be done on physical addresses,
+	 * we simply install a virtual mapping for it only for the
+	 * TLB lookup to occur, hence no need to flush the untouched
+	 * memory mapping.  This is protected with the disabling of
+	 * interrupts by the caller.
+	 */
+	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
+	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	local_flush_tlb_kernel_page(vaddr);
+	return vaddr + (paddr & ~PAGE_MASK);
+#else
+	return __phys_to_virt(paddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
 }
 
-static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
+static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -51,17 +77,14 @@ static inline void l2_clean_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
-{
-	l2_clean_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 static inline void l2_clean_inv_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
@@ -72,9 +95,9 @@ static inline void l2_inv_pa(unsigned long addr)
 	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
 }
 
-static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
+static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 {
-	unsigned long flags;
+	unsigned long va_start, va_end, flags;
 
 	/*
 	 * Make sure 'start' and 'end' reference the same page, as
@@ -84,17 +107,14 @@ static inline void l2_inv_mva_range(unsigned long start, unsigned long end)
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
 	raw_local_irq_save(flags);
+	va_start = l2_start_va(start);
+	va_end = va_start + (end - start);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
-		: : "r" (start), "r" (end));
+		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
 }
 
-static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
-{
-	l2_inv_mva_range(__phys_to_virt(start), __phys_to_virt(end));
-}
-
 /*
  * Linux primitives.
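
For context only: the "Linux primitives" that follow in the file are not shown in this diff. A simplified, hypothetical sketch of how such a primitive might drive the per-page helper above; the function name, the page-clamping expression, and the dsb() placement are assumptions here, not code from this commit.

/*
 * Hypothetical caller sketch, not part of this diff.  The physical
 * range is walked one page at a time so that every call into
 * l2_clean_pa_range() stays within a single page: that satisfies its
 * BUG_ON() and means l2_start_va() only ever has to map one highmem
 * page per call.
 */
static void feroceon_l2_clean_range_sketch(unsigned long start,
                                           unsigned long end)
{
        while (start < end) {
                /* clamp to the end of the range or of the current page */
                unsigned long range_end =
                        min(end, (start | (PAGE_SIZE - 1)) + 1);
                /* the range op takes the address of the last line */
                l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
                start = range_end;
        }
        dsb();  /* drain the write buffer once the whole range is done */
}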