提交 07ffee59 编写于 作者: G Greg Ungerer

m68knommu: create optimal separate instruction and data cache for ColdFire

Create separate functions to deal with instruction and data cache flushing.
This way we can optimize them for the various cache types and arrangements
used across the ColdFire family.

For example the unified caches in the version 3 cores mean we don't
need to flush the instruction cache. For the version 2 cores that do
not do data caching (or where we choose instruction cache only) we
don't need to do any data flushing.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
上级 4a5bae41
......@@ -12,14 +12,12 @@
#define flush_cache_dup_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#ifndef flush_dcache_range
#define flush_dcache_range(start,len) __flush_cache_all()
#endif
#define flush_dcache_range(start, len) __flush_dcache_all()
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_range(start,len) __flush_cache_all()
#define flush_icache_range(start, len) __flush_icache_all()
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#define flush_cache_vmap(start, end) do { } while (0)
......@@ -46,4 +44,36 @@ static inline void __flush_cache_all(void)
#endif
}
/*
* Some ColdFire parts implement separate instruction and data caches,
* on those we should just flush the appropriate cache. If we don't need
* to do any specific flushing then this will be optimized away.
*/
/*
 * Invalidate the entire instruction cache. On parts that define
 * CACHE_INVALIDATEI (split-cache ColdFires) this writes the invalidate
 * command to the CACR; on unified-cache parts it compiles to nothing,
 * since instruction fetches are already coherent there.
 */
static inline void __flush_icache_all(void)
{
#ifdef CACHE_INVALIDATEI
/*
 * CACR can only be loaded from a data register, so stage the
 * immediate invalidate value through d0 (hence the "d0" clobber).
 * NOTE(review): the trailing nop presumably serializes the pipeline
 * after the movec — confirm against the ColdFire core manual.
 */
__asm__ __volatile__ (
"movel %0, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : "i" (CACHE_INVALIDATEI) : "d0" );
#endif
}
/*
 * Push and invalidate the data cache. For copyback configurations
 * (CACHE_PUSH defined) dirty lines must be written back to memory
 * first via mcf_cache_push(). Parts that define CACHE_INVALIDATED
 * then issue the invalidate command through the CACR; all others
 * only need the write buffer drained.
 */
static inline void __flush_dcache_all(void)
{
#ifdef CACHE_PUSH
/* Copyback cache: write dirty lines out before invalidating. */
mcf_cache_push();
#endif
#ifdef CACHE_INVALIDATED
/*
 * CACR is loaded via a data register (d0 clobbered); the nop
 * follows the movec as a pipeline-settling instruction — see
 * the matching sequence in __flush_icache_all().
 */
__asm__ __volatile__ (
"movel %0, %%d0\n\t"
"movec %%d0, %%CACR\n\t"
"nop\n\t"
: : "i" (CACHE_INVALIDATED) : "d0" );
#else
/* Flush the write buffer */
__asm__ __volatile__ ( "nop" );
#endif
}
#endif /* _M68KNOMMU_CACHEFLUSH_H */
......@@ -59,22 +59,31 @@
* that as on.
*/
#if defined(CONFIG_CACHE_I)
#define CACHE_TYPE CACR_DISD
#define CACHE_TYPE (CACR_DISD + CACR_EUSP)
#define CACHE_INVTYPEI 0
#elif defined(CONFIG_CACHE_D)
#define CACHE_TYPE CACR_DISI
#define CACHE_TYPE (CACR_DISI + CACR_EUSP)
#define CACHE_INVTYPED 0
#elif defined(CONFIG_CACHE_BOTH)
#define CACHE_TYPE CACR_EUSP
#define CACHE_INVTYPEI CACR_INVI
#define CACHE_INVTYPED CACR_INVD
#else
#define CACHE_TYPE
/* This is the instruction cache only devices (no split cache, no eusp) */
#define CACHE_TYPE 0
#define CACHE_INVTYPEI 0
#endif
#if defined(CONFIG_HAVE_CACHE_SPLIT)
#define CACHE_INIT (CACR_CINV + CACHE_TYPE + CACR_EUSP)
#define CACHE_MODE (CACR_CENB + CACHE_TYPE + CACR_DCM + CACR_EUSP)
#else
#define CACHE_INIT (CACR_CINV)
#define CACHE_MODE (CACR_CENB + CACR_DCM)
#endif
#define CACHE_INIT (CACR_CINV + CACHE_TYPE)
#define CACHE_MODE (CACR_CENB + CACHE_TYPE + CACR_DCM)
#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINV)
#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINV)
#if defined(CACHE_INVTYPEI)
#define CACHE_INVALIDATEI (CACHE_MODE + CACR_CINV + CACHE_INVTYPEI)
#endif
#if defined(CACHE_INVTYPED)
#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINV + CACHE_INVTYPED)
#endif
#define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
(0x000f0000) + \
......
......@@ -48,6 +48,22 @@
#define ACR_CM_IMPRE 0x00000060 /* Cache inhibited, imprecise */
#define ACR_WPROTECT 0x00000004 /* Write protect region */
/*
* Define the cache type and arrangement (needed for pushes).
*/
#if defined(CONFIG_M5307)
#define CACHE_SIZE 0x2000 /* 8k of unified cache */
#define ICACHE_SIZE CACHE_SIZE
#define DCACHE_SIZE CACHE_SIZE
#elif defined(CONFIG_M532x)
#define CACHE_SIZE 0x4000 /* 32k of unified cache */
#define ICACHE_SIZE CACHE_SIZE
#define DCACHE_SIZE CACHE_SIZE
#endif
#define CACHE_LINE_SIZE 16 /* 16 byte line size */
#define CACHE_WAYS 4 /* 4 ways - set associative */
/*
* Set the cache controller settings we will use. This default in the
* CACR is cache inhibited, we use the ACR register to set cacheing
......@@ -55,6 +71,7 @@
*/
#if defined(CONFIG_CACHE_COPYBACK)
#define CACHE_TYPE ACR_CM_CB
#define CACHE_PUSH
#else
#define CACHE_TYPE ACR_CM_WT
#endif
......@@ -65,7 +82,15 @@
#define CACHE_MODE (CACR_EC + CACR_ESB + CACR_DCM_PRE + CACR_EUSP)
#endif
#define CACHE_INIT CACR_CINVA
/*
* Unified cache means we will never need to flush for coherency of
* instruction fetch. We will need to flush to maintain memory/DMA
* coherency though in all cases. And for copyback caches we will need
* to push cached data as well.
*/
#define CACHE_INIT CACR_CINVA
#define CACHE_INVALIDATE CACR_CINVA
#define CACHE_INVALIDATED CACR_CINVA
#define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
(0x000f0000) + \
......
......@@ -81,15 +81,14 @@
#define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATE (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
#define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
#define ACR0_MODE (0x000f0000+DATA_CACHE_MODE)
#define ACR1_MODE 0
#define ACR2_MODE (0x000f0000+INSN_CACHE_MODE)
#define ACR3_MODE 0
#if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_WT)
#define flush_dcache_range(a, l) do { asm("nop"); } while (0)
#endif
#if ((DATA_CACHE_MODE & ACR_CM) == ACR_CM_CP)
/* Copyback cache mode must push dirty cache lines first */
#define CACHE_PUSH
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册