提交 23759dc6 编写于 作者: L Lennert Buytenhek 提交者: Russell King

[ARM] 3439/2: xsc3: add I/O coherency support

Patch from Lennert Buytenhek

This patch adds support for the I/O coherent cache available on the
xsc3.  The approach is to provide a simple API to determine whether the
chipset supports coherency by calling arch_is_coherent() and then
setting the appropriate system memory PTE and PMD bits.  In addition,
we call this API on dma_alloc_coherent() and dma_map_single() calls.
A generic version exists that will compile out all the coherency-related
code that is not needed on the majority of ARM systems.

Note that we do not check for coherency in the dma_alloc_writecombine()
function as that still requires a special PTE setting.  We also don't
touch dma_mmap_coherent() as that is a special ARM-only API that is by
definition only used on non-coherent systems.
Signed-off-by: Deepak Saxena <dsaxena@plexity.net>
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
上级 d3f4c571
...@@ -252,6 +252,9 @@ static void __init dump_cpu_info(int cpu) ...@@ -252,6 +252,9 @@ static void __init dump_cpu_info(int cpu)
dump_cache("cache", cpu, CACHE_ISIZE(info)); dump_cache("cache", cpu, CACHE_ISIZE(info));
} }
} }
if (arch_is_coherent())
printk("Cache coherency enabled\n");
} }
int cpu_architecture(void) int cpu_architecture(void)
......
...@@ -219,6 +219,12 @@ static void __init ixp23xx_pci_common_init(void) ...@@ -219,6 +219,12 @@ static void __init ixp23xx_pci_common_init(void)
*IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1); *IXP23XX_PCI_CPP_ADDR_BITS &= ~(1 << 1);
} else { } else {
*IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1); *IXP23XX_PCI_CPP_ADDR_BITS |= (1 << 1);
/*
* Enable coherency on A2 silicon.
*/
if (arch_is_coherent())
*IXP23XX_CPP2XSI_CURR_XFER_REG3 &= ~IXP23XX_CPP2XSI_COH_OFF;
} }
} }
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <asm/memory.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/sizes.h> #include <asm/sizes.h>
...@@ -272,6 +273,17 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, ...@@ -272,6 +273,17 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
void * void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{ {
if (arch_is_coherent()) {
void *virt;
virt = kmalloc(size, gfp);
if (!virt)
return NULL;
*handle = virt_to_dma(dev, virt);
return virt;
}
return __dma_alloc(dev, size, handle, gfp, return __dma_alloc(dev, size, handle, gfp,
pgprot_noncached(pgprot_kernel)); pgprot_noncached(pgprot_kernel));
} }
...@@ -350,6 +362,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr ...@@ -350,6 +362,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
WARN_ON(irqs_disabled()); WARN_ON(irqs_disabled());
if (arch_is_coherent()) {
kfree(cpu_addr);
return;
}
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags); spin_lock_irqsave(&consistent_lock, flags);
......
...@@ -388,6 +388,17 @@ void __init build_mem_type_table(void) ...@@ -388,6 +388,17 @@ void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy]; cp = &cache_policies[cachepolicy];
kern_pgprot = user_pgprot = cp->pte; kern_pgprot = user_pgprot = cp->pte;
/*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
if (arch_is_coherent()) {
if (cpu_is_xsc3()) {
mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
}
}
/* /*
* ARMv6 and above have extended page tables. * ARMv6 and above have extended page tables.
*/ */
......
...@@ -371,7 +371,7 @@ ENTRY(cpu_xsc3_switch_mm) ...@@ -371,7 +371,7 @@ ENTRY(cpu_xsc3_switch_mm)
ENTRY(cpu_xsc3_set_pte) ENTRY(cpu_xsc3_set_pte)
str r1, [r0], #-2048 @ linux version str r1, [r0], #-2048 @ linux version
bic r2, r1, #0xff0 bic r2, r1, #0xdf0 @ Keep C, B, coherency bits
orr r2, r2, #PTE_TYPE_EXT @ extended page orr r2, r2, #PTE_TYPE_EXT @ extended page
eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
* to an address that the kernel can use. * to an address that the kernel can use.
*/ */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/mach-types.h>
#define __virt_to_bus(v) \ #define __virt_to_bus(v) \
({ unsigned int ret; \ ({ unsigned int ret; \
...@@ -40,6 +41,22 @@ ...@@ -40,6 +41,22 @@
data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \ data = *((volatile int *)IXP23XX_PCI_SDRAM_BAR); \
__phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); }) __phys_to_virt((((b - (data & 0xfffffff0)) + 0x00000000))); })
/*
* Coherency support. Only supported on A2 CPUs or on A1
* systems that have the cache coherency workaround.
*/
/*
 * Report whether this IXP23xx system supports I/O cache coherency.
 * True on silicon whose stepping (low nibble of the CPU ID) is A2 or
 * later, or on Roadrunner boards, which carry the A1 coherency
 * workaround.
 */
static inline int __ixp23xx_arch_is_coherent(void)
{
	extern unsigned int processor_id;

	return ((processor_id & 15) >= 2 || machine_is_roadrunner()) ? 1 : 0;
}
#define arch_is_coherent() __ixp23xx_arch_is_coherent()
#endif #endif
......
...@@ -47,7 +47,7 @@ static inline int dma_get_cache_alignment(void) ...@@ -47,7 +47,7 @@ static inline int dma_get_cache_alignment(void)
static inline int dma_is_consistent(dma_addr_t handle) static inline int dma_is_consistent(dma_addr_t handle)
{ {
return 0; return !!arch_is_coherent();
} }
/* /*
...@@ -145,7 +145,9 @@ static inline dma_addr_t ...@@ -145,7 +145,9 @@ static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size, dma_map_single(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
consistent_sync(cpu_addr, size, dir); if (!arch_is_coherent())
consistent_sync(cpu_addr, size, dir);
return virt_to_dma(dev, (unsigned long)cpu_addr); return virt_to_dma(dev, (unsigned long)cpu_addr);
} }
#else #else
...@@ -255,7 +257,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, ...@@ -255,7 +257,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
sg->dma_address = page_to_dma(dev, sg->page) + sg->offset; sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
virt = page_address(sg->page) + sg->offset; virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir);
if (!arch_is_coherent())
consistent_sync(virt, sg->length, dir);
} }
return nents; return nents;
...@@ -310,14 +314,16 @@ static inline void ...@@ -310,14 +314,16 @@ static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
consistent_sync((void *)dma_to_virt(dev, handle), size, dir); if (!arch_is_coherent())
consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
} }
static inline void static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
consistent_sync((void *)dma_to_virt(dev, handle), size, dir); if (!arch_is_coherent())
consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
} }
#else #else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction); extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
...@@ -347,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, ...@@ -347,7 +353,8 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset; char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir); if (!arch_is_coherent())
consistent_sync(virt, sg->length, dir);
} }
} }
...@@ -359,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, ...@@ -359,7 +366,8 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
for (i = 0; i < nents; i++, sg++) { for (i = 0; i < nents; i++, sg++) {
char *virt = page_address(sg->page) + sg->offset; char *virt = page_address(sg->page) + sg->offset;
consistent_sync(virt, sg->length, dir); if (!arch_is_coherent())
consistent_sync(virt, sg->length, dir);
} }
} }
#else #else
......
...@@ -234,6 +234,14 @@ static inline __deprecated void *bus_to_virt(unsigned long x) ...@@ -234,6 +234,14 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr)) #define virt_to_dma(dev, addr) (__arch_virt_to_dma(dev, addr))
#endif #endif
/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
#ifndef arch_is_coherent
#define arch_is_coherent() 0
#endif
#endif #endif
#include <asm-generic/memory_model.h> #include <asm-generic/memory_model.h>
......
...@@ -73,6 +73,7 @@ ...@@ -73,6 +73,7 @@
#define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0) #define PTE_EXT_AP_URW_SRW (PTE_EXT_AP1|PTE_EXT_AP0)
#define PTE_EXT_TEX(x) ((x) << 6) /* v5 */ #define PTE_EXT_TEX(x) ((x) << 6) /* v5 */
#define PTE_EXT_APX (1 << 9) /* v6 */ #define PTE_EXT_APX (1 << 9) /* v6 */
#define PTE_EXT_COHERENT (1 << 9) /* XScale3 */
#define PTE_EXT_SHARED (1 << 10) /* v6 */ #define PTE_EXT_SHARED (1 << 10) /* v6 */
#define PTE_EXT_NG (1 << 11) /* v6 */ #define PTE_EXT_NG (1 << 11) /* v6 */
......
...@@ -156,6 +156,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); ...@@ -156,6 +156,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define L_PTE_WRITE (1 << 5) #define L_PTE_WRITE (1 << 5)
#define L_PTE_EXEC (1 << 6) #define L_PTE_EXEC (1 << 6)
#define L_PTE_DIRTY (1 << 7) #define L_PTE_DIRTY (1 << 7)
#define L_PTE_COHERENT (1 << 9) /* I/O coherent (xsc3) */
#define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */ #define L_PTE_SHARED (1 << 10) /* shared between CPUs (v6) */
#define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */ #define L_PTE_ASID (1 << 11) /* non-global (use ASID, v6) */
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册