Commit a6eb9fe1 authored by FUJITA Tomonori, committed by Linus Torvalds

dma-mapping: rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN

Currently, each architecture has its own dma_get_cache_alignment implementation.

dma_get_cache_alignment returns the minimum DMA alignment.  Architectures
define it as ARCH_KMALLOC_MINALIGN (it's used to make sure that a kmalloc'ed
buffer is DMA-safe; the buffer doesn't share a cache line with other data).  So
we can unify the dma_get_cache_alignment implementations.

This patch:

dma_get_cache_alignment() needs to know whether an architecture defines
ARCH_KMALLOC_MINALIGN or not (i.e. whether the architecture has a DMA
alignment restriction).  However, slab.h defines ARCH_KMALLOC_MINALIGN itself
when an architecture doesn't define it, so that check can't work there.

Let's rename ARCH_KMALLOC_MINALIGN to ARCH_DMA_MINALIGN.  After the rename,
ARCH_KMALLOC_MINALIGN is used only in the internals of slab/slob/slub
(except for crypto).
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent cd1542c8
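For reference, a minimal sketch of the unified dma_get_cache_alignment() that this rename enables; the actual unification happens in a follow-up patch, so the exact form here is an assumption:

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	/* Only architectures with a DMA alignment restriction define this. */
	return ARCH_DMA_MINALIGN;
#endif
	/* No restriction: any alignment is DMA-safe. */
	return 1;
}

This test works only because, after this patch, ARCH_DMA_MINALIGN is never defined as a generic fallback; the slab headers' fallback applies to ARCH_KMALLOC_MINALIGN alone.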
@@ -14,7 +14,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 /*
  * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.

@@ -11,7 +11,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #ifndef __ASSEMBLER__
 struct cache_info {

@@ -15,7 +15,7 @@
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #ifdef CONFIG_SMP
 #define __cacheline_aligned

@@ -35,7 +35,7 @@
  * the slab must be aligned such that load- and store-double instructions don't
  * fault if used
  */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 #define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
 
 /*****************************************************************************/

@@ -8,6 +8,6 @@
 #define L1_CACHE_SHIFT 4
 #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
 
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif

@@ -40,7 +40,7 @@
 #ifndef __ASSEMBLY__
 
 /* MS be sure that SLAB allocates aligned objects */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #define ARCH_SLAB_MINALIGN L1_CACHE_BYTES

@@ -7,7 +7,7 @@
  * Total overkill for most systems but need as a safe default.
  * Set this one if any device in the system might do non-coherent DMA.
  */
-#define ARCH_KMALLOC_MINALIGN 128
+#define ARCH_DMA_MINALIGN 128
 #endif
 
 #endif /* __ASM_MACH_GENERIC_KMALLOC_H */

@@ -2,7 +2,7 @@
 #define __ASM_MACH_IP27_KMALLOC_H
 
 /*
- * All happy, no need to define ARCH_KMALLOC_MINALIGN
+ * All happy, no need to define ARCH_DMA_MINALIGN
  */
 
 #endif /* __ASM_MACH_IP27_KMALLOC_H */

@@ -3,9 +3,9 @@
 #if defined(CONFIG_CPU_R5000) || defined(CONFIG_CPU_RM7000)
-#define ARCH_KMALLOC_MINALIGN 32
+#define ARCH_DMA_MINALIGN 32
 #else
-#define ARCH_KMALLOC_MINALIGN 128
+#define ARCH_DMA_MINALIGN 128
 #endif
 
 #endif /* __ASM_MACH_IP32_KMALLOC_H */

@@ -21,7 +21,7 @@
 #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES
 #endif
 
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 /* data cache purge registers
  * - read from the register to unconditionally purge that cache line

@@ -10,7 +10,7 @@
 #define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 #endif
 
 #ifdef CONFIG_PTE_64BIT

@@ -180,7 +180,7 @@ typedef struct page *pgtable_t;
  * Some drivers need to perform DMA into kmalloc'ed buffers
  * and so we have to increase the kmalloc minalign for this.
  */
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #ifdef CONFIG_SUPERH64
 /*

@@ -29,6 +29,6 @@
 # define CACHE_WAY_SIZE ICACHE_WAY_SIZE
 #endif
 
-#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #endif /* _XTENSA_CACHE_H */

@@ -17,7 +17,6 @@
 #include <trace/events/kmem.h>
 
-#ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
  * Usually, the kmalloc caches are cache_line_size() aligned, except when
@@ -27,6 +26,9 @@
  * ARCH_KMALLOC_MINALIGN allows that.
  * Note that increasing this value may disable some debug features.
  */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif

 #ifndef __LINUX_SLOB_DEF_H
 #define __LINUX_SLOB_DEF_H
 
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
 #endif

@@ -106,15 +106,17 @@ struct kmem_cache {
 /*
  * Kmalloc subsystem.
  */
-#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
-#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
+#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
+#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
 #else
 #define KMALLOC_MIN_SIZE 8
 #endif
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
-#ifndef ARCH_KMALLOC_MINALIGN
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
 #endif
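
For illustration (assumed values, not part of the patch): on a configuration where ARCH_DMA_MINALIGN is 128, as in the generic MIPS kmalloc.h above, the slub definitions resolve to:

/* Illustration only: effective values when ARCH_DMA_MINALIGN is 128 */
#define KMALLOC_MIN_SIZE 128		/* ARCH_DMA_MINALIGN, since 128 > 8 */
#define KMALLOC_SHIFT_LOW 7		/* ilog2(128) */
#define ARCH_KMALLOC_MINALIGN 128	/* kmalloc() buffers are 128-byte aligned */

So the smallest kmalloc cache is 128 bytes and every kmalloc'ed object starts on a 128-byte boundary, which is what keeps a DMA buffer from sharing a cache line with unrelated data.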