提交 55935a34 编写于 作者: C Christoph Lameter 提交者: Linus Torvalds

[PATCH] More slab.h cleanups

More cleanups for slab.h

1. Remove tabs from weird locations as suggested by Pekka

2. Drop the check for NUMA and SLAB_DEBUG from the fallback section
   as suggested by Pekka.

3. Uses static inline for the fallback defs as also suggested by Pekka.

4. Make kmem_ptr_valid take a const * argument.

5. Separate the NUMA fallback definitions from the kmalloc_track fallback
   definitions.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 2e892f43
...@@ -20,11 +20,11 @@ typedef struct kmem_cache kmem_cache_t __deprecated; ...@@ -20,11 +20,11 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
* Flags to pass to kmem_cache_create(). * Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set. * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
*/ */
#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */ #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */ #define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */ #define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debuggin is active */
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
...@@ -34,9 +34,9 @@ typedef struct kmem_cache kmem_cache_t __deprecated; ...@@ -34,9 +34,9 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
/* Flags passed to a constructor functions */ /* Flags passed to a constructor functions */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */ #define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */ #define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */ #define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
/* /*
* struct kmem_cache related prototypes * struct kmem_cache related prototypes
...@@ -55,7 +55,7 @@ void *kmem_cache_zalloc(struct kmem_cache *, gfp_t); ...@@ -55,7 +55,7 @@ void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *, void *); void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *); unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *); const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, void *ptr); int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
...@@ -93,19 +93,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) ...@@ -93,19 +93,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
* the appropriate general cache at compile time. * the appropriate general cache at compile time.
*/ */
#ifdef CONFIG_SLAB #ifdef CONFIG_SLAB
#include <linux/slab_def.h> #include <linux/slab_def.h>
#else #else
/* /*
* Fallback definitions for an allocator not wanting to provide * Fallback definitions for an allocator not wanting to provide
* its own optimized kmalloc definitions (like SLOB). * its own optimized kmalloc definitions (like SLOB).
*/ */
#if defined(CONFIG_NUMA) || defined(CONFIG_DEBUG_SLAB)
#error "SLAB fallback definitions not usable for NUMA or Slab debug"
#endif
/** /**
* kmalloc - allocate memory * kmalloc - allocate memory
* @size: how many bytes of memory are required. * @size: how many bytes of memory are required.
...@@ -151,7 +147,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) ...@@ -151,7 +147,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
* *
* %__GFP_REPEAT - If allocation fails initially, try once more before failing. * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
*/ */
void *kmalloc(size_t size, gfp_t flags) static inline void *kmalloc(size_t size, gfp_t flags)
{ {
return __kmalloc(size, flags); return __kmalloc(size, flags);
} }
...@@ -161,12 +157,24 @@ void *kmalloc(size_t size, gfp_t flags) ...@@ -161,12 +157,24 @@ void *kmalloc(size_t size, gfp_t flags)
* @size: how many bytes of memory are required. * @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc). * @flags: the type of memory to allocate (see kmalloc).
*/ */
void *kzalloc(size_t size, gfp_t flags) static inline void *kzalloc(size_t size, gfp_t flags)
{ {
return __kzalloc(size, flags); return __kzalloc(size, flags);
} }
#endif #endif
#ifndef CONFIG_NUMA
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
return kmalloc(size, flags);
}
static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __kmalloc(size, flags);
}
#endif /* !CONFIG_NUMA */
/* /*
* kmalloc_track_caller is a special version of kmalloc that records the * kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead * calling function of the routine calling it for slab leak tracking instead
...@@ -208,12 +216,8 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); ...@@ -208,12 +216,8 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \ #define kmalloc_node_track_caller(size, flags, node) \
kmalloc_track_caller(size, flags) kmalloc_track_caller(size, flags)
static inline void *kmalloc_node(size_t size, gfp_t flags, int node) #endif /* DEBUG_SLAB */
{
return kmalloc(size, flags);
}
#endif /* !CONFIG_NUMA */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */ #endif /* _LINUX_SLAB_H */
...@@ -3541,7 +3541,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc); ...@@ -3541,7 +3541,7 @@ EXPORT_SYMBOL(kmem_cache_zalloc);
* *
* Currently only used for dentry validation. * Currently only used for dentry validation.
*/ */
int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr) int fastcall kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
{ {
unsigned long addr = (unsigned long)ptr; unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET; unsigned long min_addr = PAGE_OFFSET;
......
...@@ -334,7 +334,7 @@ int kmem_cache_shrink(struct kmem_cache *d) ...@@ -334,7 +334,7 @@ int kmem_cache_shrink(struct kmem_cache *d)
} }
EXPORT_SYMBOL(kmem_cache_shrink); EXPORT_SYMBOL(kmem_cache_shrink);
int kmem_ptr_validate(struct kmem_cache *a, void *b) int kmem_ptr_validate(struct kmem_cache *a, const void *b)
{ {
return 0; return 0;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册