Commit d3cb4871 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] atomic_long_t & include/asm-generic/atomic.h V2

Several counters already need 64 bit atomic variables on 64 bit platforms (see
mm_counter_t in sched.h).  We have to do ugly #ifdefs to fall back to 32 bit
atomics on 32 bit platforms.

The VM statistics patch that I am working on will also make more extensive
use of atomic64.

This patch introduces a new type, atomic_long_t, by providing definitions in
asm-generic/atomic.h that work like the C "long" type: it is 32 bits on 32 bit
platforms and 64 bits on 64 bit platforms.
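
As a usage sketch (not part of this patch; the counter and helper names below are
made up for illustration), a subsystem can now declare one counter that is 32 bits
wide on 32 bit platforms and 64 bits wide on 64 bit platforms without any #ifdef:

#include <asm/atomic.h>	/* the arch header now pulls in asm-generic/atomic.h */

/* Hypothetical counter: atomic_long_t resolves to atomic_t or atomic64_t
 * depending on BITS_PER_LONG. */
static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

static void record_event(void)
{
	atomic_long_inc(&nr_events);
}

static long events_seen(void)
{
	return atomic_long_read(&nr_events);
}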

It also cleans up the determination of mm_counter_t in sched.h.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent 070f8032
...@@ -216,4 +216,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif /* _ALPHA_ATOMIC_H */
...@@ -205,5 +205,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
#endif
...@@ -118,5 +118,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
#endif
...@@ -156,4 +156,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
...@@ -426,4 +426,5 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
})
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */
#ifndef _ASM_GENERIC_ATOMIC_H
#define _ASM_GENERIC_ATOMIC_H
/*
* Copyright (C) 2005 Silicon Graphics, Inc.
* Christoph Lameter <clameter@sgi.com>
*
* Allows arch independent atomic definitions to be provided without the need to
* edit all arch specific atomic.h files.
*/
/*
* Support for atomic_long_t
*
* Casts for parameters are avoided for existing atomic functions in order to
* avoid issues with cast-as-lval under gcc 4.x and other limitations that the
* macros of a platform may have.
*/
#if BITS_PER_LONG == 64
typedef atomic64_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
return (long)atomic64_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic64_t *v = (atomic64_t *)l;
atomic64_sub(i, v);
}
#else
typedef atomic_t atomic_long_t;
#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
static inline long atomic_long_read(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
return (long)atomic_read(v);
}
static inline void atomic_long_set(atomic_long_t *l, long i)
{
atomic_t *v = (atomic_t *)l;
atomic_set(v, i);
}
static inline void atomic_long_inc(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_inc(v);
}
static inline void atomic_long_dec(atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_dec(v);
}
static inline void atomic_long_add(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_add(i, v);
}
static inline void atomic_long_sub(long i, atomic_long_t *l)
{
atomic_t *v = (atomic_t *)l;
atomic_sub(i, v);
}
#endif
#endif
...@@ -137,4 +137,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ARCH_H8300_ATOMIC __ */
...@@ -254,4 +254,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
...@@ -192,4 +192,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* _ASM_IA64_ATOMIC_H */
...@@ -313,4 +313,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* _ASM_M32R_ATOMIC_H */
...@@ -157,4 +157,5 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ARCH_M68K_ATOMIC __ */
...@@ -143,4 +143,5 @@ static inline int atomic_sub_return(int i, atomic_t * v)
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
#include <asm-generic/atomic.h>
#endif /* __ARCH_M68KNOMMU_ATOMIC __ */
...@@ -713,4 +713,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */
...@@ -216,4 +216,5 @@ static __inline__ int atomic_read(const atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif
...@@ -402,5 +402,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
#endif /* __powerpc64__ */
#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
...@@ -215,5 +215,6 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* __ARCH_S390_ATOMIC__ */
...@@ -140,4 +140,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ASM_SH_ATOMIC_H */
...@@ -152,4 +152,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ASM_SH64_ATOMIC_H */
...@@ -159,4 +159,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
#endif /* !(__KERNEL__) */
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC_ATOMIC__) */
...@@ -96,4 +96,5 @@ extern int atomic64_sub_ret(int, atomic64_t *);
#define smp_mb__after_atomic_inc() barrier()
#endif
#include <asm-generic/atomic.h>
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
...@@ -126,4 +126,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __V850_ATOMIC_H__ */
...@@ -424,4 +424,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif
...@@ -286,6 +286,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
...
...@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
*/
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
...
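
As an illustration of how the mm counter macros above are consumed, here is a minimal
sketch that is not part of this commit; the _rss member and the helper function names
are assumptions made for the example only.

/* Assumes mm_struct declares a counter member as: mm_counter_t _rss;
 * inc_mm_counter(mm, rss) then expands to atomic_long_inc(&mm->_rss),
 * which is safe without taking page_table_lock.
 */
static void account_mapped_page(struct mm_struct *mm)
{
	inc_mm_counter(mm, rss);
}

static unsigned long mapped_pages(struct mm_struct *mm)
{
	return get_mm_counter(mm, rss);	/* (unsigned long)atomic_long_read() */
}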