diff --git a/arch/arm/atomic.h b/arch/arm/atomic.h
index d4ba73f1b9cb7db56384620c3e6256274f6fa543..302e6d8f568c6abd9ca9f4463815d78d9590ff5f 100644
--- a/arch/arm/atomic.h
+++ b/arch/arm/atomic.h
@@ -70,11 +70,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	return a_cas(p, t, s);
-}
-
 static inline int a_swap(volatile int *x, int v)
 {
 	int old;
diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
index 77b0b3b73759721c9a1ccc7668ba356cde92100e..8a2a1234f088a2a6332ac6e5f58b425a1c63e498 100644
--- a/arch/i386/atomic.h
+++ b/arch/i386/atomic.h
@@ -30,11 +30,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
 		: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
-{
-	__asm__( "movl %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
-}
-
 static inline void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; orl %1, %0"
@@ -48,13 +43,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	__asm__( "lock ; cmpxchg %3, %1"
-		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-	return t;
-}
-
 static inline int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
@@ -62,17 +50,6 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
-	return v;
-}
-static inline long a_swap_l(volatile void *x, long v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
-	return v;
-}
-
 static inline void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
diff --git a/arch/microblaze/atomic.h b/arch/microblaze/atomic.h
index da9949aa723fcd4297f2f7aca85dc58eef6d0a81..96265fe6c3c706155f07a22465d1ab58770d3b40 100644
--- a/arch/microblaze/atomic.h
+++ b/arch/microblaze/atomic.h
@@ -45,11 +45,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	return a_cas(p, t, s);
-}
-
 static inline int a_swap(volatile int *x, int v)
 {
 	register int old, tmp;
diff --git a/arch/mips/atomic.h b/arch/mips/atomic.h
index 9dcd1555af68dd434a1bea5976837c26c57cb993..3ec035869663aa121466c1f7fdf3ca4e236a2f7a 100644
--- a/arch/mips/atomic.h
+++ b/arch/mips/atomic.h
@@ -48,12 +48,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	return a_cas(p, t, s);
-}
-
-
 static inline int a_swap(volatile int *x, int v)
 {
 	int old, dummy;
diff --git a/arch/powerpc/atomic.h b/arch/powerpc/atomic.h
index a082c09b49f5443220c55e13610ee7fdab2367df..1044886d32790d3cacf5ec06f63130c212cbd135 100644
--- a/arch/powerpc/atomic.h
+++ b/arch/powerpc/atomic.h
@@ -43,12 +43,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	return a_cas(p, t, s);
-}
-
-
 static inline int a_swap(volatile int *x, int v)
 {
 	int old;
diff --git a/arch/sh/atomic.h b/arch/sh/atomic.h
index 104a4f1292decdf6b4763100570b587df177fc06..93ab54fe11abdf01eeacb6330500ee996294e2b1 100644
--- a/arch/sh/atomic.h
+++ b/arch/sh/atomic.h
@@ -41,11 +41,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return (void *)a_cas(p, (int)t, (int)s);
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	return a_cas(p, t, s);
-}
-
 static inline void a_inc(volatile int *x)
 {
 	a_fetch_add(x, 1);
diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
index 124b37ac38853b1f816427c5bc1d1bb2c95691f0..ae0a576c11e0fd2ab4cdb8c9e3c8b3b255a04d27 100644
--- a/arch/x32/atomic.h
+++ b/arch/x32/atomic.h
@@ -27,11 +27,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
 		: "=m"(*p) : "r"(v) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
-{
-	__asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
-}
-
 static inline void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; or %1, %0"
@@ -45,13 +40,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	__asm__( "lock ; cmpxchg %3, %1"
-		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-	return t;
-}
-
 static inline int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
@@ -59,17 +47,6 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
-	return v;
-}
-static inline long a_swap_l(volatile void *x, long v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
-	return v;
-}
-
 static inline void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; or %1, %0"
@@ -88,8 +65,6 @@ static inline int a_swap(volatile int *x, int v)
 	return v;
 }
 
-#define a_xchg a_swap
-
 static inline int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 124b37ac38853b1f816427c5bc1d1bb2c95691f0..ae0a576c11e0fd2ab4cdb8c9e3c8b3b255a04d27 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -27,11 +27,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
 		: "=m"(*p) : "r"(v) : "memory" );
 }
 
-static inline void a_store_l(volatile void *p, long x)
-{
-	__asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
-}
-
 static inline void a_or_l(volatile void *p, long v)
 {
 	__asm__( "lock ; or %1, %0"
@@ -45,13 +40,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
 	return t;
 }
 
-static inline long a_cas_l(volatile void *p, long t, long s)
-{
-	__asm__( "lock ; cmpxchg %3, %1"
-		: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
-	return t;
-}
-
 static inline int a_cas(volatile int *p, int t, int s)
 {
 	__asm__( "lock ; cmpxchg %3, %1"
@@ -59,17 +47,6 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void *a_swap_p(void *volatile *x, void *v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
-	return v;
-}
-static inline long a_swap_l(volatile void *x, long v)
-{
-	__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
-	return v;
-}
-
 static inline void a_or(volatile void *p, int v)
 {
 	__asm__( "lock ; or %1, %0"
@@ -88,8 +65,6 @@ static inline int a_swap(volatile int *x, int v)
 	return v;
 }
 
-#define a_xchg a_swap
-
 static inline int a_fetch_add(volatile int *x, int v)
 {
 	__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
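
Note (not part of the patch): the functions removed above are unused wrappers, and the dropped a_xchg macro was only an alias for the retained a_swap. Everything they did remains expressible with the primitives the headers keep (a_cas, a_cas_p, a_swap). Below is a minimal caller-side sketch, using the hypothetical name my_cas_long, of how a long-sized CAS could be rebuilt from those retained primitives; it assumes only what the patch context shows about their signatures.

	#include <limits.h>
	#include "atomic.h"	/* the arch-specific header patched above */

	/* Hypothetical helper, not musl API: a CAS on a long-sized object
	 * built from the primitives this patch keeps. */
	static inline long my_cas_long(volatile long *p, long t, long s)
	{
	#if LONG_MAX == INT_MAX
		/* On the 32-bit archs above, long has the same width as int,
		 * so the int-sized a_cas covers it. */
		return a_cas((volatile int *)p, (int)t, (int)s);
	#else
		/* On LP64 targets such as x86_64, long is pointer-sized, so
		 * the retained pointer CAS does the same job. */
		return (long)a_cas_p((volatile void *)p, (void *)t, (void *)s);
	#endif
	}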