Commit 90e51e45 authored by Rich Felker

clean up unused and inconsistent atomics in arch dirs

the a_cas_l, a_swap_l, a_swap_p, and a_store_l operations were
probably used a long time ago when only i386 and x86_64 were
supported. as other archs were added, support for them was
inconsistent, and they are obviously not in use at present. having
them around potentially confuses readers working on new ports, and the
type-punning hacks and inconsistent use of types in their definitions
is not a style I wish to perpetuate in the source tree, so removing
them seems appropriate.
Parent commit: c394763d
......@@ -70,11 +70,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return (void *)a_cas(p, (int)t, (int)s);
}
/* Long-width compare-and-swap: thin wrapper over the int-sized a_cas
 * (defined earlier in this header). Assumes long and int have the same
 * width on this 32-bit arch -- TODO confirm per-arch. Removed upstream
 * as unused (this commit). */
static inline long a_cas_l(volatile void *p, long t, long s)
{
return a_cas(p, t, s);
}
static inline int a_swap(volatile int *x, int v)
{
int old;
......
......@@ -30,11 +30,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
: : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
}
/* Atomic long store via a plain movl (aligned 32-bit stores are atomic
 * on x86); the (long *) cast of a void * is the type-punning style the
 * commit message calls out. "memory" clobber orders it as a barrier for
 * the compiler only. Removed upstream as unused. */
static inline void a_store_l(volatile void *p, long x)
{
__asm__( "movl %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
}
static inline void a_or_l(volatile void *p, long v)
{
__asm__( "lock ; orl %1, %0"
......@@ -48,13 +43,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return t;
}
/* Long-width CAS using lock cmpxchg on *(long *)p. The "a" constraint
 * pins t to the accumulator register as cmpxchg requires; on return t
 * holds the value previously in memory (old value on success or the
 * conflicting value on failure). Removed upstream as unused. */
static inline long a_cas_l(volatile void *p, long t, long s)
{
__asm__( "lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
return t;
}
static inline int a_cas(volatile int *p, int t, int s)
{
__asm__( "lock ; cmpxchg %3, %1"
......@@ -62,17 +50,6 @@ static inline int a_cas(volatile int *p, int t, int s)
return t;
}
/* Atomic pointer exchange: xchg with a memory operand is implicitly
 * locked on x86, so no lock prefix is needed. Returns the previous
 * value of *x. Removed upstream as unused. */
static inline void *a_swap_p(void *volatile *x, void *v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
return v;
}
/* Atomic long exchange via implicitly-locked xchg; note the void* ->
 * long* pun, inconsistent with a_swap_p's pointer-typed parameter --
 * one of the inconsistencies this commit removes. Returns the previous
 * value of *(long *)x. */
static inline long a_swap_l(volatile void *x, long v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
return v;
}
static inline void a_or(volatile void *p, int v)
{
__asm__( "lock ; orl %1, %0"
......
......@@ -45,11 +45,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return (void *)a_cas(p, (int)t, (int)s);
}
/* Long-width compare-and-swap: thin wrapper over the int-sized a_cas
 * (defined earlier in this header). Assumes long and int have the same
 * width on this 32-bit arch -- TODO confirm per-arch. Removed upstream
 * as unused (this commit). */
static inline long a_cas_l(volatile void *p, long t, long s)
{
return a_cas(p, t, s);
}
static inline int a_swap(volatile int *x, int v)
{
register int old, tmp;
......
......@@ -48,12 +48,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return (void *)a_cas(p, (int)t, (int)s);
}
/* Long-width compare-and-swap: thin wrapper over the int-sized a_cas
 * (defined earlier in this header). Assumes long and int have the same
 * width on this 32-bit arch -- TODO confirm per-arch. Removed upstream
 * as unused (this commit). */
static inline long a_cas_l(volatile void *p, long t, long s)
{
return a_cas(p, t, s);
}
static inline int a_swap(volatile int *x, int v)
{
int old, dummy;
......
......@@ -43,12 +43,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return (void *)a_cas(p, (int)t, (int)s);
}
/* Long-width compare-and-swap: thin wrapper over the int-sized a_cas
 * (defined earlier in this header). Assumes long and int have the same
 * width on this 32-bit arch -- TODO confirm per-arch. Removed upstream
 * as unused (this commit). */
static inline long a_cas_l(volatile void *p, long t, long s)
{
return a_cas(p, t, s);
}
static inline int a_swap(volatile int *x, int v)
{
int old;
......
......@@ -41,11 +41,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return (void *)a_cas(p, (int)t, (int)s);
}
/* Long-width compare-and-swap: thin wrapper over the int-sized a_cas
 * (defined earlier in this header). Assumes long and int have the same
 * width on this 32-bit arch -- TODO confirm per-arch. Removed upstream
 * as unused (this commit). */
static inline long a_cas_l(volatile void *p, long t, long s)
{
return a_cas(p, t, s);
}
static inline void a_inc(volatile int *x)
{
a_fetch_add(x, 1);
......
......@@ -27,11 +27,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
: "=m"(*p) : "r"(v) : "memory" );
}
/* Atomic long store via a plain mov (aligned native-width stores are
 * atomic on x86-64); the (long *) cast of a void * is the type-punning
 * style the commit message calls out. "memory" clobber is a compiler
 * barrier only. Removed upstream as unused. */
static inline void a_store_l(volatile void *p, long x)
{
__asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
}
static inline void a_or_l(volatile void *p, long v)
{
__asm__( "lock ; or %1, %0"
......@@ -45,13 +40,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return t;
}
/* Long-width CAS using lock cmpxchg on *(long *)p. The "a" constraint
 * pins t to the accumulator register as cmpxchg requires; on return t
 * holds the value previously in memory (old value on success or the
 * conflicting value on failure). Removed upstream as unused. */
static inline long a_cas_l(volatile void *p, long t, long s)
{
__asm__( "lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
return t;
}
static inline int a_cas(volatile int *p, int t, int s)
{
__asm__( "lock ; cmpxchg %3, %1"
......@@ -59,17 +47,6 @@ static inline int a_cas(volatile int *p, int t, int s)
return t;
}
/* Atomic pointer exchange: xchg with a memory operand is implicitly
 * locked on x86, so no lock prefix is needed. Returns the previous
 * value of *x. Removed upstream as unused. */
static inline void *a_swap_p(void *volatile *x, void *v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
return v;
}
/* Atomic long exchange via implicitly-locked xchg; note the void* ->
 * long* pun, inconsistent with a_swap_p's pointer-typed parameter --
 * one of the inconsistencies this commit removes. Returns the previous
 * value of *(long *)x. */
static inline long a_swap_l(volatile void *x, long v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
return v;
}
static inline void a_or(volatile void *p, int v)
{
__asm__( "lock ; or %1, %0"
......@@ -88,8 +65,6 @@ static inline int a_swap(volatile int *x, int v)
return v;
}
#define a_xchg a_swap
static inline int a_fetch_add(volatile int *x, int v)
{
__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
......
......@@ -27,11 +27,6 @@ static inline void a_or_64(volatile uint64_t *p, uint64_t v)
: "=m"(*p) : "r"(v) : "memory" );
}
/* Atomic long store via a plain mov (aligned native-width stores are
 * atomic on x86-64); the (long *) cast of a void * is the type-punning
 * style the commit message calls out. "memory" clobber is a compiler
 * barrier only. Removed upstream as unused. */
static inline void a_store_l(volatile void *p, long x)
{
__asm__( "mov %1, %0" : "=m"(*(long *)p) : "r"(x) : "memory" );
}
static inline void a_or_l(volatile void *p, long v)
{
__asm__( "lock ; or %1, %0"
......@@ -45,13 +40,6 @@ static inline void *a_cas_p(volatile void *p, void *t, void *s)
return t;
}
/* Long-width CAS using lock cmpxchg on *(long *)p. The "a" constraint
 * pins t to the accumulator register as cmpxchg requires; on return t
 * holds the value previously in memory (old value on success or the
 * conflicting value on failure). Removed upstream as unused. */
static inline long a_cas_l(volatile void *p, long t, long s)
{
__asm__( "lock ; cmpxchg %3, %1"
: "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
return t;
}
static inline int a_cas(volatile int *p, int t, int s)
{
__asm__( "lock ; cmpxchg %3, %1"
......@@ -59,17 +47,6 @@ static inline int a_cas(volatile int *p, int t, int s)
return t;
}
/* Atomic pointer exchange: xchg with a memory operand is implicitly
 * locked on x86, so no lock prefix is needed. Returns the previous
 * value of *x. Removed upstream as unused. */
static inline void *a_swap_p(void *volatile *x, void *v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(void **)x) : "0"(v) : "memory" );
return v;
}
/* Atomic long exchange via implicitly-locked xchg; note the void* ->
 * long* pun, inconsistent with a_swap_p's pointer-typed parameter --
 * one of the inconsistencies this commit removes. Returns the previous
 * value of *(long *)x. */
static inline long a_swap_l(volatile void *x, long v)
{
__asm__( "xchg %0, %1" : "=r"(v), "=m"(*(long *)x) : "0"(v) : "memory" );
return v;
}
static inline void a_or(volatile void *p, int v)
{
__asm__( "lock ; or %1, %0"
......@@ -88,8 +65,6 @@ static inline int a_swap(volatile int *x, int v)
return v;
}
#define a_xchg a_swap
static inline int a_fetch_add(volatile int *x, int v)
{
__asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册