diff --git a/arch/i386/atomic.h b/arch/i386/atomic.h
index 4fe7bded328befa977a175a54b162be00a1bd6d4..95fecbdcc51eb7faee9fff264388a3f7bc6d8bcd 100644
--- a/arch/i386/atomic.h
+++ b/arch/i386/atomic.h
@@ -50,16 +50,16 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void a_or(volatile void *p, int v)
+static inline void a_or(volatile int *p, int v)
 {
 	__asm__( "lock ; orl %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+static inline void a_and(volatile int *p, int v)
 {
 	__asm__( "lock ; andl %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
 static inline int a_swap(volatile int *x, int v)
diff --git a/arch/x32/atomic.h b/arch/x32/atomic.h
index 333098c399e95d065f51f9f9628663a906e7c887..b2014cc0da38bdb14dd605a0a32ac48cff181a95 100644
--- a/arch/x32/atomic.h
+++ b/arch/x32/atomic.h
@@ -47,16 +47,16 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void a_or(volatile void *p, int v)
+static inline void a_or(volatile int *p, int v)
 {
 	__asm__( "lock ; or %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+static inline void a_and(volatile int *p, int v)
 {
 	__asm__( "lock ; and %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
 static inline int a_swap(volatile int *x, int v)
diff --git a/arch/x86_64/atomic.h b/arch/x86_64/atomic.h
index 333098c399e95d065f51f9f9628663a906e7c887..b2014cc0da38bdb14dd605a0a32ac48cff181a95 100644
--- a/arch/x86_64/atomic.h
+++ b/arch/x86_64/atomic.h
@@ -47,16 +47,16 @@ static inline int a_cas(volatile int *p, int t, int s)
 	return t;
 }
 
-static inline void a_or(volatile void *p, int v)
+static inline void a_or(volatile int *p, int v)
 {
 	__asm__( "lock ; or %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
-static inline void a_and(volatile void *p, int v)
+static inline void a_and(volatile int *p, int v)
 {
 	__asm__( "lock ; and %1, %0"
-		: "=m"(*(int *)p) : "r"(v) : "memory" );
+		: "=m"(*p) : "r"(v) : "memory" );
 }
 
 static inline int a_swap(volatile int *x, int v)
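
A minimal sketch of how the changed helpers are exercised, assuming GCC or Clang on x86. The a_or/a_and bodies below are the i386 variants as they read after the patch; the flags word and the main harness are purely illustrative and not part of musl.

#include <stdio.h>

/* a_or/a_and as they read after the patch (i386 variant): the parameter is
 * volatile int *, so the asm memory operand no longer needs the (int *) cast. */
static inline void a_or(volatile int *p, int v)
{
	__asm__( "lock ; orl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

static inline void a_and(volatile int *p, int v)
{
	__asm__( "lock ; andl %1, %0"
		: "=m"(*p) : "r"(v) : "memory" );
}

int main(void)
{
	volatile int flags = 0x1;   /* illustrative flag word, not from musl */

	a_or(&flags, 0x4);          /* atomically set bit 2: flags becomes 0x5 */
	a_and(&flags, ~0x1);        /* atomically clear bit 0: flags becomes 0x4 */

	printf("flags = %#x\n", (unsigned)flags);
	return 0;
}

With the old volatile void * prototype any object pointer was accepted silently; the volatile int * prototype lets the compiler diagnose callers passing a pointer of the wrong type, and the cast inside the asm operand becomes unnecessary.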