Commit feaf7cf1 authored by Becky Bruce, committed by Paul Mackerras

[PATCH] powerpc: merge atomic.h, memory.h

powerpc: Merge atomic.h and memory.h into powerpc

Merged atomic.h into include/asm-powerpc.  Moved asm-style HMT_ defines from
memory.h into ppc_asm.h, where there were already HMT_ defines; moved C-style
HMT_ defines to processor.h.  Renamed memory.h to synch.h to better reflect
its contents.
Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Jon Loeliger <linuxppc@jdl.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent 2bfadee3
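The message's asm-style vs. C-style distinction: the asm-style HMT_ defines are bare mnemonics for .S sources (now in ppc_asm.h), while the C-style ones wrap the same instructions in inline-asm statements (now in processor.h). A minimal usage sketch of the C-style macros, taken from the diff below; the spin loop itself is illustrative, not part of the patch:

#define HMT_low()	asm volatile("or 1,1,1		# low priority")
#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")

/* drop SMT thread priority while busy-waiting, restore it afterwards */
static void spin_politely(volatile int *flag)
{
	HMT_low();
	while (!*flag)
		;
	HMT_medium();
}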
+#ifndef _ASM_POWERPC_ATOMIC_H_
+#define _ASM_POWERPC_ATOMIC_H_
 /*
  * PowerPC atomic operations
  */
-#ifndef _ASM_PPC_ATOMIC_H_
-#define _ASM_PPC_ATOMIC_H_
 typedef struct { volatile int counter; } atomic_t;
 #ifdef __KERNEL__
+#include <asm/synch.h>
 #define ATOMIC_INIT(i)		{ (i) }
 #define atomic_read(v)		((v)->counter)
 #define atomic_set(v,i)		(((v)->counter) = (i))
-extern void atomic_clear_mask(unsigned long mask, unsigned long *addr);
-#ifdef CONFIG_SMP
-#define SMP_SYNC	"sync"
-#define SMP_ISYNC	"\n\tisync"
-#else
-#define SMP_SYNC	""
-#define SMP_ISYNC
-#endif
 /* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
  * The old ATOMIC_SYNC_FIX covered some but not all of this.
  */
@@ -53,12 +44,13 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
 	int t;
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_add_return\n\
	add	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -88,12 +80,13 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
 	int t;
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%2		# atomic_sub_return\n\
	subf	%0,%1,%0\n"
 	PPC405_ERR77(0,%2)
 "	stwcx.	%0,0,%2 \n\
	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (a), "r" (&v->counter)
 	: "cc", "memory");
@@ -121,12 +114,13 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	int t;
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_inc_return\n\
	addic	%0,%0,1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1 \n\
	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -164,12 +158,13 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	int t;
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_return\n\
	addic	%0,%0,-1\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
 	: "cc", "memory");
@@ -189,13 +184,14 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	int t;
 	__asm__ __volatile__(
+	EIEIO_ON_SMP
 "1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
	addic.	%0,%0,-1\n\
	blt-	2f\n"
 	PPC405_ERR77(0,%1)
 "	stwcx.	%0,0,%1\n\
	bne-	1b"
-	SMP_ISYNC
+	ISYNC_ON_SMP
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
@@ -204,11 +200,10 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
 	return t;
 }
-#define __MB	__asm__ __volatile__ (SMP_SYNC : : : "memory")
-#define smp_mb__before_atomic_dec()	__MB
-#define smp_mb__after_atomic_dec()	__MB
-#define smp_mb__before_atomic_inc()	__MB
-#define smp_mb__after_atomic_inc()	__MB
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 #endif /* __KERNEL__ */
-#endif /* _ASM_PPC_ATOMIC_H_ */
+#endif /* _ASM_POWERPC_ATOMIC_H_ */
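For reference, a minimal sketch of how the merged header's API is used (a standard refcount idiom, not from this patch; the *_return variants act as full barriers on SMP thanks to the EIEIO_ON_SMP/ISYNC_ON_SMP pair added above):

#include <asm/atomic.h>

static atomic_t refs = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_inc(&refs);
}

/* returns nonzero once the last reference is dropped */
static int put_ref(void)
{
	return atomic_dec_return(&refs) == 0;
}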
@@ -75,8 +75,11 @@
 #define REST_32EVRS(n,s,base)	REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 /* Macros to adjust thread priority for Iseries hardware multithreading */
+#define HMT_VERY_LOW	or	31,31,31	# very low priority\n"
 #define HMT_LOW		or	1,1,1
+#define HMT_MEDIUM_LOW	or	6,6,6		# medium low priority\n"
 #define HMT_MEDIUM	or	2,2,2
+#define HMT_MEDIUM_HIGH	or	5,5,5		# medium high priority\n"
 #define HMT_HIGH	or	3,3,3
 /* handle instructions that older assemblers may not know */
......
-#ifndef _ASM_PPC64_MEMORY_H_
-#define _ASM_PPC64_MEMORY_H_
+#ifndef _ASM_POWERPC_SYNCH_H
+#define _ASM_POWERPC_SYNCH_H
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
 #include <linux/config.h>
+#ifdef __powerpc64__
+#define __SUBARCH_HAS_LWSYNC
+#endif
+#ifdef __SUBARCH_HAS_LWSYNC
+#    define LWSYNC	lwsync
+#else
+#    define LWSYNC	sync
+#endif
 /*
  * Arguably the bitops and *xchg operations don't imply any memory barrier
  * or SMP ordering, but in fact a lot of drivers expect them to imply
@@ -18,7 +22,7 @@
 #ifdef CONFIG_SMP
 #define EIEIO_ON_SMP	"eieio\n"
 #define ISYNC_ON_SMP	"\n\tisync"
-#define SYNC_ON_SMP	"lwsync\n\t"
+#define SYNC_ON_SMP	__stringify(LWSYNC) "\n"
 #else
 #define EIEIO_ON_SMP
 #define ISYNC_ON_SMP
@@ -43,19 +47,5 @@ static inline void isync(void)
 #define isync_on_smp()	__asm__ __volatile__("": : :"memory")
 #endif
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low()   asm volatile("or 31,31,31  # very low priority")
-#define HMT_low()	asm volatile("or 1,1,1		# low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
-#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
-#define HMT_high()	asm volatile("or 3,3,3		# high priority")
-#define HMT_VERY_LOW	"\tor	31,31,31	# very low priority\n"
-#define HMT_LOW		"\tor	1,1,1		# low priority\n"
-#define HMT_MEDIUM_LOW	"\tor	6,6,6		# medium low priority\n"
-#define HMT_MEDIUM	"\tor	2,2,2		# medium priority\n"
-#define HMT_MEDIUM_HIGH	"\tor	5,5,5		# medium high priority\n"
-#define HMT_HIGH	"\tor	3,3,3		# high priority\n"
-#endif
+#endif /* _ASM_POWERPC_SYNCH_H */
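How the new SYNC_ON_SMP expands, as a hedged sketch: __stringify (from <linux/stringify.h>, assumed available in the kernel build) turns the LWSYNC token into an asm string, so a 64-bit SMP build emits "lwsync\n" while a 32-bit SMP build falls back to the heavier "sync\n":

#include <linux/stringify.h>

/* on a 64-bit SMP build this is __asm__("lwsync\n" ::: "memory") */
static inline void light_sync(void)
{
	__asm__ __volatile__(SYNC_ON_SMP : : : "memory");
}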
@@ -8,6 +8,7 @@
 #include <asm/page.h>
 #include <asm/byteorder.h>
+#include <asm/synch.h>
 #include <asm/mmu.h>
 #define SIO_CONFIG_RA	0x398
@@ -440,16 +441,6 @@ extern inline void * phys_to_virt(unsigned long address)
 #define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
 #define page_to_bus(page)	(page_to_phys(page) + PCI_DRAM_OFFSET)
-/*
- * Enforce In-order Execution of I/O:
- * Acts as a barrier to ensure all previous I/O accesses have
- * completed before any further ones are issued.
- */
-extern inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
 /* Enforce in-order execution of data I/O.
  * No distinction between read/write on PPC; use eieio for all three.
  */
......
-/*
- * PowerPC64 atomic operations
- *
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_PPC64_ATOMIC_H_
-#define _ASM_PPC64_ATOMIC_H_
-#include <asm/memory.h>
-typedef struct { volatile int counter; } atomic_t;
-#define ATOMIC_INIT(i)	{ (i) }
-#define atomic_read(v)		((v)->counter)
-#define atomic_set(v,i)		(((v)->counter) = (i))
-static __inline__ void atomic_add(int a, atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_add\n\
-	add	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-static __inline__ int atomic_add_return(int a, atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_add_return\n\
-	add	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-	return t;
-}
-#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
-static __inline__ void atomic_sub(int a, atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%3		# atomic_sub\n\
-	subf	%0,%2,%0\n\
-	stwcx.	%0,0,%3\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (a), "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-static __inline__ int atomic_sub_return(int a, atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%2		# atomic_sub_return\n\
-	subf	%0,%1,%0\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (a), "r" (&v->counter)
-	: "cc", "memory");
-	return t;
-}
-static __inline__ void atomic_inc(atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_inc\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-static __inline__ int atomic_inc_return(atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_inc_return\n\
-	addic	%0,%0,1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-	return t;
-}
-/*
- * atomic_inc_and_test - increment and test
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-static __inline__ void atomic_dec(atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-"1:	lwarx	%0,0,%2		# atomic_dec\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%2\n\
-	bne-	1b"
-	: "=&r" (t), "=m" (v->counter)
-	: "r" (&v->counter), "m" (v->counter)
-	: "cc");
-}
-static __inline__ int atomic_dec_return(atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_return\n\
-	addic	%0,%0,-1\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-	return t;
-}
-#define atomic_sub_and_test(a, v)	(atomic_sub_return((a), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return((v)) == 0)
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
-{
-	int t;
-	__asm__ __volatile__(
-	EIEIO_ON_SMP
-"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n\
-	addic.	%0,%0,-1\n\
-	blt-	2f\n\
-	stwcx.	%0,0,%1\n\
-	bne-	1b"
-	ISYNC_ON_SMP
-	"\n\
-2:"	: "=&r" (t)
-	: "r" (&v->counter)
-	: "cc", "memory");
-	return t;
-}
-#define smp_mb__before_atomic_dec()	smp_mb()
-#define smp_mb__after_atomic_dec()	smp_mb()
-#define smp_mb__before_atomic_inc()	smp_mb()
-#define smp_mb__after_atomic_inc()	smp_mb()
-#endif /* _ASM_PPC64_ATOMIC_H_ */
@@ -42,7 +42,7 @@
 #ifdef __KERNEL__
-#include <asm/memory.h>
+#include <asm/synch.h>
 /*
  * clear_bit doesn't imply a memory barrier
......
@@ -5,7 +5,7 @@
 #include <linux/futex.h>
 #include <asm/errno.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/uaccess.h>
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
......
@@ -15,7 +15,7 @@
 #ifdef CONFIG_PPC_ISERIES
 #include <asm/iSeries/iSeries_io.h>
 #endif
-#include <asm/memory.h>
+#include <asm/synch.h>
 #include <asm/delay.h>
 #include <asm-generic/iomap.h>
......
@@ -368,6 +368,14 @@ GLUE(.,name):
 #define mfasr()		({unsigned long rval; \
			asm volatile("mfasr %0" : "=r" (rval)); rval;})
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#define HMT_very_low()   asm volatile("or 31,31,31  # very low priority")
+#define HMT_low()	asm volatile("or 1,1,1		# low priority")
+#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
+#define HMT_medium()	asm volatile("or 2,2,2		# medium priority")
+#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
+#define HMT_high()	asm volatile("or 3,3,3		# high priority")
 static inline void set_tb(unsigned int upper, unsigned int lower)
 {
 	mttbl(0);
......
@@ -13,7 +13,7 @@
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <asm/hw_irq.h>
-#include <asm/memory.h>
+#include <asm/synch.h>
 /*
  * Memory barrier.
@@ -48,7 +48,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()	mb()
 #define smp_rmb()	rmb()
-#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
+#define smp_wmb()	eieio()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #else
 #define smp_mb()	__asm__ __volatile__("": : :"memory")
......
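Both the eieio() block deleted from the 32-bit io.h above and the simplified smp_wmb() here rely on synch.h providing the helper; its body (unchanged by this patch) matches what io.h used to open-code:

static inline void eieio(void)
{
	__asm__ __volatile__("eieio" : : : "memory");
}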