openanolis / cloud-kernel
Commit fef74705
Authored Oct 01, 2007 by Ralf Baechle
[MIPS] Type proof reimplementation of cmpxchg.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent f6a9e6de
Showing 3 changed files with 109 additions and 260 deletions (+109 -260)
include/asm-mips/cmpxchg.h   +107 -0
include/asm-mips/local.h     +1 -0
include/asm-mips/system.h    +1 -260
include/asm-mips/cmpxchg.h  0 → 100644 (new file)
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/irqflags.h>
#define __HAVE_ARCH_CMPXCHG 1
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
	__typeof(*(m)) __ret; \
 \
	if (cpu_has_llsc && R10000_LLSC_WAR) { \
		__asm__ __volatile__( \
		"	.set	push				\n" \
		"	.set	noat				\n" \
		"	.set	mips3				\n" \
		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n" \
		"	bne	%0, %z3, 2f			\n" \
		"	.set	mips0				\n" \
		"	move	$1, %z4				\n" \
		"	.set	mips3				\n" \
		"	" st "	$1, %1				\n" \
		"	beqzl	$1, 1b				\n" \
		"2:						\n" \
		"	.set	pop				\n" \
		: "=&r" (__ret), "=R" (*m) \
		: "R" (*m), "Jr" (old), "Jr" (new) \
		: "memory"); \
	} else if (cpu_has_llsc) { \
		__asm__ __volatile__( \
		"	.set	push				\n" \
		"	.set	noat				\n" \
		"	.set	mips3				\n" \
		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n" \
		"	bne	%0, %z3, 2f			\n" \
		"	.set	mips0				\n" \
		"	move	$1, %z4				\n" \
		"	.set	mips3				\n" \
		"	" st "	$1, %1				\n" \
		"	beqz	$1, 3f				\n" \
		"2:						\n" \
		"	.subsection 2				\n" \
		"3:	b	1b				\n" \
		"	.previous				\n" \
		"	.set	pop				\n" \
		: "=&r" (__ret), "=R" (*m) \
		: "R" (*m), "Jr" (old), "Jr" (new) \
		: "memory"); \
	} else { \
		unsigned long __flags; \
 \
		raw_local_irq_save(__flags); \
		__ret = *m; \
		if (__ret == old) \
			*m = new; \
		raw_local_irq_restore(__flags); \
	} \
 \
	__ret; \
})
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern void __cmpxchg_called_with_bad_pointer(void);
#define __cmpxchg(ptr,old,new,barrier) \
({ \
	__typeof__(ptr) __ptr = (ptr); \
	__typeof__(*(ptr)) __old = (old); \
	__typeof__(*(ptr)) __new = (new); \
	__typeof__(*(ptr)) __res = 0; \
 \
	barrier; \
 \
	switch (sizeof(*(__ptr))) { \
	case 4: \
		__res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
		break; \
	case 8: \
		if (sizeof(long) == 8) { \
			__res = __cmpxchg_asm("lld", "scd", __ptr, \
					   __old, __new); \
			break; \
		} \
	default: \
		__cmpxchg_called_with_bad_pointer(); \
		break; \
	} \
 \
	barrier; \
 \
	__res; \
})
#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_llsc_mb())
#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new,)
#endif /* __ASM_CMPXCHG_H */
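For orientation only (not part of the commit): a minimal sketch of how the new sizeof-dispatching cmpxchg() is typically used by a caller. The function and variable names below are made up for illustration; the only interface assumed is the cmpxchg() macro defined above, which returns the value that was in *ptr before the attempt, cast to the type of *ptr, and turns an unsupported operand size into a link error via __cmpxchg_called_with_bad_pointer().

#include <asm/cmpxchg.h>

/* Hypothetical caller: try to claim a 32-bit lock word atomically.
 * cmpxchg() stores 1 only if the current value is still 0; the 4-byte
 * operand selects the ll/sc path of __cmpxchg_asm().
 */
static inline int try_claim(volatile unsigned int *lock_word)
{
	return cmpxchg(lock_word, 0, 1) == 0;	/* nonzero on success */
}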
include/asm-mips/local.h
...
...
@@ -4,6 +4,7 @@
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <asm/atomic.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>
typedef struct
...
...
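Why local.h now needs <asm/cmpxchg.h>: the local_t helpers in the elided part of this file are built on the cmpxchg_local() macro, which this commit moves into the new header. A rough sketch of that dependency follows; it is an assumption about the elided code, not a line shown in this hunk, and the exact definition may differ.

/* Sketch only; the real definition lives in the elided part of local.h. */
#define local_cmpxchg(l, o, n) \
	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))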
include/asm-mips/system.h
...
...
@@ -17,6 +17,7 @@
#include <asm/addrspace.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/war.h>
...
...
@@ -194,266 +195,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqz	$1, 3f				\n"
		"2:						\n"
		"	.subsection 2				\n"
		"3:	b	1b				\n"
		"	.previous				\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}

static inline unsigned long __cmpxchg_u32_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	ll	%0, %2		# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f			\n"
		"	.set	mips0				\n"
		"	move	$1, %z4				\n"
		"	.set	mips3				\n"
		"	sc	$1, %1				\n"
		"	beqz	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqz	$1, 3f				\n"
		"2:						\n"
		"	.subsection 2				\n"
		"3:	b	1b				\n"
		"	.previous				\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		raw_local_irq_restore(flags);	/* implies memory barrier */
	}

	smp_llsc_mb();

	return retval;
}

static inline unsigned long __cmpxchg_u64_local(volatile int * m,
	unsigned long old, unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqzl	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	noat				\n"
		"	.set	mips3				\n"
		"1:	lld	%0, %2		# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f			\n"
		"	move	$1, %z4				\n"
		"	scd	$1, %1				\n"
		"	beqz	$1, 1b				\n"
		"2:						\n"
		"	.set	pop				\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier */
	}

	return retval;
}

#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels

extern unsigned long __cmpxchg_u64_local_unsupported_on_32bit_kernels(
	volatile int * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64_local __cmpxchg_u64_local_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static inline unsigned long __cmpxchg_local(volatile void * ptr,
	unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

#define cmpxchg_local(ptr,old,new) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
		(unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))

extern void set_handler(unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long len);
...
...