OpenHarmony / kernel_linux

Commit 48fade6c
Authored Oct 23, 2009 by Tony Luck

Pull ticket4byte into release branch

Parents: b94b0808 1502f08e
Showing 2 changed files with 42 additions and 22 deletions (+42 −22):

arch/ia64/include/asm/spinlock.h        +41 −21
arch/ia64/include/asm/spinlock_types.h  +1  −1
arch/ia64/include/asm/spinlock.h

@@ -25,61 +25,82 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *   63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |     now_serving         |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */

-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)

 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;

-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);

-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;

-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }

 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
-
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
-
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
-	return 0;
+	int tmp = ACCESS_ONCE(lock->lock);
+
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
+	return 0;
 }

 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+}
+
+static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+{
+	int	*p = (int *)&lock->lock, ticket;
+
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+		if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
+		cpu_relax();
+	}
 }

 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);

-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }

 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);

-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }

 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -116,8 +137,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,

 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
-		cpu_relax();
+	__ticket_spin_unlock_wait(lock);
 }

 #define __raw_read_can_lock(rw)	(*(volatile int *)(rw) >= 0)
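The layout comment in the hunk above is the heart of the change: the caller's ticket and the now_serving counter share one 32-bit word, ia64_fetchadd(1, p, acq) both allocates a ticket and returns the old word, and ownership reduces to XOR-ing the two 15-bit fields. A minimal user-space sketch of that encoding, assuming nothing beyond the patch's TICKET_* constants (pack, is_my_turn and main are hypothetical helpers, not kernel code):

#include <stdio.h>

#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

/* Pack now_serving (bits 31..17) and next_ticket (bits 14..0). */
static unsigned int pack(unsigned int now_serving, unsigned int next_ticket)
{
	return ((now_serving & TICKET_MASK) << TICKET_SHIFT) |
	       (next_ticket & TICKET_MASK);
}

/* The ownership test used throughout the patch: a CPU holding ticket t
 * owns the lock when now_serving == t, i.e. when the XOR of the two
 * 15-bit fields is zero. */
static int is_my_turn(unsigned int word, unsigned int my_ticket)
{
	return !(((word >> TICKET_SHIFT) ^ my_ticket) & TICKET_MASK);
}

int main(void)
{
	unsigned int w = pack(3, 5);	/* serving ticket 3; next free ticket is 5 */

	printf("ticket 3 runs: %d\n", is_my_turn(w, 3));	/* prints 1 */
	printf("ticket 4 runs: %d\n", is_my_turn(w, 4));	/* prints 0 */
	return 0;
}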
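The rewritten __ticket_spin_unlock is also worth unpacking. Linux runs ia64 little-endian, so (unsigned short *)&lock->lock + 1 addresses bits 31..16 of the word: now_serving sits in bits 15..1 of that halfword and the word's pad bit 16 lands in its bit 0, while the ld2.bias load hints that the cache line should be acquired for writing. Adding 2 therefore advances now_serving by one, and the & ~1 clears any carry that next_ticket increments have pushed into the pad bit. A worked example with illustrative values, under those layout assumptions:

#include <stdio.h>

int main(void)
{
	/* Upper halfword of the lock word: now_serving == 3 in bits 15..1,
	 * with the pad bit (bit 0 here, bit 16 of the full word) set by a
	 * carry out of next_ticket. */
	unsigned short high = 0x0007;

	high = (high + 2) & ~1;		/* what __ticket_spin_unlock stores back */

	printf("0x%04x\n", high);	/* 0x0008: now_serving == 4, pad bit clear */
	return 0;
}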
arch/ia64/include/asm/spinlock_types.h

@@ -6,7 +6,7 @@
 #endif

 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;

 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
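With both fields down to 15 bits the whole lock now fits in 32 bits, which is what this type change records. A hypothetical compile-time check, not part of the patch, that makes the size assumption explicit:

#include <assert.h>

typedef struct {
	volatile unsigned int lock;
} raw_spinlock_t;

/* 15 bits of next_ticket plus 15 bits of now_serving plus 2 pad bits
 * fit exactly in 32 bits. */
static_assert(sizeof(raw_spinlock_t) == 4, "ticket lock must be 4 bytes");

int main(void) { return 0; }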