openanolis / cloud-kernel
Commit ae0b5c2f
Authored Jun 08, 2016 by Ingo Molnar

Merge branch 'locking/urgent' into locking/core, to pick up dependency

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 331b6d8c 2c610022
Showing 2 changed files with 77 additions and 36 deletions (+77 -36)

include/asm-generic/qspinlock.h   +17 -36
kernel/locking/qspinlock.c        +60 -0
include/asm-generic/qspinlock.h (view file @ ae0b5c2f)
@@ -21,38 +21,34 @@
 #include <asm-generic/qspinlock_types.h>
 
 /**
+ * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+#ifndef queued_spin_unlock_wait
+extern void queued_spin_unlock_wait(struct qspinlock *lock);
+#endif
+
+/**
  * queued_spin_is_locked - is the spinlock locked?
  * @lock: Pointer to queued spinlock structure
  * Return: 1 if it is locked, 0 otherwise
  */
+#ifndef queued_spin_is_locked
 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
 {
 	/*
-	 * queued_spin_lock_slowpath() can ACQUIRE the lock before
-	 * issuing the unordered store that sets _Q_LOCKED_VAL.
-	 *
-	 * See both smp_cond_acquire() sites for more detail.
-	 *
-	 * This however means that in code like:
-	 *
-	 *   spin_lock(A)		spin_lock(B)
-	 *   spin_unlock_wait(B)	spin_is_locked(A)
-	 *   do_something()		do_something()
-	 *
-	 * Both CPUs can end up running do_something() because the store
-	 * setting _Q_LOCKED_VAL will pass through the loads in
-	 * spin_unlock_wait() and/or spin_is_locked().
+	 * See queued_spin_unlock_wait().
 	 *
-	 * Avoid this by issuing a full memory barrier between the spin_lock()
-	 * and the loads in spin_unlock_wait() and spin_is_locked().
-	 *
-	 * Note that regular mutual exclusion doesn't care about this
-	 * delayed store.
+	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
+	 * isn't immediately observable.
 	 */
-	smp_mb();
-	return atomic_read(&lock->val) & _Q_LOCKED_MASK;
+	return atomic_read(&lock->val);
 }
+#endif
 
 /**
  * queued_spin_value_unlocked - is the spinlock structure unlocked?
@@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 }
 #endif
 
-/**
- * queued_spin_unlock_wait - wait until current lock holder releases the lock
- * @lock : Pointer to queued spinlock structure
- *
- * There is a very slight possibility of live-lock if the lockers keep coming
- * and the waiter is just unfortunate enough to not see any unlock state.
- */
-static inline void queued_spin_unlock_wait(struct qspinlock *lock)
-{
-	/* See queued_spin_is_locked() */
-	smp_mb();
-	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
-		cpu_relax();
-}
-
 #ifndef virt_spin_lock
 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
 {
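The comment removed from queued_spin_is_locked() above, and restated over the new queued_spin_unlock_wait() in kernel/locking/qspinlock.c below, is about cross-lock constructs of roughly the following shape. Here is a minimal illustrative sketch of that pattern against the 2016-era generic spinlock API; the lock names A and B and do_something() are placeholders taken from the comment, not a real caller:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(A);
static DEFINE_SPINLOCK(B);

/* placeholder for the work the two paths must not run concurrently */
static void do_something(void)
{
}

/* runs on one CPU */
static void path_a(void)
{
	spin_lock(&A);
	spin_unlock_wait(&B);		/* wait for any current holder of B to drop it */
	do_something();
	spin_unlock(&A);
}

/* runs concurrently on another CPU */
static void path_b(void)
{
	spin_lock(&B);
	if (!spin_is_locked(&A))
		do_something();
	spin_unlock(&B);
}

The failure mode the removed comment warns about is that the unordered store of _Q_LOCKED_VAL from spin_lock() can still be in flight when the other CPU loads the lock word, so both paths end up in do_something() at once. The old code avoided that with smp_mb() inside the query primitives; this patch instead relies on the other !0 state argued for in the qspinlock.c comment below.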
kernel/locking/qspinlock.c (view file @ ae0b5c2f)
@@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath	native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
+ * issuing an _unordered_ store to set _Q_LOCKED_VAL.
+ *
+ * This means that the store can be delayed, but no later than the
+ * store-release from the unlock. This means that simply observing
+ * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
+ *
+ * There are two paths that can issue the unordered store:
+ *
+ *  (1) clear_pending_set_locked():	*,1,0 -> *,0,1
+ *
+ *  (2) set_locked():			t,0,0 -> t,0,1 ; t != 0
+ *      atomic_cmpxchg_relaxed():	t,0,0 -> 0,0,1
+ *
+ * However, in both cases we have other !0 state we've set before to queue
+ * ourseves:
+ *
+ * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
+ * load is constrained by that ACQUIRE to not pass before that, and thus must
+ * observe the store.
+ *
+ * For (2) we have a more intersting scenario. We enqueue ourselves using
+ * xchg_tail(), which ends up being a RELEASE. This in itself is not
+ * sufficient, however that is followed by an smp_cond_acquire() on the same
+ * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
+ * guarantees we must observe that store.
+ *
+ * Therefore both cases have other !0 state that is observable before the
+ * unordered locked byte store comes through. This means we can use that to
+ * wait for the lock store, and then wait for an unlock.
+ */
+#ifndef queued_spin_unlock_wait
+void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+	u32 val;
+
+	for (;;) {
+		val = atomic_read(&lock->val);
+
+		if (!val) /* not locked, we're done */
+			goto done;
+
+		if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
+			break;
+
+		/* not locked, but pending, wait until we observe the lock */
+		cpu_relax();
+	}
+
+	/* any unlock is good */
+	while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+		cpu_relax();
+
+done:
+	smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+}
+EXPORT_SYMBOL(queued_spin_unlock_wait);
+#endif
+
 #endif /* _GEN_PV_LOCK_SLOWPATH */
 
 /**
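The block comment added above queued_spin_unlock_wait() argues that the unordered store of _Q_LOCKED_VAL is always preceded by some other non-zero state in the lock word (the pending bit or a queue tail), which is why the function can first spin until it observes the locked byte and only then spin until that byte clears. The following is a simplified userspace model of that two-phase wait using C11 atomics; the toy_* names, the TOY_LOCKED_MASK bit layout, and cpu_relax_hint() are invented for illustration and are not the kernel's definitions:

#include <stdatomic.h>

#define TOY_LOCKED_MASK 0x000000ffu	/* low byte: locked (stand-in for _Q_LOCKED_MASK) */
/* any bits above the low byte stand in for the pending bit and the MCS tail */

struct toy_qspinlock {
	_Atomic unsigned int val;
};

static inline void cpu_relax_hint(void)
{
	/* busy-wait hint; on x86 this could be a PAUSE instruction */
}

/* Wait until the current (or an imminent) holder releases the toy lock. */
static void toy_spin_unlock_wait(struct toy_qspinlock *lock)
{
	unsigned int val;

	for (;;) {
		val = atomic_load_explicit(&lock->val, memory_order_relaxed);

		if (!val)			/* completely unlocked: done */
			goto done;

		if (val & TOY_LOCKED_MASK)	/* locked byte visible: wait for unlock */
			break;

		/* !0 but locked byte not yet visible: the delayed store is coming */
		cpu_relax_hint();
	}

	/* phase two: wait for any release of the locked byte */
	while (atomic_load_explicit(&lock->val, memory_order_relaxed) & TOY_LOCKED_MASK)
		cpu_relax_hint();

done:
	/* rough stand-in for the kernel's smp_rmb() after the control dependency */
	atomic_thread_fence(memory_order_acquire);
}

Phase one separates "genuinely unlocked" from "acquired, but the locked-byte store has not landed yet"; phase two is the ordinary wait for the release, mirroring the kernel function added above.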