未验证 提交 e0569fdc 编写于 作者: O openeuler-ci-bot 提交者: Gitee

!1026 [sync] PR-947: locking/rwsem: Prevent potential lock starvation

Merge Pull Request from: @openeuler-sync-bot 
 

Origin pull request: 
https://gitee.com/openeuler/kernel/pulls/947 
 
PR sync from:  Yang Yingliang <yangyingliang@huawei.com>
 https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/thread/YNL7N26QMD5Q6UDTPNELYGIOXBXP4EWZ/ 
Prevent potential lock starvation by read lock.

Peter Zijlstra (1):
  locking/rwsem: Better collate rwsem_read_trylock()

Waiman Long (2):
  locking/rwsem: Pass the current atomic count to
    rwsem_down_read_slowpath()
  locking/rwsem: Prevent potential lock starvation


-- 
2.25.1
 
 
Link: https://gitee.com/openeuler/kernel/pulls/1026 

Reviewed-by: Xie XiuQi <xiexiuqi@huawei.com> 
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com> 
......@@ -270,12 +270,19 @@ static inline void rwsem_set_nonspinnable(struct rw_semaphore *sem)
owner | RWSEM_NONSPINNABLE));
}
static inline bool rwsem_read_trylock(struct rw_semaphore *sem)
static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
{
long cnt = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
if (WARN_ON_ONCE(cnt < 0))
*cntp = atomic_long_add_return_acquire(RWSEM_READER_BIAS, &sem->count);
if (WARN_ON_ONCE(*cntp < 0))
rwsem_set_nonspinnable(sem);
return !(cnt & RWSEM_READ_FAILED_MASK);
if (!(*cntp & RWSEM_READ_FAILED_MASK)) {
rwsem_set_reader_owned(sem);
return true;
}
return false;
}
/*
......@@ -989,18 +996,29 @@ rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
* Wait for the read lock to be granted
*/
static struct rw_semaphore __sched *
rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
{
long count, adjustment = -RWSEM_READER_BIAS;
long owner, adjustment = -RWSEM_READER_BIAS;
long rcnt = (count >> RWSEM_READER_SHIFT);
struct rwsem_waiter waiter;
DEFINE_WAKE_Q(wake_q);
bool wake = false;
/*
* To prevent a constant stream of readers from starving a sleeping
* waiter, don't attempt optimistic spinning if the lock is currently
* owned by readers.
*/
owner = atomic_long_read(&sem->owner);
if ((owner & RWSEM_READER_OWNED) && (rcnt > 1) &&
!(count & RWSEM_WRITER_LOCKED))
goto queue;
/*
* Save the current read-owner of rwsem, if available, and the
* reader nonspinnable bit.
*/
waiter.last_rowner = atomic_long_read(&sem->owner);
waiter.last_rowner = owner;
if (!(waiter.last_rowner & RWSEM_READER_OWNED))
waiter.last_rowner &= RWSEM_RD_NONSPINNABLE;
......@@ -1337,34 +1355,34 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
*/
/*
 * Lock for reading, uninterruptibly.
 *
 * The trylock fast path already marks the rwsem reader-owned on
 * success, so no further work is needed there; on failure the atomic
 * count sampled by the trylock is handed to the slowpath to avoid an
 * extra read of sem->count.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		rwsem_down_read_slowpath(sem, count, TASK_UNINTERRUPTIBLE);
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
}
/*
 * Lock for reading, interruptible by signals.
 *
 * Returns 0 on success, -EINTR if the sleep in the slowpath was
 * interrupted. As in __down_read(), the trylock fast path sets reader
 * ownership itself and passes the sampled count to the slowpath.
 */
static inline int __down_read_interruptible(struct rw_semaphore *sem)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, TASK_INTERRUPTIBLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
	return 0;
}
/*
 * Lock for reading, interruptible only by fatal signals.
 *
 * Returns 0 on success, -EINTR if the task was killed while sleeping
 * in the slowpath. Mirrors __down_read_interruptible() but sleeps in
 * TASK_KILLABLE state.
 */
static inline int __down_read_killable(struct rw_semaphore *sem)
{
	long count;

	if (!rwsem_read_trylock(sem, &count)) {
		if (IS_ERR(rwsem_down_read_slowpath(sem, count, TASK_KILLABLE)))
			return -EINTR;
		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
	}
	return 0;
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册