Commit e65cfad0 authored by Pavel Begunkov, committed by Joseph Qi

pcpu_ref: add percpu_ref_tryget_many()

to #26323588

commit 4e5ef02317b12e2ed3d604281ffb6b75261f7612 upstream.

Add percpu_ref_tryget_many(), which works the same way as
percpu_ref_tryget(), but grabs a specified number of refs.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Acked-by: Dennis Zhou <dennis@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Acked-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
Parent 745a29cc
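For context, below is a minimal sketch of how a caller might use the new helper to grab one reference per request when submitting a batch. The my_ctx and my_submit_one names are hypothetical illustrations, not part of this patch; only the percpu_ref calls are the real API.

#include <linux/errno.h>
#include <linux/percpu-refcount.h>

/* Hypothetical per-context structure with an embedded percpu_ref. */
struct my_ctx {
	struct percpu_ref refs;
};

/*
 * Hypothetical per-request work; each request owns one of the grabbed
 * refs and drops it with percpu_ref_put() on completion.
 */
static void my_submit_one(struct my_ctx *ctx, unsigned int idx)
{
	/* ... issue request idx ... */
}

static int my_submit_batch(struct my_ctx *ctx, unsigned int nr)
{
	unsigned int i;

	/* One tryget covering the whole batch instead of nr separate trygets. */
	if (!percpu_ref_tryget_many(&ctx->refs, nr))
		return -EAGAIN;

	for (i = 0; i < nr; i++)
		my_submit_one(ctx, i);

	return 0;
}

In the percpu fast path this turns nr per-cpu increments into a single this_cpu_add(); in the atomic slow path the whole batch succeeds or fails as one atomic_long_add_unless(). The existing percpu_ref_put_many() is the matching way to drop a batch of references at once.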
@@ -209,15 +209,17 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 }
 
 /**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
+ * @nr: number of references to get
  *
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
  * Returns %true on success; %false on failure.
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					   unsigned long nr)
 {
 	unsigned long __percpu *percpu_count;
 	bool ret;
@@ -225,10 +227,10 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	rcu_read_lock_sched();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
+		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
+		ret = atomic_long_add_unless(&ref->count, nr, 0);
 	}
 
 	rcu_read_unlock_sched();
@@ -236,6 +238,20 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	return ret;
 }
 
+/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+	return percpu_ref_tryget_many(ref, 1);
+}
+
 /**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get