Commit 2aad2a86 authored by Tejun Heo

percpu_ref: add PERCPU_REF_INIT_* flags

With the recent addition of percpu_ref_reinit(), a percpu_ref can now be
used as a persistent switch which can be turned on and off repeatedly,
where turning it off maps to killing the ref and waiting for it to
drain; however, there is currently no way to initialize a percpu_ref in
its off (killed and drained) state, which can be inconvenient for such
persistent switch use cases.

Similarly, percpu_ref_switch_to_atomic/percpu() allow dynamic selection
of the operation mode; however, a newly initialized percpu_ref is
always in percpu mode, making it impossible to avoid the latency
overhead of switching to atomic mode.

This patch adds @flags to percpu_ref_init() and implements the
following flags.

* PERCPU_REF_INIT_ATOMIC	: start ref in atomic mode
* PERCPU_REF_INIT_DEAD		: start ref killed and drained

These flags should be able to serve the above two use cases.

v2: target_core_tpg.c conversion was missing.  Fixed.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Parent: f47ad457
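
As an illustration of the persistent-switch use case, a minimal editor's
sketch (gate_ref, gate_ref_release, and the gate_* helpers are
hypothetical names, not part of this patch):

	#include <linux/percpu-refcount.h>

	static struct percpu_ref gate_ref;	/* hypothetical on/off gate */

	static void gate_ref_release(struct percpu_ref *ref)
	{
		/* all users have drained; the gate is now fully off.
		 * Must not sleep - may run from RCU callback context. */
	}

	static int gate_setup(void)
	{
		/* start in the "off" (killed and drained) state */
		return percpu_ref_init(&gate_ref, gate_ref_release,
				       PERCPU_REF_INIT_DEAD, GFP_KERNEL);
	}

	static void gate_turn_on(void)
	{
		percpu_ref_reinit(&gate_ref);	/* revive with ref == 1 */
	}

	static void gate_turn_off(void)
	{
		percpu_ref_kill(&gate_ref);	/* kill and start draining */
	}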
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1796,7 +1796,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 		goto err_hctxs;
 
 	if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
-			    GFP_KERNEL))
+			    0, GFP_KERNEL))
 		goto err_map;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -819,7 +819,7 @@ int core_tpg_add_lun(
 {
 	int ret;
 
-	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release,
+	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
 			      GFP_KERNEL);
 	if (ret < 0)
 		return ret;
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -661,10 +661,10 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
 	INIT_LIST_HEAD(&ctx->active_reqs);
 
-	if (percpu_ref_init(&ctx->users, free_ioctx_users, GFP_KERNEL))
+	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
 		goto err;
 
-	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, GFP_KERNEL))
+	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
 		goto err;
 
 	ctx->cpu = alloc_percpu(struct kioctx_cpu);
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -63,6 +63,21 @@ enum {
 	__PERCPU_REF_FLAG_BITS	= 2,
 };
 
+/* @flags for percpu_ref_init() */
+enum {
+	/*
+	 * Start w/ ref == 1 in atomic mode.  Can be switched to percpu
+	 * operation using percpu_ref_switch_to_percpu().
+	 */
+	PERCPU_REF_INIT_ATOMIC	= 1 << 0,
+
+	/*
+	 * Start dead w/ ref == 0 in atomic mode.  Must be revived with
+	 * percpu_ref_reinit() before used.  Implies INIT_ATOMIC.
+	 */
+	PERCPU_REF_INIT_DEAD	= 1 << 1,
+};
+
 struct percpu_ref {
 	atomic_long_t		count;
 	/*
@@ -76,7 +91,8 @@ struct percpu_ref {
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
-				 percpu_ref_func_t *release, gfp_t gfp);
+				 percpu_ref_func_t *release, unsigned int flags,
+				 gfp_t gfp);
 void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_switch);
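
As an illustration of the new declaration, a minimal editor's sketch of
starting a ref in atomic mode and switching it to percpu operation later
(conn_ref, conn_ref_release, and conn_init are hypothetical names, not
part of this patch):

	#include <linux/percpu-refcount.h>

	static struct percpu_ref conn_ref;	/* hypothetical ref */

	static void conn_ref_release(struct percpu_ref *ref)
	{
		/* called once the count reaches zero; must not sleep */
	}

	static int conn_init(void)
	{
		int ret;

		/* atomic mode from the start: no percpu-to-atomic
		 * switch latency if the ref is killed soon after init */
		ret = percpu_ref_init(&conn_ref, conn_ref_release,
				      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
		if (ret)
			return ret;

		/* once hot-path gets/puts dominate, go percpu */
		percpu_ref_switch_to_percpu(&conn_ref);
		return 0;
	}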
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1634,7 +1634,8 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
 		goto out;
 	root_cgrp->id = ret;
 
-	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, GFP_KERNEL);
+	ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
+			      GFP_KERNEL);
 	if (ret)
 		goto out;
 
@@ -4510,7 +4511,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
 
 	init_and_link_css(css, ss, cgrp);
 
-	err = percpu_ref_init(&css->refcnt, css_release, GFP_KERNEL);
+	err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
 	if (err)
 		goto err_free_css;
 
@@ -4583,7 +4584,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 		goto out_unlock;
 	}
 
-	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, GFP_KERNEL);
+	ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
 	if (ret)
 		goto out_free_cgrp;
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -45,27 +45,40 @@ static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
  * @release: function which will be called when refcount hits 0
+ * @flags: PERCPU_REF_INIT_* flags
  * @gfp: allocation mask to use
  *
- * Initializes the refcount in single atomic counter mode with a refcount of 1;
- * analagous to atomic_long_set(ref, 1).
+ * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
+ * refcount of 1; analagous to atomic_long_set(ref, 1).  See the
+ * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
  *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
  */
 int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
-		    gfp_t gfp)
+		    unsigned int flags, gfp_t gfp)
 {
 	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
 			     __alignof__(unsigned long));
-
-	atomic_long_set(&ref->count, 1 + PERCPU_COUNT_BIAS);
+	unsigned long start_count = 0;
 
 	ref->percpu_count_ptr = (unsigned long)
 		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
 	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
+	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
+	else
+		start_count += PERCPU_COUNT_BIAS;
+
+	if (flags & PERCPU_REF_INIT_DEAD)
+		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
+	else
+		start_count++;
+
+	atomic_long_set(&ref->count, start_count);
+
 	ref->release = release;
 	return 0;
 }
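
For reference, the initial state produced by each flag combination under
the logic above (editor's summary derived from the new code; BIAS stands
for PERCPU_COUNT_BIAS):

	flags				mode bits set				initial count
	0				none (percpu mode)			1 + BIAS
	PERCPU_REF_INIT_ATOMIC		__PERCPU_REF_ATOMIC			1
	PERCPU_REF_INIT_DEAD		__PERCPU_REF_ATOMIC|__PERCPU_REF_DEAD	0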