Commit 9a1049da authored by Tejun Heo

percpu-refcount: require percpu_ref to be exited explicitly

Currently, a percpu_ref undoes percpu_ref_init() automatically by
freeing the allocated percpu area when the percpu_ref is killed.
While seemingly convenient, this has the following niggles.

* It's impossible to re-init a released reference counter without
  going through re-allocation.

* In a similar vein, it's impossible to initialize a percpu_ref
  count with static percpu variables.

* We need and have an explicit destructor anyway for failure paths -
  percpu_ref_cancel_init().

This patch removes the automatic percpu counter freeing in
percpu_ref_kill_rcu() and repurposes percpu_ref_cancel_init() into a
generic destructor now named percpu_ref_exit().  percpu_ref_destroy()
is considered but it gets confusing with percpu_ref_kill() while
"exit" clearly indicates that it's the counterpart of
percpu_ref_init().

All percpu_ref_cancel_init() users are updated to invoke
percpu_ref_exit() instead and explicit percpu_ref_exit() calls are
added to the destruction path of all percpu_ref users.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Kent Overstreet <kmo@daterainc.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Cc: Li Zefan <lizefan@huawei.com>
Parent 7d742075
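For reference, the lifecycle this change settles on looks roughly like the sketch below. The my_obj structure, my_obj_release(), and some_other_setup() are hypothetical illustrations, not part of the patch; only the percpu_ref calls reflect the API after this commit.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
};

/* runs once the last reference is dropped after percpu_ref_kill() */
static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	/* ->release() is one of the intended percpu_ref_exit() call sites */
	percpu_ref_exit(&obj->ref);
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->ref, my_obj_release))
		goto err_free;
	if (some_other_setup(obj))	/* hypothetical later init step */
		goto err_exit;
	return obj;

err_exit:
	/* init failed after percpu_ref_init() succeeded: exit explicitly */
	percpu_ref_exit(&obj->ref);
err_free:
	kfree(obj);
	return NULL;
}

static void my_obj_destroy(struct my_obj *obj)
{
	percpu_ref_kill(&obj->ref);	/* initiate async shutdown */
	percpu_ref_put(&obj->ref);	/* drop the reference held since init */
}

This mirrors the aio and cgroup error paths in the diff below: percpu_ref_exit() in the init failure path, and an explicit percpu_ref_exit() in the destruction path now that the kill path no longer frees the percpu area.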
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -825,7 +825,7 @@ int core_tpg_add_lun(
 
 	ret = core_dev_export(dev, tpg, lun);
 	if (ret < 0) {
-		percpu_ref_cancel_init(&lun->lun_ref);
+		percpu_ref_exit(&lun->lun_ref);
 		return ret;
 	}
 
@@ -880,5 +880,7 @@ int core_tpg_post_dellun(
 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
 	spin_unlock(&tpg->tpg_lun_lock);
 
+	percpu_ref_exit(&lun->lun_ref);
+
 	return 0;
 }
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -506,6 +506,8 @@ static void free_ioctx(struct work_struct *work)
 
 	aio_free_ring(ctx);
 	free_percpu(ctx->cpu);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -715,8 +717,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 err:
 	mutex_unlock(&ctx->ring_lock);
 	free_percpu(ctx->cpu);
-	percpu_ref_cancel_init(&ctx->reqs);
-	percpu_ref_cancel_init(&ctx->users);
+	percpu_ref_exit(&ctx->reqs);
+	percpu_ref_exit(&ctx->users);
 	kmem_cache_free(kioctx_cachep, ctx);
 	pr_debug("error allocating ioctx %d\n", err);
 	return ERR_PTR(err);
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,9 +57,7 @@ struct percpu_ref {
 	atomic_t		count;
 	/*
 	 * The low bit of the pointer indicates whether the ref is in percpu
-	 * mode; if set, then get/put will manipulate the atomic_t (this is a
-	 * hack because we need to keep the pointer around for
-	 * percpu_ref_kill_rcu())
+	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
 	unsigned long		pcpu_count_ptr;
 	percpu_ref_func_t	*release;
@@ -69,7 +67,7 @@ struct percpu_ref {
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
 				 percpu_ref_func_t *release);
-void percpu_ref_cancel_init(struct percpu_ref *ref);
+void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1638,7 +1638,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
 exit_root_id:
 	cgroup_exit_root_id(root);
 cancel_ref:
-	percpu_ref_cancel_init(&root_cgrp->self.refcnt);
+	percpu_ref_exit(&root_cgrp->self.refcnt);
 out:
 	free_cgrp_cset_links(&tmp_links);
 	return ret;
@@ -4133,6 +4133,8 @@ static void css_free_work_fn(struct work_struct *work)
 		container_of(work, struct cgroup_subsys_state, destroy_work);
 	struct cgroup *cgrp = css->cgroup;
 
+	percpu_ref_exit(&css->refcnt);
+
 	if (css->ss) {
 		/* css free path */
 		if (css->parent)
@@ -4330,7 +4332,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 err_free_id:
 	cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
-	percpu_ref_cancel_init(&css->refcnt);
+	percpu_ref_exit(&css->refcnt);
 err_free_css:
 	call_rcu(&css->rcu_head, css_free_rcu_fn);
 	return err;
@@ -4441,7 +4443,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 out_free_id:
 	cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
-	percpu_ref_cancel_init(&cgrp->self.refcnt);
+	percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
 	kfree(cgrp);
 out_unlock:
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -61,36 +61,25 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
- *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return.  To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * This function exits @ref.  The caller is responsible for ensuring that
+ * @ref is no longer in active use.  The usual places to invoke this
+ * function from are the @ref->release() callback or in init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_exit(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
-	int cpu;
-
-	WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
 
 	if (pcpu_count) {
-		for_each_possible_cpu(cpu)
-			WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
 		free_percpu(pcpu_count);
+		ref->pcpu_count_ptr = PCPU_REF_DEAD;
 	}
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
@@ -102,8 +91,6 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(pcpu_count, cpu);
 
-	free_percpu(pcpu_count);
-
 	pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
 	/*
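Usage note (not part of the patch; all names below are hypothetical): because percpu_ref_exit() returns @ref to its pre-init state, the same counter can now be re-initialized without re-allocation, which the old scheme made impossible. A sketch, assuming the caller synchronizes against the release callback:

#include <linux/completion.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref recycled_ref;		/* hypothetical reusable object */
static DECLARE_COMPLETION(ref_released);

static void recycled_release(struct percpu_ref *ref)
{
	complete(&ref_released);		/* signal that the ref is idle */
}

static int recycle_once(void)
{
	int ret = percpu_ref_init(&recycled_ref, recycled_release);

	if (ret)
		return ret;

	/* ... hand out and drop references as usual ... */

	percpu_ref_kill(&recycled_ref);
	percpu_ref_put(&recycled_ref);		/* drop the initial reference */
	wait_for_completion(&ref_released);	/* release() has run */

	/* now legal: exit and immediately re-init the very same ref */
	percpu_ref_exit(&recycled_ref);
	reinit_completion(&ref_released);
	return percpu_ref_init(&recycled_ref, recycled_release);
}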