Commit 4f4b1b64 authored by Greg Kroah-Hartman

Revert "kernfs: restructure removal path to fix possible premature return"

This reverts commit 45a140e5.

Tejun writes:
        I'm sorry but can you please revert the whole series?
        get_active() waiting while a node is deactivated has potential
        to lead to deadlock and that deactivate/reactivate interface is
        something fundamentally flawed and that cgroup will have to work
        with the remove_self() like everybody else.  IOW, I think the
        first posting was correct.

Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 55f6e30d
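The deadlock Tejun is pointing at: a kernfs handler runs with an active reference held on its own node, so a removal path that waits for every active reference to drain can end up waiting on the very caller that initiated the removal. The remove_self() interface from the first posting breaks the cycle by having the remover drop its own active reference before draining. A minimal sketch of that usage pattern, assuming the kernfs_remove_self() API as it later landed in mainline (handler shape follows kernfs_ops->write; the function name is illustrative):

static ssize_t delete_write(struct kernfs_open_file *of, char *buf,
			    size_t bytes, loff_t off)
{
	/*
	 * This handler runs with an active reference held on of->kn, so a
	 * plain kernfs_remove(of->kn) here would wait forever for our own
	 * reference to drain.  kernfs_remove_self() drops the caller's own
	 * active reference first; exactly one concurrent writer wins the
	 * removal, and the losers see a false return.
	 */
	if (!kernfs_remove_self(of->kn))
		return -ENODEV;

	/* tear down whatever object backed the node, then return */
	return bytes;
}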
...
@@ -181,38 +181,14 @@ void kernfs_put_active(struct kernfs_node *kn)
  * kernfs_drain - drain kernfs_node
  * @kn: kernfs_node to drain
  *
- * Drain existing usages of @kn.  Mutiple removers may invoke this function
- * concurrently on @kn and all will return after draining is complete.
- * Returns %true if drain is performed and kernfs_mutex was temporarily
- * released.  %false if @kn was already drained and no operation was
- * necessary.
- *
- * The caller is responsible for ensuring @kn stays pinned while this
- * function is in progress even if it gets removed by someone else.
+ * Drain existing usages.
  */
-static bool kernfs_drain(struct kernfs_node *kn)
-	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
+static void kernfs_drain(struct kernfs_node *kn)
 {
 	struct kernfs_root *root = kernfs_root(kn);
 
-	lockdep_assert_held(&kernfs_mutex);
 	WARN_ON_ONCE(atomic_read(&kn->active) >= 0);
 
-	/*
-	 * We want to go through the active ref lockdep annotation at least
-	 * once for all node removals, but the lockdep annotation can't be
-	 * nested inside kernfs_mutex and deactivation can't make forward
-	 * progress if we keep dropping the mutex.  Use JUST_ACTIVATED to
-	 * force the slow path once for each deactivation if lockdep is
-	 * enabled.
-	 */
-	if ((!kernfs_lockdep(kn) || !(kn->flags & KERNFS_JUST_DEACTIVATED)) &&
-	    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
-		return false;
-
-	kn->flags &= ~KERNFS_JUST_DEACTIVATED;
-
-	mutex_unlock(&kernfs_mutex);
-
 	if (kernfs_lockdep(kn)) {
 		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
 		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
@@ -226,9 +202,6 @@ static bool kernfs_drain(struct kernfs_node *kn)
 		lock_acquired(&kn->dep_map, _RET_IP_);
 		rwsem_release(&kn->dep_map, 1, _RET_IP_);
 	}
-
-	mutex_lock(&kernfs_mutex);
-	return true;
 }
 
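What the drain above waits for: kn->active is a biased reference count. kernfs_get_active() succeeds only while the count is non-negative; deactivation adds the huge negative KN_DEACTIVATED_BIAS so new users are refused, and draining completes once the count settles back at exactly the bias, i.e. every pre-existing reference has been put. A stand-alone user-space analogue of that counter, as a minimal C11 sketch (illustrative only; the kernel uses atomic_t plus a waitqueue rather than a spin loop, and KN_DEACTIVATED_BIAS is defined as INT_MIN in kernfs):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>

#define DEACTIVATED_BIAS	INT_MIN	/* mirrors kernfs's KN_DEACTIVATED_BIAS */

struct active_ref {
	atomic_int count;		/* >= 0: live, < 0: deactivated */
};

/* like kernfs_get_active(): refuse new references once deactivated */
static bool get_active(struct active_ref *a)
{
	int v = atomic_load(&a->count);

	while (v >= 0)
		if (atomic_compare_exchange_weak(&a->count, &v, v + 1))
			return true;
	return false;			/* negative: draining or drained */
}

/* like kernfs_put_active(), minus waking up a sleeping drainer */
static void put_active(struct active_ref *a)
{
	atomic_fetch_sub(&a->count, 1);
}

/* deactivate, then wait out existing holders; once the count sits at
 * exactly the bias, no reference remains and none can be taken */
static void drain(struct active_ref *a)
{
	atomic_fetch_add(&a->count, DEACTIVATED_BIAS);
	while (atomic_load(&a->count) != DEACTIVATED_BIAS)
		;			/* kernfs sleeps on a waitqueue here */
}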
@@ -473,6 +446,49 @@ int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn,
 	return 0;
 }
 
+/**
+ * kernfs_remove_one - remove kernfs_node from parent
+ * @acxt: addrm context to use
+ * @kn: kernfs_node to be removed
+ *
+ * Mark @kn removed and drop nlink of parent inode if @kn is a
+ * directory.  @kn is unlinked from the children list.
+ *
+ * This function should be called between calls to
+ * kernfs_addrm_start() and kernfs_addrm_finish() and should be
+ * passed the same @acxt as passed to kernfs_addrm_start().
+ *
+ * LOCKING:
+ * Determined by kernfs_addrm_start().
+ */
+static void kernfs_remove_one(struct kernfs_addrm_cxt *acxt,
+			      struct kernfs_node *kn)
+{
+	struct kernfs_iattrs *ps_iattr;
+
+	/*
+	 * Removal can be called multiple times on the same node.  Only the
+	 * first invocation is effective and puts the base ref.
+	 */
+	if (atomic_read(&kn->active) < 0)
+		return;
+
+	if (kn->parent) {
+		kernfs_unlink_sibling(kn);
+
+		/* Update timestamps on the parent */
+		ps_iattr = kn->parent->iattr;
+		if (ps_iattr) {
+			ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
+			ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
+		}
+	}
+
+	atomic_add(KN_DEACTIVATED_BIAS, &kn->active);
+	kn->u.removed_list = acxt->removed;
+	acxt->removed = kn;
+}
+
 /**
  * kernfs_addrm_finish - finish up kernfs_node add/remove
  * @acxt: addrm context to finish up
@@ -496,6 +512,7 @@ void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt)
 		acxt->removed = kn->u.removed_list;
 
+		kernfs_drain(kn);
 		kernfs_unmap_bin_file(kn);
 		kernfs_put(kn);
 	}
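The division of labor this revert restores: everything that needs kernfs_mutex (deactivating, unlinking siblings, parent timestamp updates) happens inside the addrm section, and the nodes queued on @acxt are drained and released only in kernfs_addrm_finish() after the mutex is dropped; that is why kernfs_drain() no longer has to release and re-acquire kernfs_mutex itself. The public removal entry point drives the sequence roughly as follows (a sketch matching the pre-series wrapper; kernfs_addrm_start() acquires kernfs_mutex and kernfs_addrm_finish() releases it before processing the removed list):

void kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_addrm_cxt acxt;

	kernfs_addrm_start(&acxt);	/* grabs kernfs_mutex */
	__kernfs_remove(&acxt, kn);	/* deactivate + unlink, queue on acxt */
	kernfs_addrm_finish(&acxt);	/* drops mutex, then drains and puts */
}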
@@ -805,73 +822,23 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
 	return pos->parent;
 }
 
-static void __kernfs_deactivate(struct kernfs_node *kn)
-{
-	struct kernfs_node *pos;
-
-	lockdep_assert_held(&kernfs_mutex);
-
-	/* prevent any new usage under @kn by deactivating all nodes */
-	pos = NULL;
-	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		if (atomic_read(&pos->active) >= 0) {
-			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
-			pos->flags |= KERNFS_JUST_DEACTIVATED;
-		}
-	}
-
-	/*
-	 * Drain the subtree.  If kernfs_drain() blocked to drain, which is
-	 * indicated by %true return, it temporarily released kernfs_mutex
-	 * and the rbtree might have been modified inbetween breaking our
-	 * future walk.  Restart the walk after each %true return.
-	 */
-	pos = NULL;
-	while ((pos = kernfs_next_descendant_post(pos, kn))) {
-		bool drained;
-
-		kernfs_get(pos);
-		drained = kernfs_drain(pos);
-		kernfs_put(pos);
-
-		if (drained)
-			pos = NULL;
-	}
-}
-
 static void __kernfs_remove(struct kernfs_addrm_cxt *acxt,
 			    struct kernfs_node *kn)
 {
-	struct kernfs_node *pos;
-
-	lockdep_assert_held(&kernfs_mutex);
+	struct kernfs_node *pos, *next;
 
 	if (!kn)
 		return;
 
 	pr_debug("kernfs %s: removing\n", kn->name);
 
-	__kernfs_deactivate(kn);
-
-	/* unlink the subtree node-by-node */
+	next = NULL;
 	do {
-		struct kernfs_iattrs *ps_iattr;
-
-		pos = kernfs_leftmost_descendant(kn);
-
-		if (pos->parent) {
-			kernfs_unlink_sibling(pos);
-
-			/* update timestamps on the parent */
-			ps_iattr = pos->parent->iattr;
-			if (ps_iattr) {
-				ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
-				ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
-			}
-		}
-
-		pos->u.removed_list = acxt->removed;
-		acxt->removed = pos;
-	} while (pos != kn);
+		pos = next;
+		next = kernfs_next_descendant_post(pos, kn);
+		if (pos)
+			kernfs_remove_one(acxt, pos);
+	} while (next);
 }
 
 /**
...
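The restored __kernfs_remove() loop relies on one idiom worth spelling out: the post-order successor is computed before the current node is passed to kernfs_remove_one(), so unlinking the node can never invalidate the walk cursor, and the restart logic the reverted series needed is unnecessary. A self-contained analogue on a first-child/next-sibling tree (illustrative types and helpers, not the kernfs API):

#include <stdio.h>
#include <stdlib.h>

/* toy tree; kernfs keeps siblings in an rbtree instead */
struct node {
	struct node *parent, *child, *sibling;
	const char *name;
};

/* deepest first child, like kernfs_leftmost_descendant() */
static struct node *leftmost(struct node *n)
{
	while (n->child)
		n = n->child;
	return n;
}

/* post-order successor, like kernfs_next_descendant_post() */
static struct node *next_post(struct node *pos, struct node *root)
{
	if (!pos)			/* first call: start at the leftmost leaf */
		return leftmost(root);
	if (pos == root)		/* the root is visited last */
		return NULL;
	if (pos->sibling)		/* descend into the next sibling's subtree */
		return leftmost(pos->sibling);
	return pos->parent;		/* no more siblings: climb */
}

static void remove_all(struct node *root)
{
	struct node *pos, *next = NULL;

	do {
		pos = next;
		/* fetch the successor while pos is still intact */
		next = next_post(pos, root);
		if (pos) {
			printf("removing %s\n", pos->name);
			free(pos);	/* children always freed before parents */
		}
	} while (next);
}

static struct node *make_node(const char *name, struct node *parent)
{
	struct node *n = calloc(1, sizeof(*n));

	n->name = name;
	n->parent = parent;
	if (parent) {			/* push onto the parent's child list */
		n->sibling = parent->child;
		parent->child = n;
	}
	return n;
}

int main(void)
{
	struct node *root = make_node("root", NULL);
	struct node *dir = make_node("dir", root);

	make_node("file_a", dir);
	make_node("file_b", root);
	remove_all(root);		/* prints file_b, file_a, dir, root */
	return 0;
}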
@@ -37,7 +37,6 @@ enum kernfs_node_type {
 #define KERNFS_FLAG_MASK	~KERNFS_TYPE_MASK
 
 enum kernfs_node_flag {
-	KERNFS_JUST_DEACTIVATED	= 0x0010, /* used to aid lockdep annotation */
 	KERNFS_NS		= 0x0020,
 	KERNFS_HAS_SEQ_SHOW	= 0x0040,
 	KERNFS_HAS_MMAP		= 0x0080,
...