Commit b647c35f authored by Jeff Layton, committed by Steve French

cifs: convert tlink_tree to a rbtree

Radix trees are ideal when you want to track a bunch of pointers and
can't embed a tracking structure within the target of those pointers.
The tradeoff is an increase in memory, particularly if the tree is
sparse.

In CIFS, we use the tlink_tree to track tcon_link structs. A tcon_link
can never be in more than one tlink_tree, so there's no impediment to
using a rb_tree here instead of a radix tree.

Convert the new multiuser mount code to use a rb_tree instead. This
should reduce the memory required to manage the tlink_tree.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
Parent 413e661c
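The pattern the patch relies on is the kernel's intrusive rbtree: the tree node (struct rb_node tl_rbnode) is embedded in the tracked object, and rb_entry(), which is just container_of(), recovers the enclosing struct tcon_link from a node pointer. As a minimal userspace sketch of that idea, the program below uses a plain, unbalanced binary search tree standing in for the kernel's self-balancing implementation; the demo_* and bst_* names are illustrative only and appear nowhere in the CIFS code.

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of()/rb_entry() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct bst_node {
        struct bst_node *left, *right;
};

struct demo_tlink {
        struct bst_node node;   /* embedded, like tl_rbnode in tcon_link */
        unsigned int uid;       /* search key, like tl_uid */
};

/* same shape as tlink_rb_search() below, minus the rebalancing */
static struct demo_tlink *demo_search(struct bst_node *n, unsigned int uid)
{
        while (n) {
                struct demo_tlink *t = container_of(n, struct demo_tlink, node);

                if (t->uid > uid)
                        n = n->left;
                else if (t->uid < uid)
                        n = n->right;
                else
                        return t;
        }
        return NULL;
}

/* same shape as tlink_rb_insert() below, minus the rebalancing */
static void demo_insert(struct bst_node **link, struct demo_tlink *new_t)
{
        while (*link) {
                struct demo_tlink *t = container_of(*link, struct demo_tlink, node);

                link = (t->uid > new_t->uid) ? &(*link)->left : &(*link)->right;
        }
        new_t->node.left = new_t->node.right = NULL;
        *link = &new_t->node;
}

int main(void)
{
        struct bst_node *root = NULL;
        struct demo_tlink a = { .uid = 1000 }, b = { .uid = 0 };

        demo_insert(&root, &a);
        demo_insert(&root, &b);
        printf("found uid %u\n", demo_search(root, 1000)->uid);
        return 0;
}

The same shape appears in tlink_rb_search() and tlink_rb_insert() in the hunks below; the kernel versions additionally call rb_link_node() and rb_insert_color() so the tree stays balanced. No extra allocation is needed per entry, which is where the memory saving over the radix tree comes from.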
@@ -15,7 +15,7 @@
  *   the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,7 +42,7 @@
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-	struct radix_tree_root tlink_tree;
+	struct rb_root tlink_tree;
 	spinlock_t tlink_tree_lock;
 	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
...
@@ -116,7 +116,7 @@ cifs_read_super(struct super_block *sb, void *data,
 		return -ENOMEM;
 
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
-	INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+	cifs_sb->tlink_tree = RB_ROOT;
 
 	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
 	if (rc) {
...
@@ -336,7 +336,8 @@ struct cifsTconInfo {
  * "get" on the container.
  */
 struct tcon_link {
-	unsigned long tl_index;
+	struct rb_node tl_rbnode;
+	uid_t tl_uid;
 	unsigned long tl_flags;
 #define TCON_LINK_MASTER	0
 #define TCON_LINK_PENDING	1
...
@@ -116,6 +116,7 @@ struct smb_vol {
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2900,24 +2901,16 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
 		goto mount_fail_check;
 	}
 
-	tlink->tl_index = pSesInfo->linux_uid;
+	tlink->tl_uid = pSesInfo->linux_uid;
 	tlink->tl_tcon = tcon;
 	tlink->tl_time = jiffies;
 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc == -ENOMEM) {
-		kfree(tlink);
-		goto mount_fail_check;
-	}
-
+	cifs_sb->master_tlink = tlink;
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
-
-	cifs_sb->master_tlink = tlink;
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
@@ -3107,33 +3100,26 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct tcon_link *tlink;
 	char *tmp;
-	struct tcon_link *tlink[8];
-	unsigned long index = 0;
 
 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-
-		for (i = 0; i < ret; i++)
-			cifs_put_tlink(tlink[i]);
-	} while (ret != 0);
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	while ((node = rb_first(root))) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(node, root);
+
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
 	cifs_sb->prepath = NULL;
@@ -3290,6 +3276,47 @@ cifs_sb_tcon_pending_wait(void *unused)
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+	struct tcon_link *tlink;
+
+	while (node) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+		if (tlink->tl_uid > uid)
+			node = node->rb_left;
+		else if (tlink->tl_uid < uid)
+			node = node->rb_right;
+		else
+			return tlink;
+	}
+	return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct tcon_link *tlink;
+
+	while (*new) {
+		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+		parent = *new;
+
+		if (tlink->tl_uid > new_tlink->tl_uid)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+	rb_insert_color(&new_tlink->tl_rbnode, root);
+}
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3310,14 +3337,14 @@ struct tcon_link *
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
 	int ret;
-	unsigned long fsuid = (unsigned long) current_fsuid();
+	uid_t fsuid = current_fsuid();
 	struct tcon_link *tlink, *newtlink;
 
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink)
 		cifs_get_tlink(tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3326,36 +3353,24 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
 		if (newtlink == NULL)
 			return ERR_PTR(-ENOMEM);
-		newtlink->tl_index = fsuid;
+		newtlink->tl_uid = fsuid;
 		newtlink->tl_tcon = ERR_PTR(-EACCES);
 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
 		cifs_get_tlink(newtlink);
 
-		ret = radix_tree_preload(GFP_KERNEL);
-		if (ret != 0) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
-
 		spin_lock(&cifs_sb->tlink_tree_lock);
 		/* was one inserted after previous search? */
-		tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 		if (tlink) {
 			cifs_get_tlink(tlink);
 			spin_unlock(&cifs_sb->tlink_tree_lock);
-			radix_tree_preload_end();
 			kfree(newtlink);
 			goto wait_for_construction;
 		}
-		ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-		radix_tree_preload_end();
-		if (ret) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
 		tlink = newtlink;
+		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
 	} else {
 wait_for_construction:
 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3401,39 +3416,39 @@ cifs_prune_tlinks(struct work_struct *work)
 {
 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
 						    prune_tlinks.work);
-	struct tcon_link *tlink[8];
-	unsigned long now = jiffies;
-	unsigned long index = 0;
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node = rb_first(root);
+	struct rb_node *tmp;
+	struct tcon_link *tlink;
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-			    atomic_read(&tlink[i]->tl_count) != 0 ||
-			    time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-				       now)) {
-				tlink[i] = NULL;
-				continue;
-			}
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-
-		for (i = 0; i < ret; i++) {
-			if (tlink[i] != NULL)
-				cifs_put_tlink(tlink[i]);
-		}
-	} while (ret != 0);
+	/*
+	 * Because we drop the spinlock in the loop in order to put the tlink
+	 * it's not guarded against removal of links from the tree. The only
+	 * places that remove entries from the tree are this function and
+	 * umounts. Because this function is non-reentrant and is canceled
+	 * before umount can proceed, this is safe.
+	 */
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	node = rb_first(root);
+	while (node != NULL) {
+		tmp = node;
+		node = rb_next(tmp);
+		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+		    atomic_read(&tlink->tl_count) != 0 ||
+		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+			continue;
+
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(tmp, root);
+
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
...