Commit 8b671b80 authored by J. Bruce Fields

nfsd4: remove use of mutex for file_hashtable

As part of reducing the scope of the client_mutex, and in order to
remove the need for mutexes from the callback code (so that callbacks
can be done as asynchronous rpc calls), move manipulations of the
file_hashtable under the recall_lock.

Update the relevant comments while we're here.
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Cc: Alexandros Batsakis <batsakis@netapp.com>
Reviewed-by: Benny Halevy <bhalevy@panasas.com>
Parent d7fdcfe0
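The core idea of the patch is to drop the nfs4_file kref in favor of a bare atomic_t and to take recall_lock around every file_hashtbl traversal, so that the final reference drop can unhash and free the file atomically with respect to concurrent lookups via atomic_dec_and_lock(). Below is a minimal, self-contained sketch of that pattern against a modern kernel tree; the names (struct obj, obj_lock, obj_hash, obj_new, obj_find, obj_put) are illustrative only and do not appear in nfsd.

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_lock);	/* plays the role of recall_lock */
static LIST_HEAD(obj_hash);		/* single bucket, for brevity */

struct obj {
	atomic_t		ref;
	int			key;
	struct list_head	hash;
};

/* Insert with an initial reference, under the same lock lookups take. */
static struct obj *obj_new(int key)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	atomic_set(&o->ref, 1);
	o->key = key;
	spin_lock(&obj_lock);
	list_add(&o->hash, &obj_hash);
	spin_unlock(&obj_lock);
	return o;
}

/* Lookup bumps the refcount while obj_lock is held, so a racing
 * final put cannot free the object between the match and the get. */
static struct obj *obj_find(int key)
{
	struct obj *o;

	spin_lock(&obj_lock);
	list_for_each_entry(o, &obj_hash, hash) {
		if (o->key == key) {
			atomic_inc(&o->ref);
			spin_unlock(&obj_lock);
			return o;
		}
	}
	spin_unlock(&obj_lock);
	return NULL;
}

/* On the final put, atomic_dec_and_lock() returns with obj_lock held,
 * making the unhash-and-free atomic with respect to obj_find(). */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_lock(&o->ref, &obj_lock)) {
		list_del(&o->hash);
		spin_unlock(&obj_lock);
		kfree(o);
	}
}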
@@ -491,8 +491,6 @@ nfsd4_cb_recall(struct nfs4_delegation *dp)
 	 * or deleg_return.
 	 */
 	put_nfs4_client(clp);
-	nfs4_lock_state();
 	nfs4_put_delegation(dp);
-	nfs4_unlock_state();
 	return;
 }
@@ -78,14 +78,18 @@ static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, state
 static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
 static void nfs4_set_recdir(char *recdir);
 
-/* Locking:
- *
- * client_mutex:
- * 	protects clientid_hashtbl[], clientstr_hashtbl[],
- * 	unconfstr_hashtbl[], uncofid_hashtbl[].
- */
+/* Locking: */
+
+/* Currently used for almost all code touching nfsv4 state: */
 static DEFINE_MUTEX(client_mutex);
 
+/*
+ * Currently used for the del_recall_lru and file hash table. In an
+ * effort to decrease the scope of the client_mutex, this spinlock may
+ * eventually cover more:
+ */
+static DEFINE_SPINLOCK(recall_lock);
+
 static struct kmem_cache *stateowner_slab = NULL;
 static struct kmem_cache *file_slab = NULL;
 static struct kmem_cache *stateid_slab = NULL;
@@ -116,33 +120,23 @@ opaque_hashval(const void *ptr, int nbytes)
 	return x;
 }
 
-/*
- * Delegation state
- */
-/* recall_lock protects the del_recall_lru */
-static DEFINE_SPINLOCK(recall_lock);
 static struct list_head del_recall_lru;
 
-static void
-free_nfs4_file(struct kref *kref)
-{
-	struct nfs4_file *fp = container_of(kref, struct nfs4_file, fi_ref);
-
-	list_del(&fp->fi_hash);
-	iput(fp->fi_inode);
-	kmem_cache_free(file_slab, fp);
-}
-
 static inline void
 put_nfs4_file(struct nfs4_file *fi)
 {
-	kref_put(&fi->fi_ref, free_nfs4_file);
+	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
+		list_del(&fi->fi_hash);
+		spin_unlock(&recall_lock);
+		iput(fi->fi_inode);
+		kmem_cache_free(file_slab, fi);
+	}
 }
 
 static inline void
 get_nfs4_file(struct nfs4_file *fi)
 {
-	kref_get(&fi->fi_ref);
+	atomic_inc(&fi->fi_ref);
 }
 
 static int num_delegations;
@@ -1000,11 +994,13 @@ alloc_init_file(struct inode *ino)
 	fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
 	if (fp) {
-		kref_init(&fp->fi_ref);
+		atomic_set(&fp->fi_ref, 1);
 		INIT_LIST_HEAD(&fp->fi_hash);
 		INIT_LIST_HEAD(&fp->fi_stateids);
 		INIT_LIST_HEAD(&fp->fi_delegations);
+		spin_lock(&recall_lock);
 		list_add(&fp->fi_hash, &file_hashtbl[hashval]);
+		spin_unlock(&recall_lock);
 		fp->fi_inode = igrab(ino);
 		fp->fi_id = current_fileid++;
 		fp->fi_had_conflict = false;
@@ -1177,12 +1173,15 @@ find_file(struct inode *ino)
 	unsigned int hashval = file_hashval(ino);
 	struct nfs4_file *fp;
 
+	spin_lock(&recall_lock);
 	list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
 		if (fp->fi_inode == ino) {
 			get_nfs4_file(fp);
+			spin_unlock(&recall_lock);
 			return fp;
 		}
 	}
+	spin_unlock(&recall_lock);
 	return NULL;
 }
@@ -214,7 +214,7 @@ struct nfs4_stateowner {
  * share_acces, share_deny on the file.
  */
 struct nfs4_file {
-	struct kref		fi_ref;
+	atomic_t		fi_ref;
 	struct list_head	fi_hash;    /* hash by "struct inode *" */
 	struct list_head	fi_stateids;
 	struct list_head	fi_delegations;