Commit b9151761 authored by Linus Torvalds

Merge tag 'nfsd-4.15-1' of git://linux-nfs.org/~bfields/linux

Pull nfsd fixes from Bruce Fields:
 "I screwed up my merge window pull request; I only sent half of what I
  meant to.

  There were no new features, just bugfixes of various importance and
  some very minor cleanup, so I think it's all still appropriate for
  -rc2.

  Highlights:

   - Fixes from Trond for some races in the NFSv4 state code.

   - Fix from Naofumi Honda for a typo in the blocked lock notification
     code

   - Fixes from Vasily Averin for some problems starting and stopping
     lockd especially in network namespaces"

* tag 'nfsd-4.15-1' of git://linux-nfs.org/~bfields/linux: (23 commits)
  lockd: fix "list_add double add" caused by legacy signal interface
  nlm_shutdown_hosts_net() cleanup
  race of nfsd inetaddr notifiers vs nn->nfsd_serv change
  race of lockd inetaddr notifiers vs nlmsvc_rqst change
  SUNRPC: make cache_detail structures const
  NFSD: make cache_detail structures const
  sunrpc: make the function arg as const
  nfsd: check for use of the closed special stateid
  nfsd: fix panic in posix_unblock_lock called from nfs4_laundromat
  lockd: lost rollback of set_grace_period() in lockd_down_net()
  lockd: added cleanup checks in exit_net hook
  grace: replace BUG_ON by WARN_ONCE in exit_net hook
  nfsd: fix locking validator warning on nfs4_ol_stateid->st_mutex class
  lockd: remove net pointer from messages
  nfsd: remove net pointer from debug messages
  nfsd: Fix races with check_stateid_generation()
  nfsd: Ensure we check stateid validity in the seqid operation checks
  nfsd: Fix race in lock stateid creation
  nfsd4: move find_lock_stateid
  nfsd: Ensure we don't recognise lock stateids after freeing them
  ...
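
Note: several of the lockd and nfsd shutdown fixes in the diff below share one pattern: the inetaddr/inet6addr notifier callbacks may only run while a reference count is non-zero, and the shutdown path drops its own reference and then waits on a wait queue until every in-flight callback has finished, so a callback can no longer dereference a freed svc_rqst/svc_serv. A minimal, illustrative sketch of that handshake follows; the identifiers are placeholders, not the kernel's (the real counters in the hunks are nlm_ntf_refcnt/nlm_ntf_wq and nn->ntf_refcnt/nn->ntf_wq).

static atomic_t example_ntf_refcnt = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_ntf_wq);

static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	/* Bail out unless the service still holds its reference. */
	if (event != NETDEV_DOWN || !atomic_inc_not_zero(&example_ntf_refcnt))
		return NOTIFY_DONE;

	/* ... close transports bound to the address that went away ... */

	atomic_dec(&example_ntf_refcnt);
	wake_up(&example_ntf_wq);
	return NOTIFY_DONE;
}

static void example_shutdown(void)
{
	atomic_dec(&example_ntf_refcnt);	/* drop the service's own reference */
	/* unregister the address notifiers here */
	wait_event(example_ntf_wq, atomic_read(&example_ntf_refcnt) == 0);
	/* only now is it safe to tear down the data the callbacks used */
}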
......@@ -578,8 +578,10 @@ static void nlm_complain_hosts(struct net *net)
if (ln->nrhosts == 0)
return;
printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
pr_warn("lockd: couldn't shutdown host module for net %x!\n",
net->ns.inum);
dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
net->ns.inum);
} else {
if (nrhosts == 0)
return;
......@@ -590,9 +592,9 @@ static void nlm_complain_hosts(struct net *net)
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
host->h_inuse, host->h_expires, host->net);
host->h_inuse, host->h_expires, host->net->ns.inum);
}
}
......@@ -605,7 +607,8 @@ nlm_shutdown_hosts_net(struct net *net)
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts in net %p...\n", net);
dprintk("lockd: nuking all hosts in net %x...\n",
net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
......@@ -618,9 +621,8 @@ nlm_shutdown_hosts_net(struct net *net)
/* Then, perform a garbage collection pass */
nlm_gc_hosts(net);
mutex_unlock(&nlm_host_mutex);
nlm_complain_hosts(net);
mutex_unlock(&nlm_host_mutex);
}
/*
......@@ -646,7 +648,8 @@ nlm_gc_hosts(struct net *net)
struct hlist_node *next;
struct nlm_host *host;
dprintk("lockd: host garbage collection for net %p\n", net);
dprintk("lockd: host garbage collection for net %x\n",
net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
......@@ -662,9 +665,10 @@ nlm_gc_hosts(struct net *net)
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
"(cnt %d use %d exp %ld net %p)\n",
"(cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
host->h_inuse, host->h_expires, host->net);
host->h_inuse, host->h_expires,
host->net->ns.inum);
continue;
}
nlm_destroy_host_locked(host);
......
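Note: the "net %p" to "net %x" conversions above print net->ns.inum, the namespace's nsfs inode number (the value readlink on /proc/<pid>/ns/net reports as net:[<inum>]), giving a stable identifier that can be correlated across log lines and with userspace; recent kernels hash plain %p output, so the raw pointer is no longer useful for that. A tiny illustrative fragment with a hypothetical helper name:

static void example_log_net(struct net *net)
{
	/* net->ns.inum is what "readlink /proc/self/ns/net" shows as net:[...] */
	pr_info("example: operating on net %x\n", net->ns.inum);
}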
......@@ -110,7 +110,8 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
clnt = nsm_create(host->net, host->nodename);
if (IS_ERR(clnt)) {
dprintk("lockd: failed to create NSM upcall transport, "
"status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
"status=%ld, net=%x\n", PTR_ERR(clnt),
host->net->ns.inum);
return PTR_ERR(clnt);
}
......
......@@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task;
static struct svc_rqst *nlmsvc_rqst;
unsigned long nlmsvc_timeout;
atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
unsigned int lockd_net_id;
/*
......@@ -259,7 +262,7 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net)
if (error < 0)
goto err_bind;
set_grace_period(net);
dprintk("lockd_up_net: per-net data created; net=%p\n", net);
dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
return 0;
err_bind:
......@@ -274,12 +277,15 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
dprintk("%s: per-net data destroyed; net=%x\n",
__func__, net->ns.inum);
}
} else {
printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
nlmsvc_task, net);
pr_err("%s: no users! task=%p, net=%x\n",
__func__, nlmsvc_task, net->ns.inum);
BUG();
}
}
......@@ -290,7 +296,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
if (event != NETDEV_DOWN)
if ((event != NETDEV_DOWN) ||
!atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
......@@ -301,6 +308,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin);
}
atomic_dec(&nlm_ntf_refcnt);
wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
......@@ -317,7 +326,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
if (event != NETDEV_DOWN)
if ((event != NETDEV_DOWN) ||
!atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
......@@ -329,6 +339,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin6);
}
atomic_dec(&nlm_ntf_refcnt);
wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
......@@ -345,10 +357,12 @@ static void lockd_unregister_notifiers(void)
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
}
static void lockd_svc_exit_thread(void)
{
atomic_dec(&nlm_ntf_refcnt);
lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
......@@ -373,6 +387,7 @@ static int lockd_start_svc(struct svc_serv *serv)
goto out_rqst;
}
atomic_inc(&nlm_ntf_refcnt);
svc_sock_update_bufs(serv);
serv->sv_maxconn = nlm_max_connections;
......@@ -676,6 +691,17 @@ static int lockd_init_net(struct net *net)
static void lockd_exit_net(struct net *net)
{
struct lockd_net *ln = net_generic(net, lockd_net_id);
WARN_ONCE(!list_empty(&ln->lockd_manager.list),
"net %x %s: lockd_manager.list is not empty\n",
net->ns.inum, __func__);
WARN_ONCE(!list_empty(&ln->nsm_handles),
"net %x %s: nsm_handles list is not empty\n",
net->ns.inum, __func__);
WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
"net %x %s: grace_period_end was not cancelled\n",
net->ns.inum, __func__);
}
static struct pernet_operations lockd_net_ops = {
......
......@@ -370,7 +370,7 @@ nlmsvc_mark_resources(struct net *net)
{
struct nlm_host hint;
dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
hint.net = net;
nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
}
......
......@@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
struct list_head *grace_list = net_generic(net, grace_net_id);
spin_lock(&grace_lock);
list_add(&lm->list, grace_list);
if (list_empty(&lm->list))
list_add(&lm->list, grace_list);
else
WARN(1, "double list_add attempt detected in net %x %s\n",
net->ns.inum, (net == &init_net) ? "(init_net)" : "");
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
......@@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
{
struct list_head *grace_list = net_generic(net, grace_net_id);
BUG_ON(!list_empty(grace_list));
WARN_ONCE(!list_empty(grace_list),
"net %x %s: grace_list is not empty\n",
net->ns.inum, __func__);
}
static struct pernet_operations grace_net_ops = {
......
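Note: the list_empty() guard added to locks_start_grace() above can only detect "already on a grace list" if the lock_manager's list head is self-linked whenever it is unhashed; the nfs4_state_create_net() hunk later in this diff adds a matching INIT_LIST_HEAD(&nn->nfsd4_manager.list). A small illustrative example of the list API behaviour the guard relies on (hypothetical names, not a hunk from this series):

static LIST_HEAD(example_grace_list);
static struct lock_manager example_lm;

static void example(void)
{
	INIT_LIST_HEAD(&example_lm.list);   /* self-linked, so list_empty() is true */

	if (list_empty(&example_lm.list))   /* first add succeeds */
		list_add(&example_lm.list, &example_grace_list);

	if (list_empty(&example_lm.list))   /* a second attempt is now skipped */
		list_add(&example_lm.list, &example_grace_list);

	list_del_init(&example_lm.list);    /* unlink and re-initialise, keeping
					     * the guard usable for the next add */
}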
......@@ -232,7 +232,7 @@ static struct cache_head *expkey_alloc(void)
return NULL;
}
static struct cache_detail svc_expkey_cache_template = {
static const struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
......@@ -748,7 +748,7 @@ static struct cache_head *svc_export_alloc(void)
return NULL;
}
static struct cache_detail svc_export_cache_template = {
static const struct cache_detail svc_export_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
......@@ -1230,7 +1230,7 @@ nfsd_export_init(struct net *net)
int rv;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("nfsd: initializing export module (net: %p).\n", net);
dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
if (IS_ERR(nn->svc_export_cache))
......@@ -1278,7 +1278,7 @@ nfsd_export_shutdown(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
dprintk("nfsd: shutting down export module (net: %p).\n", net);
dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
......@@ -1286,5 +1286,5 @@ nfsd_export_shutdown(struct net *net)
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
dprintk("nfsd: export shutdown complete (net: %p).\n", net);
dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
}
......@@ -119,6 +119,9 @@ struct nfsd_net {
u32 clverifier_counter;
struct svc_serv *nfsd_serv;
wait_queue_head_t ntf_wq;
atomic_t ntf_refcnt;
};
/* Simple check to find out if a given net was properly initialized */
......
......@@ -178,7 +178,7 @@ static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
static struct ent *idtoname_update(struct cache_detail *, struct ent *,
struct ent *);
static struct cache_detail idtoname_cache_template = {
static const struct cache_detail idtoname_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
......@@ -341,7 +341,7 @@ static struct ent *nametoid_update(struct cache_detail *, struct ent *,
struct ent *);
static int nametoid_parse(struct cache_detail *, char *, int);
static struct cache_detail nametoid_cache_template = {
static const struct cache_detail nametoid_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
......
......@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
static const stateid_t currentstateid = {
.si_generation = 1,
};
static const stateid_t close_stateid = {
.si_generation = 0xffffffffU,
};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
......@@ -83,6 +87,11 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
*/
static DEFINE_SPINLOCK(state_lock);
enum nfsd4_st_mutex_lock_subclass {
OPEN_STATEID_MUTEX = 0,
LOCK_STATEID_MUTEX = 1,
};
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
......@@ -3562,7 +3571,9 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
if (local->st_stateowner == &oo->oo_owner) {
if (local->st_stateowner != &oo->oo_owner)
continue;
if (local->st_stid.sc_type == NFS4_OPEN_STID) {
ret = local;
refcount_inc(&ret->st_stid.sc_count);
break;
......@@ -3571,6 +3582,52 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
return ret;
}
static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
__be32 ret = nfs_ok;
switch (s->sc_type) {
default:
break;
case NFS4_CLOSED_STID:
case NFS4_CLOSED_DELEG_STID:
ret = nfserr_bad_stateid;
break;
case NFS4_REVOKED_DELEG_STID:
ret = nfserr_deleg_revoked;
}
return ret;
}
/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
__be32 ret;
mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
ret = nfsd4_verify_open_stid(&stp->st_stid);
if (ret != nfs_ok)
mutex_unlock(&stp->st_mutex);
return ret;
}
static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
struct nfs4_ol_stateid *stp;
for (;;) {
spin_lock(&fp->fi_lock);
stp = nfsd4_find_existing_open(fp, open);
spin_unlock(&fp->fi_lock);
if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
break;
nfs4_put_stid(&stp->st_stid);
}
return stp;
}
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
......@@ -3613,8 +3670,9 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
mutex_lock(&stp->st_mutex);
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
......@@ -3639,7 +3697,11 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
mutex_lock(&retstp->st_mutex);
/* Handle races with CLOSE */
if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
nfs4_put_stid(&retstp->st_stid);
goto retry;
}
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
......@@ -4449,6 +4511,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
bool new_stp = false;
/*
* Lookup file; if found, lookup stateid and check open request,
......@@ -4460,9 +4523,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
spin_lock(&fp->fi_lock);
stp = nfsd4_find_existing_open(fp, open);
spin_unlock(&fp->fi_lock);
stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
......@@ -4470,35 +4531,31 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
goto out;
}
if (!stp) {
stp = init_open_stateid(fp, open);
if (!open->op_stp)
new_stp = true;
}
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
*
* stp is already locked.
*/
if (stp) {
if (!new_stp) {
/* Stateid was found, this is an OPEN upgrade */
mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
/* stp is returned locked. */
stp = init_open_stateid(fp, open);
/* See if we lost the race to some other thread */
if (stp->st_access_bmap != 0) {
status = nfs4_upgrade_open(rqstp, fp, current_fh,
stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
goto upgrade_out;
}
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
goto out;
}
......@@ -4507,7 +4564,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
upgrade_out:
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
......@@ -4734,7 +4791,7 @@ nfs4_laundromat(struct nfsd_net *nn)
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
......@@ -4855,6 +4912,18 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
return nfserr_old_stateid;
}
static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
{
__be32 ret;
spin_lock(&s->sc_lock);
ret = nfsd4_verify_open_stid(s);
if (ret == nfs_ok)
ret = check_stateid_generation(in, &s->sc_stateid, has_session);
spin_unlock(&s->sc_lock);
return ret;
}
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
......@@ -4868,7 +4937,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
......@@ -4883,7 +4953,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
status = check_stateid_generation(stateid, &s->sc_stateid, 1);
status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
if (status)
goto out_unlock;
switch (s->sc_type) {
......@@ -4926,7 +4996,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
else if (typemask & NFS4_DELEG_STID)
typemask |= NFS4_REVOKED_DELEG_STID;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
......@@ -5044,7 +5115,7 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
&s, nn);
if (status)
return status;
status = check_stateid_generation(stateid, &s->sc_stateid,
status = nfsd4_stid_check_stateid_generation(stateid, s,
nfsd4_has_session(cstate));
if (status)
goto out;
......@@ -5098,7 +5169,9 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
struct nfs4_ol_stateid *stp = openlockstateid(s);
__be32 ret;
mutex_lock(&stp->st_mutex);
ret = nfsd4_lock_ol_stateid(stp);
if (ret)
goto out_put_stid;
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
......@@ -5109,11 +5182,13 @@ nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
lockowner(stp->st_stateowner)))
goto out;
stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(stp);
ret = nfs_ok;
out:
mutex_unlock(&stp->st_mutex);
out_put_stid:
nfs4_put_stid(s);
return ret;
}
......@@ -5133,6 +5208,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
spin_lock(&s->sc_lock);
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
......@@ -5144,11 +5220,13 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
spin_unlock(&s->sc_lock);
refcount_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
spin_unlock(&s->sc_lock);
dp = delegstateid(s);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&cl->cl_lock);
......@@ -5157,6 +5235,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
/* Default falls through and returns nfserr_bad_stateid */
}
spin_unlock(&s->sc_lock);
out_unlock:
spin_unlock(&cl->cl_lock);
out:
......@@ -5179,15 +5258,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
if (stp->st_stid.sc_type == NFS4_CLOSED_STID
|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
/*
* "Closed" stateid's exist *only* to return
* nfserr_replay_me from the previous step, and
* revoked delegations are kept only for free_stateid.
*/
return nfserr_bad_stateid;
mutex_lock(&stp->st_mutex);
status = nfsd4_lock_ol_stateid(stp);
if (status != nfs_ok)
return status;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
......@@ -5367,7 +5440,6 @@ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
bool unhashed;
LIST_HEAD(reaplist);
s->st_stid.sc_type = NFS4_CLOSED_STID;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
......@@ -5407,10 +5479,17 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
stp->st_stid.sc_type = NFS4_CLOSED_STID;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
mutex_unlock(&stp->st_mutex);
/* See RFC5661 section 18.2.4 */
if (stp->st_stid.sc_client->cl_minorversion)
memcpy(&close->cl_stateid, &close_stateid,
sizeof(close->cl_stateid));
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
......@@ -5436,7 +5515,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
dp = delegstateid(s);
status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
if (status)
goto put_stateid;
......@@ -5642,14 +5721,41 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
return ret;
}
static void
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
struct nfs4_ol_stateid *lst;
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
if (lst->st_stid.sc_type != NFS4_LOCK_STID)
continue;
if (lst->st_stid.sc_file == fp) {
refcount_inc(&lst->st_stid.sc_count);
return lst;
}
}
return NULL;
}
static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
struct nfs4_file *fp, struct inode *inode,
struct nfs4_ol_stateid *open_stp)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
struct nfs4_ol_stateid *retstp;
lockdep_assert_held(&clp->cl_lock);
mutex_init(&stp->st_mutex);
mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
retry:
spin_lock(&clp->cl_lock);
spin_lock(&fp->fi_lock);
retstp = find_lock_stateid(lo, fp);
if (retstp)
goto out_unlock;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
......@@ -5659,29 +5765,22 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
spin_lock(&fp->fi_lock);
list_add(&stp->st_perfile, &fp->fi_stateids);
out_unlock:
spin_unlock(&fp->fi_lock);
}
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
struct nfs4_ol_stateid *lst;
struct nfs4_client *clp = lo->lo_owner.so_client;
lockdep_assert_held(&clp->cl_lock);
list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
if (lst->st_stid.sc_file == fp) {
refcount_inc(&lst->st_stid.sc_count);
return lst;
spin_unlock(&clp->cl_lock);
if (retstp) {
if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
nfs4_put_stid(&retstp->st_stid);
goto retry;
}
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
}
return NULL;
return stp;
}
static struct nfs4_ol_stateid *
......@@ -5694,26 +5793,25 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *clp = oo->oo_owner.so_client;
*new = false;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
if (lst == NULL) {
spin_unlock(&clp->cl_lock);
ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
if (ns == NULL)
return NULL;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
if (likely(!lst)) {
lst = openlockstateid(ns);
init_lock_stateid(lst, lo, fi, inode, ost);
ns = NULL;
*new = true;
}
}
spin_unlock(&clp->cl_lock);
if (ns)
if (lst != NULL) {
if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
goto out;
nfs4_put_stid(&lst->st_stid);
}
ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
if (ns == NULL)
return NULL;
lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
if (lst == openlockstateid(ns))
*new = true;
else
nfs4_put_stid(ns);
out:
return lst;
}
......@@ -5750,7 +5848,6 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *lst;
unsigned int strhashval;
bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
......@@ -5766,25 +5863,12 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
goto out;
}
retry:
lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
mutex_lock(&lst->st_mutex);
/* See if it's still hashed to avoid race with FREE_STATEID */
spin_lock(&cl->cl_lock);
hashed = !list_empty(&lst->st_perfile);
spin_unlock(&cl->cl_lock);
if (!hashed) {
mutex_unlock(&lst->st_mutex);
nfs4_put_stid(&lst->st_stid);
goto retry;
}
status = nfs_ok;
*plst = lst;
out:
......@@ -5990,14 +6074,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
mutex_unlock(&lock_stp->st_mutex);
/*
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
if (status && new)
if (status && new) {
lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(lock_stp);
}
mutex_unlock(&lock_stp->st_mutex);
nfs4_put_stid(&lock_stp->st_stid);
}
......@@ -7017,6 +7103,10 @@ static int nfs4_state_create_net(struct net *net)
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
nn->boot_time = get_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
......@@ -7074,9 +7164,6 @@ nfs4_state_start_net(struct net *net)
ret = nfs4_state_create_net(net);
if (ret)
return ret;
nn->boot_time = get_seconds();
nn->grace_ended = false;
nn->nfsd4_manager.block_opens = true;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
......@@ -7153,7 +7240,7 @@ nfs4_state_shutdown_net(struct net *net)
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
nbl = list_first_entry(&nn->blocked_locks_lru,
nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
......
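Note: the OPEN_STATEID_MUTEX/LOCK_STATEID_MUTEX subclasses introduced above address the "locking validator warning on nfs4_ol_stateid->st_mutex class" from the commit list: while servicing a LOCK, nfsd can hold one stateid's st_mutex while acquiring another stateid's st_mutex, and both belong to the same lock class, which lockdep would otherwise flag as possible recursive locking. A minimal illustration of the mutex_lock_nested() annotation (not the exact nfsd call sites; which subclass is used where is defined by the hunks above):

static void example_nested_stateids(struct nfs4_ol_stateid *outer,
				    struct nfs4_ol_stateid *inner)
{
	/* Same lock class taken twice: tell lockdep the two levels apart. */
	mutex_lock_nested(&outer->st_mutex, OPEN_STATEID_MUTEX);
	mutex_lock_nested(&inner->st_mutex, LOCK_STATEID_MUTEX);

	/* ... work on both stateids ... */

	mutex_unlock(&inner->st_mutex);
	mutex_unlock(&outer->st_mutex);
}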
......@@ -1241,6 +1241,9 @@ static __net_init int nfsd_init_net(struct net *net)
nn->nfsd4_grace = 90;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
atomic_set(&nn->ntf_refcnt, 0);
init_waitqueue_head(&nn->ntf_wq);
return 0;
out_idmap_error:
......
......@@ -335,7 +335,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
if (event != NETDEV_DOWN)
if ((event != NETDEV_DOWN) ||
!atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
......@@ -344,6 +345,8 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
atomic_dec(&nn->ntf_refcnt);
wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
......@@ -363,7 +366,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
if (event != NETDEV_DOWN)
if ((event != NETDEV_DOWN) ||
!atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
......@@ -374,7 +378,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
atomic_dec(&nn->ntf_refcnt);
wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
}
......@@ -391,6 +396,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
atomic_dec(&nn->ntf_refcnt);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
......@@ -398,6 +404,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
/*
* write_ports can create the server without actually starting
......@@ -517,6 +524,7 @@ int nfsd_create_serv(struct net *net)
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
atomic_inc(&nn->ntf_refcnt);
ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
return 0;
}
......
......@@ -213,7 +213,7 @@ extern void __init cache_initialize(void);
extern int cache_register_net(struct cache_detail *cd, struct net *net);
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
extern void sunrpc_init_cache_detail(struct cache_detail *cd);
......
......@@ -264,7 +264,7 @@ static int rsi_parse(struct cache_detail *cd,
return status;
}
static struct cache_detail rsi_cache_template = {
static const struct cache_detail rsi_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
......@@ -524,7 +524,7 @@ static int rsc_parse(struct cache_detail *cd,
return status;
}
static struct cache_detail rsc_cache_template = {
static const struct cache_detail rsc_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSC_HASHMAX,
.name = "auth.rpcsec.context",
......
......@@ -1674,7 +1674,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
struct cache_detail *cd;
int i;
......
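Note: making the cache_detail templates const (export.c, nfs4idmap.c, svcauth_gss.c and svcauth_unix.c above) is safe because cache_create_net() never writes to the template; it duplicates it into a per-net, writable cache_detail and only modifies the copy. A hedged sketch of that shape, with error handling and locking simplified (illustrative, not the exact body of cache_create_net()):

struct cache_detail *example_create_net(const struct cache_detail *tmpl,
					struct net *net)
{
	struct cache_detail *cd;

	cd = kmemdup(tmpl, sizeof(*cd), GFP_KERNEL);    /* per-net writable copy */
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	cd->net = net;          /* the shared const template stays untouched */
	return cd;
}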
......@@ -569,7 +569,7 @@ static int unix_gid_show(struct seq_file *m,
return 0;
}
static struct cache_detail unix_gid_cache_template = {
static const struct cache_detail unix_gid_cache_template = {
.owner = THIS_MODULE,
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
......@@ -862,7 +862,7 @@ struct auth_ops svcauth_unix = {
.set_client = svcauth_unix_set_client,
};
static struct cache_detail ip_map_cache_template = {
static const struct cache_detail ip_map_cache_template = {
.owner = THIS_MODULE,
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
......