Commit 822a5d31 authored by Linus Torvalds

Merge branch 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

* 'bugfixes' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFS: Fix a regression in nfs_file_llseek()
  NFSv4: Do not accept delegated opens when a delegation recall is in effect
  NFSv4: Ensure correct locking when accessing the 'lock_states' list
  NFSv4.1: Ensure that we handle _all_ SEQUENCE status bits.
  NFSv4: Don't error if we handled it in nfs4_recovery_handle_error
  SUNRPC: Ensure we always bump the backlog queue in xprt_free_slot
  SUNRPC: Fix the execution time statistics in the face of RPC restarts
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
      * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
      * the cached file length
      */
-    if (origin != SEEK_SET || origin != SEEK_CUR) {
+    if (origin != SEEK_SET && origin != SEEK_CUR) {
         struct inode *inode = filp->f_mapping->host;
         int retval = nfs_revalidate_file_size(inode, filp);
......
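Note on the nfs_file_llseek() hunk above: with ||, the condition is true for every origin value, since any origin differs from at least one of SEEK_SET and SEEK_CUR, so the cached file length was revalidated even for plain SEEK_SET/SEEK_CUR seeks. The && form revalidates only for SEEK_END, SEEK_DATA and SEEK_HOLE, as the comment intends. A minimal user-space sketch of the two predicates (illustrative only, not kernel code):

#include <stdio.h>
#include <unistd.h>  /* SEEK_SET, SEEK_CUR, SEEK_END */

/* Buggy predicate: always true, because no origin can equal both constants. */
static int needs_revalidate_buggy(int origin)
{
    return origin != SEEK_SET || origin != SEEK_CUR;
}

/* Fixed predicate: true only when origin is neither SEEK_SET nor SEEK_CUR. */
static int needs_revalidate_fixed(int origin)
{
    return origin != SEEK_SET && origin != SEEK_CUR;
}

int main(void)
{
    int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };
    for (unsigned i = 0; i < sizeof(origins) / sizeof(origins[0]); i++)
        printf("origin=%d buggy=%d fixed=%d\n", origins[i],
               needs_revalidate_buggy(origins[i]),
               needs_revalidate_fixed(origins[i]));
    return 0;
}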
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+    if (delegation == NULL)
+        return 0;
     if ((delegation->type & fmode) != fmode)
         return 0;
     if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
         }
         rcu_read_lock();
         delegation = rcu_dereference(nfsi->delegation);
-        if (delegation == NULL ||
-                !can_open_delegated(delegation, fmode)) {
+        if (!can_open_delegated(delegation, fmode)) {
             rcu_read_unlock();
             break;
         }
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
         if (delegation)
             delegation_flags = delegation->flags;
         rcu_read_unlock();
-        if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+        if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+            pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+                    "returning a delegation for "
+                    "OPEN(CLAIM_DELEGATE_CUR)\n",
+                    NFS_CLIENT(inode)->cl_server);
+        } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
             nfs_inode_set_delegation(state->inode,
                     data->owner->so_cred,
                     &data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
             goto out_no_action;
         rcu_read_lock();
         delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-        if (delegation != NULL &&
-                test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-            rcu_read_unlock();
-            goto out_no_action;
-        }
+        if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+                can_open_delegated(delegation, data->o_arg.fmode))
+            goto unlock_no_action;
         rcu_read_unlock();
     }
     /* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
         return;
     rpc_call_start(task);
     return;
+unlock_no_action:
+    rcu_read_unlock();
 out_no_action:
     task->tk_action = NULL;
......
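Note on the nfs4_open_prepare() hunks above: the early-exit test now runs while the RCU read lock is held, so it branches to the new unlock_no_action label, which drops the lock and then falls through to the existing out_no_action handling. A minimal user-space sketch of that two-label layout, with a pthread mutex standing in for the RCU read lock (the names below are invented for the example):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Two-label pattern: one label for exits that still hold the lock,
 * falling through to the label shared with lock-free early exits. */
static int prepare(int skip_early, int skip_while_locked)
{
    if (skip_early)
        goto out_no_action;          /* lock not taken yet */

    pthread_mutex_lock(&lock);
    if (skip_while_locked)
        goto unlock_no_action;       /* must release before bailing out */
    pthread_mutex_unlock(&lock);

    printf("doing the real work\n");
    return 1;

unlock_no_action:
    pthread_mutex_unlock(&lock);
out_no_action:
    printf("nothing to do\n");
    return 0;
}

int main(void)
{
    prepare(1, 0);
    prepare(0, 1);
    prepare(0, 0);
    return 0;
}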
@@ -1156,11 +1156,13 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs
         if (status >= 0) {
             status = nfs4_reclaim_locks(state, ops);
             if (status >= 0) {
+                spin_lock(&state->state_lock);
                 list_for_each_entry(lock, &state->lock_states, ls_locks) {
                     if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                         printk("%s: Lock reclaim failed!\n",
                                 __func__);
                 }
+                spin_unlock(&state->state_lock);
                 nfs4_put_open_state(state);
                 goto restart;
             }
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
     clear_bit(NFS_O_RDONLY_STATE, &state->flags);
     clear_bit(NFS_O_WRONLY_STATE, &state->flags);
     clear_bit(NFS_O_RDWR_STATE, &state->flags);
+    spin_lock(&state->state_lock);
     list_for_each_entry(lock, &state->lock_states, ls_locks) {
         lock->ls_seqid.flags = 0;
         lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
     }
+    spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
     switch (error) {
+        case 0:
+            break;
         case -NFS4ERR_CB_PATH_DOWN:
             nfs_handle_cb_pathdown(clp);
-            return 0;
+            break;
         case -NFS4ERR_NO_GRACE:
             nfs4_state_end_reclaim_reboot(clp);
-            return 0;
+            break;
         case -NFS4ERR_STALE_CLIENTID:
         case -NFS4ERR_LEASE_MOVED:
             set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
         case -NFS4ERR_SEQ_MISORDERED:
             set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
             /* Zero session reset errors */
-            return 0;
+            break;
         case -EKEYEXPIRED:
             /* Nothing we can do */
             nfs4_warn_keyexpired(clp->cl_hostname);
-            return 0;
+            break;
+        default:
+            return error;
     }
-    return error;
+    return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
     struct rpc_cred *cred;
     const struct nfs4_state_maintenance_ops *ops =
         clp->cl_mvops->state_renewal_ops;
-    int status = -NFS4ERR_EXPIRED;
+    int status;
 
     /* Is the client already known to have an expired lease? */
     if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
     spin_unlock(&clp->cl_lock);
     if (cred == NULL) {
         cred = nfs4_get_setclientid_cred(clp);
+        status = -ENOKEY;
         if (cred == NULL)
             goto out;
     }
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
     if (!flags)
         return;
-    else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+    if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
         nfs41_handle_server_reboot(clp);
-    else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+    if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                 SEQ4_STATUS_ADMIN_STATE_REVOKED |
                 SEQ4_STATUS_LEASE_MOVED))
         nfs41_handle_state_revoked(clp);
-    else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+    if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
         nfs41_handle_recallable_state_revoked(clp);
-    else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+    if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                 SEQ4_STATUS_BACKCHANNEL_FAULT |
                 SEQ4_STATUS_CB_PATH_DOWN_SESSION))
         nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
         if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
             status = nfs4_check_lease(clp);
-            if (status < 0)
-                goto out_error;
             if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                 continue;
+            if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
+                goto out_error;
         }
 
         /* Initialize or reset the session */
......
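Note on the nfs41_handle_sequence_flag_errors() hunk above: an else-if chain acts on at most one SEQUENCE status bit per reply, while the server may set several at once; converting the chain to independent if statements handles every bit that is set. A small user-space sketch of the difference (the flag values and handlers below are invented stand-ins for the SEQ4_STATUS_* bits, not the NFS code):

#include <stdio.h>

#define FLAG_REBOOT   0x1   /* invented stand-ins for the SEQ4_STATUS_* bits */
#define FLAG_REVOKED  0x2
#define FLAG_CB_DOWN  0x4

static void handle_else_if(unsigned flags)
{
    if (flags & FLAG_REBOOT)
        printf("  reboot recovery\n");
    else if (flags & FLAG_REVOKED)
        printf("  state revoked\n");
    else if (flags & FLAG_CB_DOWN)
        printf("  callback path down\n");
}

static void handle_all(unsigned flags)
{
    if (flags & FLAG_REBOOT)
        printf("  reboot recovery\n");
    if (flags & FLAG_REVOKED)
        printf("  state revoked\n");
    if (flags & FLAG_CB_DOWN)
        printf("  callback path down\n");
}

int main(void)
{
    unsigned flags = FLAG_REBOOT | FLAG_CB_DOWN;  /* two bits set at once */

    printf("else-if chain handles only the first bit:\n");
    handle_else_if(flags);
    printf("independent ifs handle every bit:\n");
    handle_all(flags);
    return 0;
}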
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
     task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+    /* Initialize retry counters */
+    task->tk_garb_retry = 2;
+    task->tk_cred_retry = 2;
+    task->tk_rebind_retry = 2;
+
+    /* starting timestamp */
+    task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+    task->tk_timeouts = 0;
+    task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+    rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
             WARN_ON(RPC_ASSASSINATED(task));
             /* Always release the RPC slot and buffer memory */
             xprt_release(task);
+            rpc_reset_task_statistics(task);
         }
     }
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
     task->tk_calldata = task_setup_data->callback_data;
     INIT_LIST_HEAD(&task->tk_task);
 
-    /* Initialize retry counters */
-    task->tk_garb_retry = 2;
-    task->tk_cred_retry = 2;
-    task->tk_rebind_retry = 2;
-
     task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
     task->tk_owner = current->tgid;
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
     if (task->tk_ops->rpc_call_prepare != NULL)
         task->tk_action = rpc_prepare_task;
 
-    /* starting timestamp */
-    task->tk_start = ktime_get();
+    rpc_init_task_statistics(task);
 
     dprintk("RPC: new task initialized, procpid %u\n",
             task_pid_nr(current));
......
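Note on the sched.c hunks above: the retry counters and the start timestamp now live in rpc_init_task_statistics(), so rpc_reset_task_statistics() can rerun that initialization when rpc_exit_task() releases the transport slot of a task that is about to be restarted; without the reset, a restarted RPC would keep its original tk_start and its measured execution time would be overstated. A user-space sketch of the same reset-the-clock-on-restart idea (the types and helpers below are invented, not the SUNRPC API):

#include <stdio.h>
#include <time.h>

struct task_stats {
    struct timespec start;  /* plays the role of tk_start */
    int retries;            /* stands in for the tk_*_retry counters */
};

static void stats_init(struct task_stats *s)
{
    s->retries = 2;
    clock_gettime(CLOCK_MONOTONIC, &s->start);
}

/* Called whenever the task restarts, so timing covers only the new run. */
static void stats_reset(struct task_stats *s)
{
    stats_init(s);
}

static double elapsed_ms(const struct task_stats *s)
{
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    return (now.tv_sec - s->start.tv_sec) * 1e3 +
           (now.tv_nsec - s->start.tv_nsec) / 1e6;
}

int main(void)
{
    struct task_stats s;

    stats_init(&s);
    /* ... first run of the task ... */
    stats_reset(&s);   /* task restarted: start the measurement over */
    /* ... second run of the task ... */
    printf("accounted time for the restarted run: %.3f ms\n", elapsed_ms(&s));
    return 0;
}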
@@ -995,13 +995,11 @@ static void xprt_alloc_slot(struct rpc_task *task)
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-    if (xprt_dynamic_free_slot(xprt, req))
-        return;
-
-    memset(req, 0, sizeof(*req)); /* mark unused */
     spin_lock(&xprt->reserve_lock);
-    list_add(&req->rq_list, &xprt->free);
+    if (!xprt_dynamic_free_slot(xprt, req)) {
+        memset(req, 0, sizeof(*req)); /* mark unused */
+        list_add(&req->rq_list, &xprt->free);
+    }
     rpc_wake_up_next(&xprt->backlog);
     spin_unlock(&xprt->reserve_lock);
 }
......
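Note on the xprt_free_slot() hunk above: the old code returned early when xprt_dynamic_free_slot() freed a dynamically allocated slot, so rpc_wake_up_next(&xprt->backlog) was never reached and a task waiting on the backlog queue was not woken; the rewrite takes reserve_lock and performs the wake-up on every path. A minimal user-space sketch of the always-wake-a-waiter-when-releasing rule, with a POSIX semaphore standing in for the backlog queue (illustrative only, not the SUNRPC code):

#include <semaphore.h>
#include <stdio.h>

static sem_t backlog;   /* waiters blocked for a request slot */

/* Old shape: the early-return path forgets to wake a waiter. */
static void free_slot_buggy(int dynamically_allocated)
{
    if (dynamically_allocated)
        return;            /* bug: no sem_post(), a waiter stays asleep */
    /* ... put the slot back on the free list ... */
    sem_post(&backlog);
}

/* Fixed shape: the wake-up runs no matter how the slot was allocated. */
static void free_slot_fixed(int dynamically_allocated)
{
    if (!dynamically_allocated) {
        /* ... put the slot back on the free list ... */
    }
    sem_post(&backlog);    /* always give a waiter a chance to run */
}

int main(void)
{
    int value;

    sem_init(&backlog, 0, 0);
    free_slot_buggy(1);    /* leaves the semaphore at 0 */
    free_slot_fixed(1);    /* bumps it to 1 */
    sem_getvalue(&backlog, &value);
    printf("waiters that can proceed: %d\n", value);
    sem_destroy(&backlog);
    return 0;
}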