Commit 589109df authored by Linus Torvalds

Merge tag 'nfs-for-4.19-2' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client bugfixes from Anna Schumaker:
 "These are a handful of fixes for problems that Trond found. Patch #1
  and #3 have the same name; a second issue was found after applying the
  first patch.

  Stable bugfixes:
   - v4.17+: Fix tracepoint Oops in initiate_file_draining()
   - v4.11+: Fix an infinite loop on I/O

  Other fixes:
   - Return errors if a waiting layoutget is killed
   - Don't open code clearing of delegation state"

* tag 'nfs-for-4.19-2' of git://git.linux-nfs.org/projects/anna/linux-nfs:
  NFS: Don't open code clearing of delegation state
  NFSv4.1 fix infinite loop on I/O.
  NFSv4: Fix a tracepoint Oops in initiate_file_draining()
  pNFS: Ensure we return the error if someone kills a waiting layoutget
  NFSv4: Fix a tracepoint Oops in initiate_file_draining()
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
 	write_sequnlock(&state->seqlock);
 }
 
+static void nfs_state_clear_delegation(struct nfs4_state *state)
+{
+	write_seqlock(&state->seqlock);
+	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	write_sequnlock(&state->seqlock);
+}
+
 static int update_open_stateid(struct nfs4_state *state,
 		const nfs4_stateid *open_stateid,
 		const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
 	if (IS_ERR(opendata))
 		return PTR_ERR(opendata);
 	nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-	write_seqlock(&state->seqlock);
-	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-	write_sequnlock(&state->seqlock);
-	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	nfs_state_clear_delegation(state);
 	switch (type & (FMODE_READ|FMODE_WRITE)) {
 		case FMODE_READ|FMODE_WRITE:
 		case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
 		const nfs4_stateid *stateid)
 {
 	nfs_remove_bad_delegation(state->inode, stateid);
-	write_seqlock(&state->seqlock);
-	nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-	write_sequnlock(&state->seqlock);
-	clear_bit(NFS_DELEGATED_STATE, &state->flags);
+	nfs_state_clear_delegation(state);
 }
 
 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
 	delegation = rcu_dereference(NFS_I(state->inode)->delegation);
 	if (delegation == NULL) {
 		rcu_read_unlock();
+		nfs_state_clear_delegation(state);
 		return;
 	}
 
 	nfs4_stateid_copy(&stateid, &delegation->stateid);
-	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
-	    !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
-			&delegation->flags)) {
+	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+		rcu_read_unlock();
+		nfs_state_clear_delegation(state);
+		return;
+	}
+
+	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+				&delegation->flags)) {
 		rcu_read_unlock();
-		nfs_finish_clear_delegation_stateid(state, &stateid);
 		return;
 	}
 
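The hunks above are from fs/nfs/nfs4proc.c and correspond to the "NFS: Don't open code clearing of delegation state" entry: a new nfs_state_clear_delegation() helper replaces the open-coded sequence in nfs4_open_delegation_recall() and nfs_finish_clear_delegation_stateid(), and nfs41_check_delegation_stateid() gains calls to it for the no-delegation and revoked-delegation cases. Note from the removed lines that the two former call sites cleared NFS_DELEGATED_STATE only after write_sequnlock(), while the helper clears it inside the locked section. The following is a minimal userspace sketch of the refactor shape only, with a pthread mutex standing in for the kernel seqlock and all names invented for illustration:

/*
 * Minimal userspace sketch of the refactor shape above; NOT kernel code.
 * A pthread mutex stands in for the kernel seqlock, and the structure is
 * reduced to the fields the sequence touches.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct mock_state {
	pthread_mutex_t lock;       /* stands in for state->seqlock */
	char stateid[16];           /* stateid currently in use */
	char open_stateid[16];      /* stateid handed out by OPEN */
	int delegated;              /* stands in for NFS_DELEGATED_STATE */
};

/*
 * The helper pattern: revert from the delegation stateid to the open
 * stateid and drop the "delegated" flag in one critical section,
 * instead of repeating the sequence at every call site.
 */
static void state_clear_delegation(struct mock_state *st)
{
	pthread_mutex_lock(&st->lock);
	memcpy(st->stateid, st->open_stateid, sizeof(st->stateid));
	st->delegated = 0;
	pthread_mutex_unlock(&st->lock);
}

int main(void)
{
	struct mock_state st = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.stateid = "deleg-stateid",
		.open_stateid = "open-stateid",
		.delegated = 1,
	};

	/* A caller (think nfs4_open_delegation_recall()) now makes one call. */
	state_clear_delegation(&st);
	printf("stateid=%s delegated=%d\n", st.stateid, st.delegated);
	return 0;
}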
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
 
 	if (!nfs4_state_mark_reclaim_nograce(clp, state))
 		return -EBADF;
+	nfs_inode_find_delegation_state_and_recover(state->inode,
+			&state->stateid);
 	dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
 			clp->cl_hostname);
 	nfs4_schedule_state_manager(clp);
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
 		TP_fast_assign(
 			__entry->error = error;
 			__entry->fhandle = nfs_fhandle_hash(fhandle);
-			if (inode != NULL) {
+			if (!IS_ERR_OR_NULL(inode)) {
 				__entry->fileid = NFS_FILEID(inode);
 				__entry->dev = inode->i_sb->s_dev;
 			} else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
 		TP_fast_assign(
 			__entry->error = error;
 			__entry->fhandle = nfs_fhandle_hash(fhandle);
-			if (inode != NULL) {
+			if (!IS_ERR_OR_NULL(inode)) {
 				__entry->fileid = NFS_FILEID(inode);
 				__entry->dev = inode->i_sb->s_dev;
 			} else {
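The two hunks above are from fs/nfs/nfs4trace.h and match the "Fix a tracepoint Oops in initiate_file_draining()" entries: the TP_fast_assign() guard changes from inode != NULL to !IS_ERR_OR_NULL(inode), which indicates the callback path can hand these tracepoints an ERR_PTR-encoded inode, a value that is non-NULL but must not be dereferenced. Below is a small userspace illustration of that distinction; only the macros mirror the semantics of the kernel's <linux/err.h>, and the struct and trace_event() function are invented for the example:

/*
 * Userspace illustration of why "inode != NULL" is not a sufficient
 * guard; NOT the kernel tracepoint itself.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err)        ((void *)(long)(err))
#define IS_ERR(ptr)         ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

struct inode { unsigned long i_fileid; };

static void trace_event(struct inode *inode)
{
	/* Same shape as the fixed TP_fast_assign(): only dereference the
	 * inode when it is a real pointer, not NULL and not an error code
	 * smuggled in pointer form. */
	if (!IS_ERR_OR_NULL(inode))
		printf("fileid=%lu\n", inode->i_fileid);
	else
		printf("no usable inode (NULL or error pointer)\n");
}

int main(void)
{
	struct inode good = { .i_fileid = 42 };

	trace_event(&good);
	trace_event(NULL);
	trace_event(ERR_PTR(-ENOENT)); /* non-NULL, but dereferencing would crash */
	return 0;
}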
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
 	return ret;
 }
 
-static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
 	/*
 	 * send layoutcommit as it can hold up layoutreturn due to lseg
 	 * reference
 	 */
 	pnfs_layoutcommit_inode(lo->plh_inode, false);
-	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+	return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
 			nfs_wait_bit_killable,
-			TASK_UNINTERRUPTIBLE);
+			TASK_KILLABLE);
 }
 
 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
 	}
 
 lookup_again:
-	nfs4_client_recover_expired_lease(clp);
+	lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+	if (IS_ERR(lseg))
+		goto out;
 	first = false;
 	spin_lock(&ino->i_lock);
 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ pnfs_update_layout(struct inode *ino,
 	if (list_empty(&lo->plh_segs) &&
 	    atomic_read(&lo->plh_outstanding) != 0) {
 		spin_unlock(&ino->i_lock);
-		if (wait_var_event_killable(&lo->plh_outstanding,
-					atomic_read(&lo->plh_outstanding) == 0
-					|| !list_empty(&lo->plh_segs)))
+		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+					atomic_read(&lo->plh_outstanding)));
+		if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
 			goto out_put_layout_hdr;
 		pnfs_put_layout_hdr(lo);
 		goto lookup_again;
@@ -1898,8 +1900,11 @@ pnfs_update_layout(struct inode *ino,
 	if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
 				&lo->plh_flags)) {
 		spin_unlock(&ino->i_lock);
-		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
-				TASK_UNINTERRUPTIBLE);
+		lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
+					NFS_LAYOUT_FIRST_LAYOUTGET,
+					TASK_KILLABLE));
+		if (IS_ERR(lseg))
+			goto out_put_layout_hdr;
 		pnfs_put_layout_hdr(lo);
 		dprintk("%s retrying\n", __func__);
 		goto lookup_again;
@@ -1925,7 +1930,8 @@ pnfs_update_layout(struct inode *ino,
 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
 		spin_unlock(&ino->i_lock);
 		dprintk("%s wait for layoutreturn\n", __func__);
-		if (pnfs_prepare_to_retry_layoutget(lo)) {
+		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+		if (!IS_ERR(lseg)) {
 			if (first)
 				pnfs_clear_first_layoutget(lo);
 			pnfs_put_layout_hdr(lo);
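The hunks above are from fs/nfs/pnfs.c and follow one pattern, matching the "pNFS: Ensure we return the error if someone kills a waiting layoutget" entry: waits that were TASK_UNINTERRUPTIBLE become TASK_KILLABLE, pnfs_prepare_to_retry_layoutget() now returns the wait's 0 or negative errno instead of a boolean, and pnfs_update_layout() encodes any wait failure into lseg with ERR_PTR() and takes its error path rather than looping back to lookup_again:. Below is a compilable userspace sketch of that propagation shape; only ERR_PTR(), IS_ERR() and PTR_ERR() follow the kernel's <linux/err.h> semantics, and the wait, the layout-segment type and the function names are stand-ins invented for illustration (the kernel path would see -ERESTARTSYS rather than -EINTR when the waiter is killed):

/*
 * Compilable userspace sketch of the error-propagation shape used in the
 * pnfs.c hunks above; NOT kernel code.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct layout_segment { int id; };

/* Stand-in for the killable wait: 0 on success, -EINTR if "killed". */
static int wait_for_layoutreturn(int killed)
{
	return killed ? -EINTR : 0;
}

/*
 * Stand-in for pnfs_update_layout(): a wait failure is encoded into the
 * pointer that is normally returned, so the caller sees the error
 * instead of the function silently retrying.
 */
static struct layout_segment *update_layout(int killed)
{
	struct layout_segment *lseg;

	lseg = ERR_PTR(wait_for_layoutreturn(killed));
	if (IS_ERR(lseg))
		return lseg;            /* propagate instead of retrying */

	lseg = malloc(sizeof(*lseg));   /* pretend the layoutget succeeded */
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	lseg->id = 1;
	return lseg;
}

int main(void)
{
	struct layout_segment *lseg = update_layout(1);

	if (IS_ERR(lseg))
		printf("layout wait failed: %ld\n", PTR_ERR(lseg));
	else
		free(lseg);
	return 0;
}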