Commit bc0fb201 authored by Chuck Lever, committed by Trond Myklebust

NFS: create common routine for waiting for direct I/O to complete

We're about to add asynchrony to the NFS direct write path.  Begin by
abstracting out the common pieces in the read path.

The first piece is nfs_direct_read_wait, which works the same whether the
process is waiting for a read or a write.

Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent 487b8372
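Before the diff, here is a minimal, self-contained userspace sketch of the pattern this patch factors out: one wait routine shared by reads and writes that returns an "I/O queued" code immediately for async requests and otherwise sleeps until all outstanding requests complete, then reports either the first error or the byte count. The sketch_dreq type, the sketch_* helpers, and the pthread-based waiting are hypothetical stand-ins for illustration only; the actual kernel routine is the nfs_direct_wait() added in the first hunk below.

/* Illustrative only -- not kernel code. Models the sync/async split in
 * nfs_direct_wait(): async callers get the "queued" code back at once,
 * sync callers sleep until all outstanding requests have completed. */
#include <pthread.h>
#include <stdio.h>

#define EIOCBQUEUED_SKETCH 529	/* stand-in for the kernel's EIOCBQUEUED */

struct sketch_dreq {		/* hypothetical analogue of struct nfs_direct_req */
	int is_async;		/* plays the role of dreq->iocb being non-NULL */
	int outstanding;	/* plays the role of dreq->complete */
	int error;		/* first error seen, 0 if none */
	long count;		/* bytes transferred */
	pthread_mutex_t lock;
	pthread_cond_t done;
};

/* Models a completion handler: record the result, drop the outstanding
 * count, and wake any waiter once everything has finished. */
static void sketch_complete(struct sketch_dreq *dreq, int error, long bytes)
{
	pthread_mutex_lock(&dreq->lock);
	if (error && !dreq->error)
		dreq->error = error;
	dreq->count += bytes;
	if (--dreq->outstanding == 0)
		pthread_cond_broadcast(&dreq->done);
	pthread_mutex_unlock(&dreq->lock);
}

/* Common wait routine: works the same for reads and writes. */
static long sketch_direct_wait(struct sketch_dreq *dreq)
{
	long result = -EIOCBQUEUED_SKETCH;

	if (dreq->is_async)	/* async requests don't wait here */
		return result;

	pthread_mutex_lock(&dreq->lock);
	while (dreq->outstanding != 0)
		pthread_cond_wait(&dreq->done, &dreq->lock);
	result = dreq->error ? dreq->error : dreq->count;
	pthread_mutex_unlock(&dreq->lock);
	return result;
}

int main(void)
{
	struct sketch_dreq dreq = {
		.is_async = 0, .outstanding = 1,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
	};

	sketch_complete(&dreq, 0, 4096);	/* pretend one RPC finished */
	printf("result = %ld\n", sketch_direct_wait(&dreq));
	return 0;
}

Nothing in this wait logic depends on whether the completed requests were reads or writes, which is exactly what lets the kernel share a single nfs_direct_wait() between both paths.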
@@ -158,6 +158,30 @@ static void nfs_direct_req_release(struct kref *kref)
 	kmem_cache_free(nfs_direct_cachep, dreq);
 }
 
+/*
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
+
+	result = wait_event_interruptible(dreq->wait,
+				(atomic_read(&dreq->complete) == 0));
+
+	if (!result)
+		result = atomic_read(&dreq->error);
+	if (!result)
+		result = atomic_read(&dreq->count);
+
+out:
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
 /*
  * Note we also set the number of requests we have in the dreq when we are
  * done. This prevents races with I/O completion so we will always wait
@@ -213,7 +237,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 /*
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  *
  * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
  * can't trust the iocb is still valid here if this is a synchronous
@@ -315,35 +339,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 	} while (count != 0);
 }
 
-/*
- * Collects and returns the final error value/byte-count.
- */
-static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
-{
-	int result = -EIOCBQUEUED;
-
-	/* Async requests don't wait here */
-	if (dreq->iocb)
-		goto out;
-
-	result = 0;
-	if (intr) {
-		result = wait_event_interruptible(dreq->wait,
-					(atomic_read(&dreq->complete) == 0));
-	} else {
-		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
-	}
-
-	if (!result)
-		result = atomic_read(&dreq->error);
-	if (!result)
-		result = atomic_read(&dreq->count);
-
-out:
-	kref_put(&dreq->kref, nfs_direct_req_release);
-	return (ssize_t) result;
-}
-
 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
 {
 	ssize_t result;
@@ -366,7 +361,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
 	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
-	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;