Commit ef2c488c authored by Anna Schumaker, committed by Trond Myklebust

NFS: Create a generic_pgio function

These functions are almost identical on both the read and write side.
FLUSH_COND_STABLE will never be set for the read path, so leaving it in
the generic code won't hurt anything.
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Parent 844c9e69
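
The claim that the FLUSH_COND_STABLE handling is safe to share rests on the read path never setting the flag: the read-side setup removed below passes an ioflags value of 0, so the generic clearing is a no-op for reads. A minimal userspace model of that point, with stand-in types and an assumed flag value (not the kernel's definitions):

```c
#include <stdio.h>

#define FLUSH_COND_STABLE 0x20  /* assumed value, for illustration only */

struct pgio_desc { int pg_ioflags; };

/* Models the generic clearing: harmless when the flag was never set. */
static void clear_cond_stable(struct pgio_desc *desc, int more_io_pending)
{
	if ((desc->pg_ioflags & FLUSH_COND_STABLE) && more_io_pending)
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
}

int main(void)
{
	struct pgio_desc read_desc  = { .pg_ioflags = 0 };  /* reads never set it */
	struct pgio_desc write_desc = { .pg_ioflags = FLUSH_COND_STABLE };

	clear_cond_stable(&read_desc, 1);   /* no-op on the read path */
	clear_cond_stable(&write_desc, 1);  /* downgrades the write */
	printf("read=%#x write=%#x\n", read_desc.pg_ioflags, write_desc.pg_ioflags);
	return 0;
}
```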
@@ -237,14 +237,10 @@ extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
int nfs_iocounter_wait(struct nfs_io_counter *c);
extern const struct rpc_call_ops nfs_pgio_common_ops;
struct nfs_rw_header *nfs_rw_header_alloc(const struct nfs_rw_ops *);
void nfs_rw_header_free(struct nfs_pgio_header *);
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *, unsigned int);
void nfs_pgio_data_release(struct nfs_pgio_data *);
int nfs_pgio_error(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
void nfs_pgio_rpcsetup(struct nfs_pgio_data *, unsigned int, unsigned int, int,
struct nfs_commit_info *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
static inline void nfs_iocounter_init(struct nfs_io_counter *c)
{
@@ -410,8 +406,6 @@ extern int nfs_initiate_read(struct rpc_clnt *clnt,
struct nfs_pgio_data *data,
const struct rpc_call_ops *call_ops, int flags);
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
/* super.c */
@@ -429,8 +423,6 @@ int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags, bool force_mds,
const struct nfs_pgio_completion_ops *compl_ops);
extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr);
extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
extern void nfs_commit_free(struct nfs_commit_data *p);
extern int nfs_initiate_write(struct rpc_clnt *clnt,
......
@@ -27,6 +27,7 @@
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;
static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
{
@@ -338,8 +339,8 @@ EXPORT_SYMBOL_GPL(nfs_rw_header_free);
* @hdr: The header making a request
* @pagecount: Number of pages to create
*/
struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount)
static struct nfs_pgio_data *nfs_pgio_data_alloc(struct nfs_pgio_header *hdr,
unsigned int pagecount)
{
struct nfs_pgio_data *data, *prealloc;
@@ -396,7 +397,7 @@ EXPORT_SYMBOL_GPL(nfs_pgio_data_release);
* @how: How to commit data (writes only)
* @cinfo: Commit information for the call (writes only)
*/
void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
static void nfs_pgio_rpcsetup(struct nfs_pgio_data *data,
unsigned int count, unsigned int offset,
int how, struct nfs_commit_info *cinfo)
{
@@ -451,7 +452,7 @@ static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
* @desc: IO descriptor
* @hdr: pageio header
*/
int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
static int nfs_pgio_error(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_pgio_data *data;
@@ -534,6 +535,101 @@ static void nfs_pgio_result(struct rpc_task *task, void *calldata)
data->header->rw_ops->rw_result(task, data);
}
/*
* Generate multiple small requests to read or write a single
contiguous dirty area on one page.
*/
static int nfs_pgio_multi(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_pgio_data *data;
size_t wsize = desc->pg_bsize, nbytes;
unsigned int offset;
int requests = 0;
struct nfs_commit_info cinfo;
nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
desc->pg_count > wsize))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
offset = 0;
nbytes = desc->pg_count;
do {
size_t len = min(nbytes, wsize);
data = nfs_pgio_data_alloc(hdr, 1);
if (!data)
return nfs_pgio_error(desc, hdr);
data->pages.pagevec[0] = page;
nfs_pgio_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
list_add(&data->list, &hdr->rpc_list);
requests++;
nbytes -= len;
offset += len;
} while (nbytes != 0);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
/*
* Create an RPC task for the given read or write request and kick it.
* The page must have been locked by the caller.
*
* It may happen that the page we're passed is not marked dirty.
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
static int nfs_pgio_one(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
struct nfs_pgio_data *data;
struct list_head *head = &desc->pg_list;
struct nfs_commit_info cinfo;
data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
desc->pg_count));
if (!data)
return nfs_pgio_error(desc, hdr);
nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
list_add(&data->list, &hdr->rpc_list);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
return nfs_pgio_multi(desc, hdr);
return nfs_pgio_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);
static bool nfs_match_open_context(const struct nfs_open_context *ctx1,
const struct nfs_open_context *ctx2)
{
@@ -741,7 +837,7 @@ void nfs_destroy_nfspagecache(void)
kmem_cache_destroy(nfs_page_cachep);
}
const struct rpc_call_ops nfs_pgio_common_ops = {
static const struct rpc_call_ops nfs_pgio_common_ops = {
.rpc_call_prepare = nfs_pgio_prepare,
.rpc_call_done = nfs_pgio_result,
.rpc_release = nfs_pgio_release,
......
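
The dispatch added to pagelist.c above sends I/O down the multi-request path whenever the server's block size is smaller than a page. A minimal sketch of the splitting arithmetic in nfs_pgio_multi(), modeling only the offset/length loop (allocation and list handling omitted):

```c
#include <stdio.h>
#include <stddef.h>

static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

/* Carve a byte range into bsize-sized RPCs at increasing offsets,
 * mirroring the do/while loop in nfs_pgio_multi(). */
static int split_requests(size_t pg_count, size_t bsize)
{
	size_t nbytes = pg_count, offset = 0;
	int requests = 0;

	do {
		size_t len = min_size(nbytes, bsize);
		printf("RPC %d: offset=%zu len=%zu\n", requests, offset, len);
		requests++;
		nbytes -= len;
		offset += len;
	} while (nbytes != 0);
	return requests;
}

int main(void)
{
	split_requests(4096, 1024);  /* 4K page, 1K rsize/wsize -> four RPCs */
	return 0;
}
```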
@@ -1607,7 +1607,7 @@ pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_flush(desc, hdr);
ret = nfs_generic_pgio(desc, hdr);
if (ret != 0) {
pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
@@ -1766,7 +1766,7 @@ pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_pagein(desc, hdr);
ret = nfs_generic_pgio(desc, hdr);
if (ret != 0) {
pnfs_put_lseg(desc->pg_lseg);
desc->pg_lseg = NULL;
......
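
Both pNFS call sites above now funnel into the same entry point. A hedged sketch of the caller shape they share with the MDS paths further below; every name here is a simplified stand-in, not the kernel API:

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct pgio_header { int refcnt; };
struct pgio_desc   { int unused; };

/* Stands in for nfs_generic_pgio(): builds hdr->rpc_list, 0 on success. */
static int generic_pgio(struct pgio_desc *desc, struct pgio_header *hdr)
{
	(void)desc; (void)hdr;
	return 0;
}

/* The shared shape: take a reference on the header, build the RPC list,
 * then either send it or unwind (the real pNFS code also drops the
 * layout segment on failure). */
static int issue_pgio(struct pgio_desc *desc, struct pgio_header *hdr)
{
	int ret;

	hdr->refcnt++;              /* atomic_inc(&hdr->refcnt) */
	ret = generic_pgio(desc, hdr);
	if (ret != 0)
		printf("error: unwind and return %d\n", ret);
	else
		printf("send the RPCs queued on hdr->rpc_list\n");
	return ret;
}

int main(void)
{
	struct pgio_header hdr = { 0 };
	struct pgio_desc desc = { 0 };
	return issue_pgio(&desc, &hdr);
}
```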
@@ -237,85 +237,6 @@ static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
.completion = nfs_read_completion,
};
/*
* Generate multiple requests to fill a single page.
*
* We optimize to reduce the number of read operations on the wire. If we
* detect that we're reading a page, or an area of a page, that is past the
* end of file, we do not generate NFS read operations but just clear the
* parts of the page that would have come back zero from the server anyway.
*
* We rely on the cached value of i_size to make this determination; another
* client can fill pages on the server past our cached end-of-file, but we
* won't see the new data until our attribute cache is updated. This is more
* or less conventional NFS client behavior.
*/
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_pgio_data *data;
size_t rsize = desc->pg_bsize, nbytes;
unsigned int offset;
offset = 0;
nbytes = desc->pg_count;
do {
size_t len = min(nbytes, rsize);
data = nfs_pgio_data_alloc(hdr, 1);
if (!data)
return nfs_pgio_error(desc, hdr);
data->pages.pagevec[0] = page;
nfs_pgio_rpcsetup(data, len, offset, 0, NULL);
list_add(&data->list, &hdr->rpc_list);
nbytes -= len;
offset += len;
} while (nbytes != 0);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
struct nfs_pgio_data *data;
struct list_head *head = &desc->pg_list;
data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
desc->pg_count));
if (!data)
return nfs_pgio_error(desc, hdr);
pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
nfs_pgio_rpcsetup(data, desc->pg_count, 0, 0, NULL);
list_add(&data->list, &hdr->rpc_list);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
return nfs_pagein_multi(desc, hdr);
return nfs_pagein_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_pagein);
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
struct nfs_rw_header *rhdr;
@@ -330,7 +251,7 @@ static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
hdr = &rhdr->header;
nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_pagein(desc, hdr);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0)
ret = nfs_do_multiple_reads(&hdr->rpc_list,
desc->pg_rpc_callops);
......
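
The comment removed above describes the read-side EOF optimization: regions past the cached i_size are never requested from the server, just zeroed locally. A small userspace model of that determination for a single page-sized read (hypothetical helper, not the kernel implementation):

```c
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* How many bytes actually need to come from the server for a
 * page-sized read at 'offset', given the cached i_size. */
static unsigned int bytes_to_request(unsigned long long i_size,
				     unsigned long long offset)
{
	if (offset >= i_size)
		return 0;                /* entirely past EOF: no RPC at all */
	if (i_size - offset < PAGE_SIZE)
		return (unsigned int)(i_size - offset);  /* short read */
	return PAGE_SIZE;
}

int main(void)
{
	unsigned char page[PAGE_SIZE];
	unsigned long long i_size = 6000, offset = 4096;
	unsigned int n = bytes_to_request(i_size, offset);

	memset(page + n, 0, PAGE_SIZE - n);  /* zero the part past cached EOF */
	printf("request %u bytes, zero the remaining %u\n", n, PAGE_SIZE - n);
	return 0;
}
```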
@@ -1044,101 +1044,6 @@ static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
.completion = nfs_write_completion,
};
/*
* Generate multiple small requests to write out a single
* contiguous dirty area on one page.
*/
static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req = hdr->req;
struct page *page = req->wb_page;
struct nfs_pgio_data *data;
size_t wsize = desc->pg_bsize, nbytes;
unsigned int offset;
int requests = 0;
struct nfs_commit_info cinfo;
nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
desc->pg_count > wsize))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
offset = 0;
nbytes = desc->pg_count;
do {
size_t len = min(nbytes, wsize);
data = nfs_pgio_data_alloc(hdr, 1);
if (!data)
return nfs_pgio_error(desc, hdr);
data->pages.pagevec[0] = page;
nfs_pgio_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
list_add(&data->list, &hdr->rpc_list);
requests++;
nbytes -= len;
offset += len;
} while (nbytes != 0);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
/*
* Create an RPC task for the given write request and kick it.
* The page must have been locked by the caller.
*
* It may happen that the page we're passed is not marked dirty.
* This is the case if nfs_updatepage detects a conflicting request
* that has been written but not committed.
*/
static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
struct nfs_page *req;
struct page **pages;
struct nfs_pgio_data *data;
struct list_head *head = &desc->pg_list;
struct nfs_commit_info cinfo;
data = nfs_pgio_data_alloc(hdr, nfs_page_array_len(desc->pg_base,
desc->pg_count));
if (!data)
return nfs_pgio_error(desc, hdr);
nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
pages = data->pages.pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
nfs_list_add_request(req, &hdr->pages);
*pages++ = req->wb_page;
}
if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
(desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
desc->pg_ioflags &= ~FLUSH_COND_STABLE;
/* Set up the argument struct */
nfs_pgio_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
list_add(&data->list, &hdr->rpc_list);
desc->pg_rpc_callops = &nfs_pgio_common_ops;
return 0;
}
int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
return nfs_flush_multi(desc, hdr);
return nfs_flush_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_flush);
static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
struct nfs_rw_header *whdr;
@@ -1153,7 +1058,7 @@ static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
hdr = &whdr->header;
nfs_pgheader_init(desc, hdr, nfs_rw_header_free);
atomic_inc(&hdr->refcnt);
ret = nfs_generic_flush(desc, hdr);
ret = nfs_generic_pgio(desc, hdr);
if (ret == 0)
ret = nfs_do_multiple_writes(&hdr->rpc_list,
desc->pg_rpc_callops,
......
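
The write-side logic removed here survives verbatim in the generic code: a conditionally-stable write stays stable only if no more I/O is coming, nothing is queued for COMMIT, and the range fits in a single RPC. A sketch of that three-way test, with an assumed flag value and stand-in helpers:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FLUSH_COND_STABLE 0x20  /* assumed flag value, for illustration */

/* Drop the conditional-stable bit when more I/O is coming, commits are
 * already pending, or the range will not fit in one wsize-sized RPC;
 * the server then sees UNSTABLE writes followed by a COMMIT. */
static int maybe_downgrade(int ioflags, bool moreio, int reqs_to_commit,
			   size_t count, size_t wsize)
{
	if ((ioflags & FLUSH_COND_STABLE) &&
	    (moreio || reqs_to_commit || count > wsize))
		ioflags &= ~FLUSH_COND_STABLE;
	return ioflags;
}

int main(void)
{
	/* 8K of dirty data against a 4K wsize spans two RPCs: downgraded. */
	printf("flags=%#x\n",
	       maybe_downgrade(FLUSH_COND_STABLE, false, 0, 8192, 4096));
	return 0;
}
```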