Commit 1751c363 authored by Trond Myklebust

NFS: Cleanup of the nfs_pageio code in preparation for a pnfs bugfix

We need to ensure that the layouts are set up before we can decide to
coalesce requests. To do so, we want to further split up the struct
nfs_pageio_descriptor operations into an initialisation callback, a
coalescing test callback, and a 'do i/o' callback.

This patch cleans up the existing callback methods before adding the
'initialisation' callback.
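
As an illustrative aside (not part of the patch): the cleanup replaces the descriptor's separate pg_test/pg_doio function pointers with a single const ops table. The sketch below models that dispatch pattern as a standalone C program; pgio_desc, pgio_ops, generic_pg_test and generic_pg_doio are simplified stand-ins, not the kernel structures, and pg_test here drops the prev/req arguments for brevity.

    /* Standalone model of the ops-table dispatch introduced by this series.
     * Simplified stand-in types; not the real kernel structs. */
    #include <stdbool.h>
    #include <stdio.h>

    struct pgio_desc;

    struct pgio_ops {
            bool (*pg_test)(struct pgio_desc *desc);   /* may more requests be coalesced? */
            int  (*pg_doio)(struct pgio_desc *desc);   /* flush the queued pages */
    };

    struct pgio_desc {
            const struct pgio_ops *pg_ops;             /* replaces separate pg_test/pg_doio members */
            int pg_count;
    };

    static bool generic_pg_test(struct pgio_desc *desc)
    {
            return desc->pg_count > 0;
    }

    static int generic_pg_doio(struct pgio_desc *desc)
    {
            printf("flushing %d queued pages\n", desc->pg_count);
            return 0;
    }

    static const struct pgio_ops read_ops = {
            .pg_test = generic_pg_test,
            .pg_doio = generic_pg_doio,
    };

    int main(void)
    {
            struct pgio_desc desc = { .pg_ops = &read_ops, .pg_count = 3 };

            /* Callers now dispatch through the table, e.g. pgio->pg_ops->pg_test(...). */
            if (desc.pg_ops->pg_test(&desc))
                    desc.pg_ops->pg_doio(&desc);
            return 0;
    }
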
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent f062eb6c
@@ -658,7 +658,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
* return true : coalesce page
* return false : don't coalesce page
*/
bool
static bool
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
struct nfs_page *req)
{
@@ -681,6 +681,16 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
return (p_stripe == r_stripe);
}
static const struct nfs_pageio_ops filelayout_pg_read_ops = {
.pg_test = filelayout_pg_test,
.pg_doio = nfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops filelayout_pg_write_ops = {
.pg_test = filelayout_pg_test,
.pg_doio = nfs_generic_pg_writepages,
};
static bool filelayout_mark_pnfs_commit(struct pnfs_layout_segment *lseg)
{
return !FILELAYOUT_LSEG(lseg)->commit_through_mds;
@@ -878,7 +888,8 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.owner = THIS_MODULE,
.alloc_lseg = filelayout_alloc_lseg,
.free_lseg = filelayout_free_lseg,
.pg_test = filelayout_pg_test,
.pg_read_ops = &filelayout_pg_read_ops,
.pg_write_ops = &filelayout_pg_write_ops,
.mark_pnfs_commit = filelayout_mark_pnfs_commit,
.choose_commit_list = filelayout_choose_commit_list,
.commit_pagelist = filelayout_commit_pagelist,
......
@@ -1007,6 +1007,16 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
}
static const struct nfs_pageio_ops objio_pg_read_ops = {
.pg_test = objio_pg_test,
.pg_doio = nfs_generic_pg_readpages,
};
static const struct nfs_pageio_ops objio_pg_write_ops = {
.pg_test = objio_pg_test,
.pg_doio = nfs_generic_pg_writepages,
};
static struct pnfs_layoutdriver_type objlayout_type = {
.id = LAYOUT_OSD2_OBJECTS,
.name = "LAYOUT_OSD2_OBJECTS",
@@ -1020,7 +1030,8 @@ static struct pnfs_layoutdriver_type objlayout_type = {
.read_pagelist = objlayout_read_pagelist,
.write_pagelist = objlayout_write_pagelist,
.pg_test = objio_pg_test,
.pg_read_ops = &objio_pg_read_ops,
.pg_write_ops = &objio_pg_write_ops,
.free_deviceid_node = objio_free_deviceid_node,
......
@@ -230,7 +230,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
*/
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
int (*doio)(struct nfs_pageio_descriptor *),
const struct nfs_pageio_ops *pg_ops,
size_t bsize,
int io_flags)
{
@@ -241,12 +241,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_base = 0;
desc->pg_moreio = 0;
desc->pg_inode = inode;
desc->pg_doio = doio;
desc->pg_ops = pg_ops;
desc->pg_ioflags = io_flags;
desc->pg_error = 0;
desc->pg_lseg = NULL;
desc->pg_test = nfs_generic_pg_test;
pnfs_pageio_init(desc, inode);
}
/**
@@ -276,7 +274,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
return false;
if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
return false;
return pgio->pg_test(pgio, prev, req);
return pgio->pg_ops->pg_test(pgio, prev, req);
}
/**
@@ -311,7 +309,7 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
if (!list_empty(&desc->pg_list)) {
int error = desc->pg_doio(desc);
int error = desc->pg_ops->pg_doio(desc);
if (error < 0)
desc->pg_error = error;
else
......
@@ -1055,6 +1055,30 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
goto out;
}
bool
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
return true;
}
bool
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
{
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
if (ld == NULL)
return false;
nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
return true;
}
bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
struct nfs_page *req)
......
@@ -87,7 +87,8 @@ struct pnfs_layoutdriver_type {
void (*free_lseg) (struct pnfs_layout_segment *lseg);
/* test for nfs page cache coalescing */
bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
const struct nfs_pageio_ops *pg_read_ops;
const struct nfs_pageio_ops *pg_write_ops;
/* Returns true if layoutdriver wants to divert this request to
* driver's commit routine.
@@ -152,6 +153,10 @@ struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
loff_t pos, u64 count, enum pnfs_iomode access_type,
gfp_t gfp_flags);
bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
void unset_pnfs_layoutdriver(struct nfs_server *);
enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
@@ -293,15 +298,6 @@ static inline int pnfs_return_layout(struct inode *ino)
return 0;
}
static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio,
struct inode *inode)
{
struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
if (ld)
pgio->pg_test = ld->pg_test;
}
#else /* CONFIG_NFS_V4_1 */
static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
@@ -385,9 +381,14 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
{
}
static inline void pnfs_pageio_init(struct nfs_pageio_descriptor *pgio,
struct inode *inode)
static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
return false;
}
static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
{
return false;
}
static inline void
......
@@ -32,6 +32,7 @@
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc);
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc);
static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;
@@ -113,6 +114,20 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
}
}
static void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode)
{
nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
NFS_SERVER(inode)->rsize, 0);
}
static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct inode *inode)
{
if (!pnfs_pageio_init_read(pgio, inode))
nfs_pageio_init_read_mds(pgio, inode);
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
struct page *page)
{
@@ -131,14 +146,11 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
if (len < PAGE_CACHE_SIZE)
zero_user_segment(page, len, PAGE_CACHE_SIZE);
nfs_pageio_init(&pgio, inode, NULL, 0, 0);
nfs_pageio_init_read(&pgio, inode);
nfs_list_add_request(new, &pgio.pg_list);
pgio.pg_count = len;
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
nfs_pagein_multi(&pgio);
else
nfs_pagein_one(&pgio);
nfs_pageio_complete(&pgio);
return 0;
}
@@ -365,6 +377,20 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
return ret;
}
int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
return nfs_pagein_multi(desc);
return nfs_pagein_one(desc);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_readpages);
static const struct nfs_pageio_ops nfs_pageio_read_ops = {
.pg_test = nfs_generic_pg_test,
.pg_doio = nfs_generic_pg_readpages,
};
/*
* This is the callback from RPC telling us whether a reply was
* received or some error occurred (timeout or socket shutdown).
@@ -635,8 +661,6 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
.pgio = &pgio,
};
struct inode *inode = mapping->host;
struct nfs_server *server = NFS_SERVER(inode);
size_t rsize = server->rsize;
unsigned long npages;
int ret = -ESTALE;
@@ -664,10 +688,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
if (ret == 0)
goto read_complete; /* all pages were read */
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
nfs_pageio_init(&pgio, inode, nfs_pagein_one, rsize, 0);
nfs_pageio_init_read(&pgio, inode);
ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
......
@@ -1033,15 +1033,31 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
return ret;
}
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
if (desc->pg_bsize < PAGE_CACHE_SIZE)
return nfs_flush_multi(desc);
return nfs_flush_one(desc);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_writepages);
static const struct nfs_pageio_ops nfs_pageio_write_ops = {
.pg_test = nfs_generic_pg_test,
.pg_doio = nfs_generic_pg_writepages,
};
static void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
size_t wsize = NFS_SERVER(inode)->wsize;
nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
NFS_SERVER(inode)->wsize, ioflags);
}
if (wsize < PAGE_CACHE_SIZE)
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
else
nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
if (!pnfs_pageio_init_write(pgio, inode, ioflags))
nfs_pageio_init_write_mds(pgio, inode, ioflags);
}
/*
......
@@ -55,6 +55,12 @@ struct nfs_page {
struct nfs_writeverf wb_verf; /* Commit cookie */
};
struct nfs_pageio_descriptor;
struct nfs_pageio_ops {
bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
int (*pg_doio)(struct nfs_pageio_descriptor *);
};
struct nfs_pageio_descriptor {
struct list_head pg_list;
unsigned long pg_bytes_written;
@@ -64,11 +70,10 @@ struct nfs_pageio_descriptor {
char pg_moreio;
struct inode *pg_inode;
int (*pg_doio)(struct nfs_pageio_descriptor *);
const struct nfs_pageio_ops *pg_ops;
int pg_ioflags;
int pg_error;
struct pnfs_layout_segment *pg_lseg;
bool (*pg_test)(struct nfs_pageio_descriptor *, struct nfs_page *, struct nfs_page *);
};
#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -85,7 +90,7 @@ extern int nfs_scan_list(struct nfs_inode *nfsi, struct list_head *dst,
pgoff_t idx_start, unsigned int npages, int tag);
extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
struct inode *inode,
int (*doio)(struct nfs_pageio_descriptor *desc),
const struct nfs_pageio_ops *pg_ops,
size_t bsize,
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -100,6 +105,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_tag_locked(struct nfs_page *req);
extern void nfs_clear_page_tag_locked(struct nfs_page *req);
extern int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
extern int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
/*
* Lock the page of an asynchronous request without getting a new reference
......