Commit c6a556b8 authored by Trond Myklebust

[PATCH] NFS: Make searching and waiting on busy writeback requests more efficient.

 Basically copies the VFS's method for tracking writebacks and applies
 it to the struct nfs_page.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent ab0a3dbe
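
For context, the patch mirrors the page cache's writeback tracking: a request that is under writeback gets a tag in the per-inode radix tree (NFS_PAGE_TAG_WRITEBACK), completion clears the tag and wakes sleepers, and waiters walk only the tagged entries via radix_tree_gang_lookup_tag() instead of testing every outstanding request. The following is a simplified userspace sketch of that tag-and-wait pattern, not kernel code; the flag array, mutex, and function names (writeback_tag, start_writeback, end_writeback, wait_on_writeback) are invented for the illustration.

/*
 * Minimal userspace sketch of the tag-based writeback tracking this patch
 * adopts.  A flag array stands in for the radix-tree tag, a mutex for the
 * per-inode req_lock, and a condition variable for the wake-up performed on
 * completion.  All names here are illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NSLOTS 16

static bool writeback_tag[NSLOTS];	/* stands in for NFS_PAGE_TAG_WRITEBACK */
static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t writeback_done = PTHREAD_COND_INITIALIZER;

/* Tag a request as under writeback (cf. nfs_set_page_writeback_locked). */
static void start_writeback(unsigned int idx)
{
	pthread_mutex_lock(&req_lock);
	writeback_tag[idx] = true;
	pthread_mutex_unlock(&req_lock);
}

/* Clear the tag and wake any sleepers (cf. nfs_clear_page_writeback). */
static void end_writeback(unsigned int idx)
{
	pthread_mutex_lock(&req_lock);
	writeback_tag[idx] = false;
	pthread_cond_broadcast(&writeback_done);
	pthread_mutex_unlock(&req_lock);
}

/*
 * Wait for every tagged slot in [start, end] to complete; untagged slots are
 * skipped entirely (cf. nfs_wait_on_requests walking the tree with
 * radix_tree_gang_lookup_tag).
 */
static void wait_on_writeback(unsigned int start, unsigned int end)
{
	unsigned int i;

	pthread_mutex_lock(&req_lock);
	for (i = start; i <= end && i < NSLOTS; i++) {
		while (writeback_tag[i])
			pthread_cond_wait(&writeback_done, &req_lock);
	}
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	start_writeback(3);
	end_writeback(3);			/* completion wakes waiters */
	wait_on_writeback(0, NSLOTS - 1);	/* returns at once: nothing tagged */
	printf("all writebacks complete\n");
	return 0;
}
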
fs/nfs/pagelist.c:
@@ -111,6 +111,33 @@ void nfs_unlock_request(struct nfs_page *req)
 	nfs_release_request(req);
 }
 
+/**
+ * nfs_set_page_writeback_locked - Lock a request for writeback
+ * @req:
+ */
+int nfs_set_page_writeback_locked(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	if (!nfs_lock_request(req))
+		return 0;
+	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	return 1;
+}
+
+/**
+ * nfs_clear_page_writeback - Unlock request and wake up sleepers
+ */
+void nfs_clear_page_writeback(struct nfs_page *req)
+{
+	struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
+
+	spin_lock(&nfsi->req_lock);
+	radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_WRITEBACK);
+	spin_unlock(&nfsi->req_lock);
+	nfs_unlock_request(req);
+}
+
 /**
  * nfs_clear_request - Free up all resources allocated to the request
  * @req:
@@ -301,7 +328,7 @@ nfs_scan_list(struct list_head *head, struct list_head *dst,
 		if (req->wb_index > idx_end)
 			break;
-		if (!nfs_lock_request(req))
+		if (!nfs_set_page_writeback_locked(req))
 			continue;
 		nfs_list_remove_request(req);
 		nfs_list_add_request(req, dst);
fs/nfs/read.c:
@@ -173,7 +173,6 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
-	nfs_lock_request(new);
 	nfs_list_add_request(new, &one_request);
 	nfs_pagein_one(&one_request, inode);
 	return 0;
@@ -185,7 +184,6 @@ static void nfs_readpage_release(struct nfs_page *req)
 	nfs_clear_request(req);
 	nfs_release_request(req);
-	nfs_unlock_request(req);
 	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
 		req->wb_context->dentry->d_inode->i_sb->s_id,
@@ -553,7 +551,6 @@ readpage_async_filler(void *data, struct page *page)
 	}
 	if (len < PAGE_CACHE_SIZE)
 		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
-	nfs_lock_request(new);
 	nfs_list_add_request(new, desc->head);
 	return 0;
 }
fs/nfs/write.c:
@@ -503,13 +503,12 @@ nfs_wait_on_requests(struct inode *inode, unsigned long idx_start, unsigned int
 	spin_lock(&nfsi->req_lock);
 	next = idx_start;
-	while (radix_tree_gang_lookup(&nfsi->nfs_page_tree, (void **)&req, next, 1)) {
+	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
 		if (req->wb_index > idx_end)
 			break;
 		next = req->wb_index + 1;
-		if (!NFS_WBACK_BUSY(req))
-			continue;
+		BUG_ON(!NFS_WBACK_BUSY(req));
 		atomic_inc(&req->wb_count);
 		spin_unlock(&nfsi->req_lock);
@@ -821,7 +820,7 @@ static void nfs_writepage_release(struct nfs_page *req)
 #else
 	nfs_inode_remove_request(req);
 #endif
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 }
 
 static inline int flush_task_priority(int how)
@@ -952,7 +951,7 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
 		nfs_writedata_free(data);
 	}
 	nfs_mark_request_dirty(req);
-	nfs_unlock_request(req);
+	nfs_clear_page_writeback(req);
 	return -ENOMEM;
 }
@@ -1002,7 +1001,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
 		struct nfs_page *req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1029,7 +1028,7 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_dirty(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return error;
 }
@@ -1121,7 +1120,7 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
 		nfs_inode_remove_request(req);
 #endif
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 }
@@ -1278,7 +1277,7 @@ nfs_commit_list(struct list_head *head, int how)
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req);
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 	}
 	return -ENOMEM;
 }
@@ -1324,7 +1323,7 @@ nfs_commit_done(struct rpc_task *task)
 		dprintk(" mismatch\n");
 		nfs_mark_request_dirty(req);
 	next:
-		nfs_unlock_request(req);
+		nfs_clear_page_writeback(req);
 		res++;
 	}
 	sub_page_state(nr_unstable,res);
include/linux/nfs_page.h:
@@ -19,6 +19,11 @@
 #include <asm/atomic.h>
 
+/*
+ * Valid flags for the radix tree
+ */
+#define NFS_PAGE_TAG_WRITEBACK 1
+
 /*
  * Valid flags for a dirty buffer
  */
@@ -62,6 +67,9 @@ extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
 			unsigned int);
 extern int nfs_wait_on_request(struct nfs_page *);
 extern void nfs_unlock_request(struct nfs_page *req);
+extern int nfs_set_page_writeback_locked(struct nfs_page *req);
+extern void nfs_clear_page_writeback(struct nfs_page *req);
 
 /*
  * Lock the page of an asynchronous request without incrementing the wb_count
@@ -96,10 +104,6 @@ nfs_list_remove_request(struct nfs_page *req)
 {
 	if (list_empty(&req->wb_list))
 		return;
-	if (!NFS_WBACK_BUSY(req)) {
-		printk(KERN_ERR "NFS: unlocked request attempted removed from list!\n");
-		BUG();
-	}
 	list_del_init(&req->wb_list);
 	req->wb_list_head = NULL;
 }