Commit 3c87ef6e authored by Jeff Layton, committed by Trond Myklebust

sunrpc: keep a count of swapfiles associated with the rpc_clnt

Jerome reported seeing a warning pop up when working with a swapfile on
NFS: nfs_swap_activate() can end up calling sk_set_memalloc() while
holding the rcu_read_lock(), and that function can sleep.

To fix that, we need to take a reference to the xprt while holding the
rcu_read_lock(), drop the lock, set the socket up for swapping, and then
drop that reference. But xprt_put() is not exported, and having NFS deal
with the underlying xprt is a bit of a layering violation anyway.
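
In outline, the fix follows this pattern (a simplified sketch; the helper
name is illustrative, and the real version, including the migration retry
loop, is rpc_clnt_swap_activate() in net/sunrpc/clnt.c below):

/* Sketch only: pin the transport under RCU, then do the work that may
 * sleep outside the read-side critical section. */
static int swap_activate_sketch(struct rpc_clnt *clnt)  /* hypothetical helper */
{
        struct rpc_xprt *xprt;
        int ret = -ENODEV;      /* illustrative fallback when no xprt is found */

        rcu_read_lock();
        xprt = xprt_get(rcu_dereference(clnt->cl_xprt));  /* pin the transport */
        rcu_read_unlock();                                /* safe to sleep from here on */

        if (xprt) {
                ret = xs_swapper(xprt, 1);  /* may sleep via sk_set_memalloc() */
                xprt_put(xprt);             /* drop the pinning reference */
        }
        return ret;
}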

Fix this by adding a set of activate/deactivate functions that take an
rpc_clnt pointer instead of an rpc_xprt, and have nfs_swap_activate() and
nfs_swap_deactivate() call those.

Also, add a per-rpc_clnt atomic counter to keep track of the number of
active swapfiles associated with it. When the counter does a 0->1
transition, we enable swapping on the xprt; when it does a 1->0 transition,
we disable swapping on it.
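
Isolated as a sketch (the helper name is hypothetical; the real checks live
in rpc_clnt_swap_activate() and rpc_clnt_swap_deactivate() below), the
counting idiom looks like this:

/* Sketch: only the 0->1 and 1->0 transitions of cl_swapper reach the xprt. */
static bool cl_swapper_transition_sketch(struct rpc_clnt *clnt, bool enable)
{
        if (enable)
                /* true only for the first active swapfile (0 -> 1) */
                return atomic_inc_return(&clnt->cl_swapper) == 1;
        /* true only when the last active swapfile goes away (1 -> 0) */
        return atomic_dec_if_positive(&clnt->cl_swapper) == 0;
}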

This also allows us to be a bit more selective with the RPC_TASK_SWAPPER
flag. If non-swapper and swapper clnts are sharing an xprt, then only the
tasks from the swapper clnt need to carry that flag.

Acked-by: Mel Gorman <mgorman@suse.de>
Reported-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Jeff Layton <jeff.layton@primarydata.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Parent 0d2a970d
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -555,31 +555,22 @@ static int nfs_launder_page(struct page *page)
 	return nfs_wb_page(inode, page);
 }
 
-#ifdef CONFIG_NFS_SWAP
 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
 						sector_t *span)
 {
-	int ret;
 	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
 
 	*span = sis->pages;
-	rcu_read_lock();
-	ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1);
-	rcu_read_unlock();
-	return ret;
+	return rpc_clnt_swap_activate(clnt);
 }
 
 static void nfs_swap_deactivate(struct file *file)
 {
 	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
 
-	rcu_read_lock();
-	xs_swapper(rcu_dereference(clnt->cl_xprt), 0);
-	rcu_read_unlock();
+	rpc_clnt_swap_deactivate(clnt);
 }
-#endif
 
 const struct address_space_operations nfs_file_aops = {
 	.readpage = nfs_readpage,
@@ -596,10 +587,8 @@ const struct address_space_operations nfs_file_aops = {
 	.launder_page = nfs_launder_page,
 	.is_dirty_writeback = nfs_check_dirty_writeback,
 	.error_remove_page = generic_error_remove_page,
-#ifdef CONFIG_NFS_SWAP
 	.swap_activate = nfs_swap_activate,
 	.swap_deactivate = nfs_swap_deactivate,
-#endif
 };
 
 /*
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -56,6 +56,7 @@ struct rpc_clnt {
 	struct rpc_rtt *	cl_rtt;		/* RTO estimator data */
 	const struct rpc_timeout *cl_timeout;	/* Timeout strategy */
 
+	atomic_t		cl_swapper;	/* swapfile count */
 	int			cl_nodelen;	/* nodename length */
 	char			cl_nodename[UNX_MAXNODENAME+1];
 	struct rpc_pipe_dir_head cl_pipedir_objects;
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -268,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int rpc_clnt_swap_activate(struct rpc_clnt *clnt);
+void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt);
+#else
+static inline int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+	return -EINVAL;
+}
+
+static inline void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+}
+#endif /* CONFIG_SUNRPC_SWAP */
+
 #endif /* _LINUX_SUNRPC_SCHED_H_ */
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -891,15 +891,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
 			task->tk_flags |= RPC_TASK_SOFT;
 		if (clnt->cl_noretranstimeo)
 			task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
-		if (sk_memalloc_socks()) {
-			struct rpc_xprt *xprt;
-
-			rcu_read_lock();
-			xprt = rcu_dereference(clnt->cl_xprt);
-			if (xprt->swapper)
-				task->tk_flags |= RPC_TASK_SWAPPER;
-			rcu_read_unlock();
-		}
+		if (atomic_read(&clnt->cl_swapper))
+			task->tk_flags |= RPC_TASK_SWAPPER;
 		/* Add to the client's list of all tasks */
 		spin_lock(&clnt->cl_lock);
 		list_add_tail(&task->tk_task, &clnt->cl_tasks);
@@ -2479,3 +2472,59 @@ void rpc_show_tasks(struct net *net)
 	spin_unlock(&sn->rpc_client_lock);
 }
 #endif
+
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
+int
+rpc_clnt_swap_activate(struct rpc_clnt *clnt)
+{
+	int ret = 0;
+	struct rpc_xprt *xprt;
+
+	if (atomic_inc_return(&clnt->cl_swapper) == 1) {
+retry:
+		rcu_read_lock();
+		xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+		rcu_read_unlock();
+		if (!xprt) {
+			/*
+			 * If we didn't get a reference, then we likely are
+			 * racing with a migration event. Wait for a grace
+			 * period and try again.
+			 */
+			synchronize_rcu();
+			goto retry;
+		}
+		ret = xs_swapper(xprt, 1);
+		xprt_put(xprt);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate);
+
+void
+rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
+{
+	struct rpc_xprt *xprt;
+
+	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) {
+retry:
+		rcu_read_lock();
+		xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
+		rcu_read_unlock();
+		if (!xprt) {
+			/*
+			 * If we didn't get a reference, then we likely are
+			 * racing with a migration event. Wait for a grace
+			 * period and try again.
+			 */
+			synchronize_rcu();
+			goto retry;
+		}
+		xs_swapper(xprt, 0);
+		xprt_put(xprt);
+	}
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate);
+#endif /* CONFIG_SUNRPC_SWAP */
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1955,7 +1955,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
 		msleep_interruptible(15000);
 }
 
-#ifdef CONFIG_SUNRPC_SWAP
+#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
 static void xs_set_memalloc(struct rpc_xprt *xprt)
 {
 	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
@@ -1987,7 +1987,6 @@ int xs_swapper(struct rpc_xprt *xprt, int enable)
 
 	return err;
 }
-EXPORT_SYMBOL_GPL(xs_swapper);
 #else
 static void xs_set_memalloc(struct rpc_xprt *xprt)
 {