Commit 961a828d authored by Trond Myklebust

SUNRPC: Fix potential races in xprt_lock_write_next()

We have to ensure that the wake up from the waitqueue and the assignment
of xprt->snd_task are atomic. We can do this by assigning the snd_task
while under the waitqueue spinlock.

Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>

Parent 2aeb98f4
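The gist of the fix, before the diffs: previously the waiting task was woken first and xprt->snd_task was assigned afterwards, outside the queue lock, leaving a window in which the woken task could run and observe the transport lock in an inconsistent state. The sketch below is a minimal userspace analogue, not kernel code: pthread primitives stand in for the rpc_wait_queue spinlock, and all names (struct waiter, grant, wake_first_fixed) are invented for illustration.

	#include <pthread.h>
	#include <stdbool.h>

	/* Stand-ins for the kernel objects: queue_lock plays the role of the
	 * rpc_wait_queue spinlock, and "owner" the role of xprt->snd_task. */
	struct waiter {
		pthread_cond_t cond;
		bool granted;
	};

	static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct waiter *owner;	/* analogue of xprt->snd_task */

	/* Old, racy shape: the waiter is woken first and ownership is
	 * assigned afterwards, outside the queue lock. The woken thread can
	 * run in the window between the two steps and see owner != itself. */
	static void wake_next_racy(struct waiter *w)
	{
		pthread_mutex_lock(&queue_lock);
		pthread_cond_signal(&w->cond);
		pthread_mutex_unlock(&queue_lock);
		owner = w;	/* too late: the waiter may already be running */
	}

	/* Fixed shape, mirroring rpc_wake_up_first(): a callback assigns
	 * ownership while the queue lock is still held, and only then is the
	 * waiter woken, so the two steps are atomic as seen by the waiter. */
	static bool grant(struct waiter *w)
	{
		owner = w;	/* analogue of xprt->snd_task = task */
		w->granted = true;
		return true;	/* true means "go ahead and wake" */
	}

	static void wake_first_fixed(struct waiter *w)
	{
		pthread_mutex_lock(&queue_lock);
		if (grant(w))
			pthread_cond_signal(&w->cond);
		pthread_mutex_unlock(&queue_lock);
	}

The rest of the commit is mechanical: rpc_wake_up_first() takes a callback that runs under the queue lock, and each call site moves its post-wake-up bookkeeping into such a callback.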
@@ -222,6 +222,7 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
 	return server->nfs_client->cl_session;
 }
 
+extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
 extern int nfs4_setup_sequence(const struct nfs_server *server,
 		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
 		struct rpc_task *task);
...
@@ -385,17 +385,20 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
 		free_slotid, tbl->highest_used_slotid);
 }
 
+bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
+{
+	rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+	return true;
+}
+
 /*
  * Signal state manager thread if session fore channel is drained
  */
 static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
-	struct rpc_task *task;
-
 	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		task = rpc_wake_up_next(&ses->fc_slot_table.slot_tbl_waitq);
-		if (task)
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+		rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
+				nfs4_set_task_privileged, NULL);
 		return;
 	}
...
@@ -190,23 +190,22 @@ static int nfs41_setup_state_renewal(struct nfs_client *clp)
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
+	struct nfs4_slot_table *tbl;
 	int max_slots;
 
 	if (ses == NULL)
 		return;
+	tbl = &ses->fc_slot_table;
 	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
-		spin_lock(&ses->fc_slot_table.slot_tbl_lock);
-		max_slots = ses->fc_slot_table.max_slots;
+		spin_lock(&tbl->slot_tbl_lock);
+		max_slots = tbl->max_slots;
 		while (max_slots--) {
-			struct rpc_task *task;
-
-			task = rpc_wake_up_next(&ses->fc_slot_table.
-						slot_tbl_waitq);
-			if (!task)
+			if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+					nfs4_set_task_privileged,
+					NULL) == NULL)
 				break;
-			rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
 		}
-		spin_unlock(&ses->fc_slot_table.slot_tbl_lock);
+		spin_unlock(&tbl->slot_tbl_lock);
 	}
 }
...
@@ -235,6 +235,9 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
 					struct rpc_task *);
 void		rpc_wake_up(struct rpc_wait_queue *);
 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
+					bool (*)(struct rpc_task *, void *),
+					void *);
 void		rpc_wake_up_status(struct rpc_wait_queue *, int);
 int		rpc_queue_empty(struct rpc_wait_queue *);
 void		rpc_delay(struct rpc_task *, unsigned long);
...
@@ -422,7 +422,7 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
 /*
  * Wake up the next task on a priority queue.
  */
-static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
+static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
 {
 	struct list_head *q;
 	struct rpc_task *task;
@@ -467,30 +467,54 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
 new_owner:
 	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
-	rpc_wake_up_task_queue_locked(queue, task);
 	return task;
 }
 
+static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
+{
+	if (RPC_IS_PRIORITY(queue))
+		return __rpc_find_next_queued_priority(queue);
+	if (!list_empty(&queue->tasks[0]))
+		return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
+	return NULL;
+}
+
 /*
- * Wake up the next task on the wait queue.
+ * Wake up the first task on the wait queue.
  */
-struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
+struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
+		bool (*func)(struct rpc_task *, void *), void *data)
 {
 	struct rpc_task	*task = NULL;
 
-	dprintk("RPC:       wake_up_next(%p \"%s\")\n",
+	dprintk("RPC:       wake_up_first(%p \"%s\")\n",
 			queue, rpc_qname(queue));
 	spin_lock_bh(&queue->lock);
-	if (RPC_IS_PRIORITY(queue))
-		task = __rpc_wake_up_next_priority(queue);
-	else {
-		task_for_first(task, &queue->tasks[0])
+	task = __rpc_find_next_queued(queue);
+	if (task != NULL) {
+		if (func(task, data))
 			rpc_wake_up_task_queue_locked(queue, task);
+		else
+			task = NULL;
 	}
 	spin_unlock_bh(&queue->lock);
 
 	return task;
 }
+EXPORT_SYMBOL_GPL(rpc_wake_up_first);
+
+static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
+{
+	return true;
+}
+
+/*
+ * Wake up the next task on the wait queue.
+ */
+struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
+{
+	return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
+}
 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
 
 /**
...
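To make the new helper's contract explicit (it is implied by the code above rather than documented): func runs with queue->lock held and bottom halves disabled, so it must not sleep; returning true dequeues and wakes the task, while returning false leaves the task queued and makes rpc_wake_up_first() return NULL. A hypothetical caller-supplied predicate, invented here purely for illustration, might look like:

	/* Illustrative only: wake the first task whose owner matches *data.
	 * Called under queue->lock with bottom halves disabled; must not sleep. */
	static bool rpc_wake_if_owner(struct rpc_task *task, void *data)
	{
		return task->tk_owner == *(pid_t *)data;
	}

Folding the decision into the wake-up is what makes the check and the wake atomic with respect to the queue lock; the xprt.c changes below rely on exactly this to assign xprt->snd_task before the woken task can run.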
@@ -292,54 +292,57 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 	return retval;
 }
 
-static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	xprt->snd_task = task;
 	if (req) {
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
 	}
+	return true;
+}
+
+static void __xprt_lock_write_next(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 		return;
 
-out_unlock:
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_func, xprt))
+		return;
 	xprt_clear_locked(xprt);
 }
 
-static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
 {
-	struct rpc_task *task;
+	struct rpc_xprt *xprt = data;
 	struct rpc_rqst *req;
 
-	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
-		return;
-	if (RPCXPRT_CONGESTED(xprt))
-		goto out_unlock;
-	task = rpc_wake_up_next(&xprt->sending);
-	if (task == NULL)
-		goto out_unlock;
-
 	req = task->tk_rqstp;
 	if (req == NULL) {
 		xprt->snd_task = task;
-		return;
+		return true;
 	}
 	if (__xprt_get_cong(xprt, task)) {
 		xprt->snd_task = task;
 		req->rq_bytes_sent = 0;
 		req->rq_ntrans++;
-		return;
+		return true;
 	}
+	return false;
+}
+
+static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
+{
+	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
+		return;
+	if (RPCXPRT_CONGESTED(xprt))
+		goto out_unlock;
+	if (rpc_wake_up_first(&xprt->sending, __xprt_lock_write_cong_func, xprt))
+		return;
 out_unlock:
 	xprt_clear_locked(xprt);
 }
...