Commit ef3f5434 authored by Trond Myklebust

SUNRPC: Distinguish between the slot allocation list and receive queue

When storing a struct rpc_rqst on the slot allocation list, we currently
use the same field 'rq_list' as we use to store the request on the
receive queue. Since the structure is never on both lists at the same
time, this is OK.
However, for clarity, let's make that a union with different names for
the different lists so that we can more easily distinguish between
the two states.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Parent: 78b576ce
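The pattern at the heart of this patch is an anonymous union of two struct list_head members: both names alias the same prev/next pointers, so the change costs nothing at runtime and only documents which queue the request sits on at each call site. Below is a minimal userspace sketch of the same idea, not kernel code; the names (list_node, fake_rqst, slot_link, recv_link) are invented for illustration, and it builds with any C11 compiler.

/* Sketch of the union-of-list-heads pattern (hypothetical names, not
 * from the kernel tree). A node may sit on at most one of the two
 * lists at any given time, because both members share the storage. */
#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

struct fake_rqst {
	int xid;
	union {
		struct list_node slot_link;	/* on the slot allocation list */
		struct list_node recv_link;	/* on the receive queue */
	};
};

int main(void)
{
	struct fake_rqst req = { .xid = 42 };

	/* Same storage under two names: the addresses are identical. */
	printf("slot_link: %p\nrecv_link: %p\n",
	       (void *)&req.slot_link, (void *)&req.recv_link);
	return 0;
}

The safety of this relies on the invariant the commit message states: an rpc_rqst is never on the slot allocation list and the receive queue at the same time. The compiler does not enforce that; a list_del() through the wrong name would corrupt whichever list the node is actually linked on.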
include/linux/sunrpc/xprt.h:

@@ -82,7 +82,11 @@ struct rpc_rqst {
 	struct page	**rq_enc_pages;	/* scratch pages for use by
 					   gss privacy code */
 	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
-	struct list_head	rq_list;
+	union {
+		struct list_head	rq_list;	/* Slot allocation list */
+		struct list_head	rq_recv;	/* Receive queue */
+	};
 
 	void			*rq_buffer;	/* Call XDR encode buffer */
 	size_t			rq_callsize;
@@ -249,7 +253,8 @@ struct rpc_xprt {
 	struct list_head	bc_pa_list;	/* List of preallocated
 						 * backchannel rpc_rqst's */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
-	struct list_head	recv;
+	struct list_head	recv_queue;	/* Receive queue */
 
 	struct {
 		unsigned long		bind_count,	/* total number of binds */
net/sunrpc/xprt.c:

@@ -708,7 +708,7 @@ static void
 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
 	__must_hold(&xprt->transport_lock)
 {
-	if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+	if (list_empty(&xprt->recv_queue) && xprt_has_timer(xprt))
 		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
 }
@@ -718,7 +718,7 @@ xprt_init_autodisconnect(struct timer_list *t)
 	struct rpc_xprt *xprt = from_timer(xprt, t, timer);
 
 	spin_lock(&xprt->transport_lock);
-	if (!list_empty(&xprt->recv))
+	if (!list_empty(&xprt->recv_queue))
 		goto out_abort;
 	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
 	xprt->last_used = jiffies;
@@ -848,7 +848,7 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
 {
 	struct rpc_rqst *entry;
 
-	list_for_each_entry(entry, &xprt->recv, rq_list)
+	list_for_each_entry(entry, &xprt->recv_queue, rq_recv)
 		if (entry->rq_xid == xid) {
 			trace_xprt_lookup_rqst(xprt, xid, 0);
 			entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
@@ -938,7 +938,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 			sizeof(req->rq_private_buf));
 
 	/* Add request to the receive list */
-	list_add_tail(&req->rq_list, &xprt->recv);
+	list_add_tail(&req->rq_recv, &xprt->recv_queue);
 	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
 	spin_unlock(&xprt->queue_lock);
@@ -957,7 +957,7 @@ static void
 xprt_request_dequeue_receive_locked(struct rpc_task *task)
 {
 	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
-		list_del(&task->tk_rqstp->rq_list);
+		list_del(&task->tk_rqstp->rq_recv);
 }
 
 /**
@@ -1492,7 +1492,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
 	spin_lock_init(&xprt->queue_lock);
 	INIT_LIST_HEAD(&xprt->free);
-	INIT_LIST_HEAD(&xprt->recv);
+	INIT_LIST_HEAD(&xprt->recv_queue);
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	spin_lock_init(&xprt->bc_pa_lock);
 	INIT_LIST_HEAD(&xprt->bc_pa_list);