Commit b48fa6b9, authored by Neil Brown, committed by J. Bruce Fields

sunrpc: centralise most calls to svc_xprt_received

svc_xprt_received must be called when ->xpo_recvfrom has finished
receiving a message, so that the XPT_BUSY flag is cleared and, if
necessary, the transport is requeued for further work.
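
(For reference, the hand-off being centralised here works roughly as follows. This is a minimal sketch of what svc_xprt_received does, pieced together from the description above; the exact body and any assertions are illustrative, not quoted from the tree.)

	#include <linux/sunrpc/svc_xprt.h>

	/* Illustrative sketch only: the caller owns XPT_BUSY while it is
	 * receiving from the transport; this ends that critical section by
	 * clearing the bit and requeueing the transport so another thread
	 * can pick up any further work.
	 */
	static void sketch_xprt_received(struct svc_xprt *xprt)
	{
		clear_bit(XPT_BUSY, &xprt->xpt_flags);	/* drop the busy claim */
		svc_xprt_enqueue(xprt);			/* requeue for further work */
	}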

This call is currently made in each ->xpo_recvfrom function, often
from multiple different points.  In each case it is the earliest point
on a particular path where it is known that the protection provided by
XPT_BUSY is no longer needed.

However there are (still) some error paths which do not call
svc_xprt_received, and requiring each ->xpo_recvfrom to make the call
does not encourage robustness.

So: move the svc_xprt_received call to be made just after the
call to ->xpo_recvfrom(), and move it out of the various
->xpo_recvfrom methods.

This means that it may not be called at the earliest possible instant,
but this is unlikely to be a measurable performance issue.

Note that there are still other calls to svc_xprt_received as it is
also needed when an xprt is newly created.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Parent 26c0c75e
@@ -743,8 +743,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 		if (rqstp->rq_deferred) {
 			svc_xprt_received(xprt);
 			len = svc_deferred_recv(rqstp);
-		} else
+		} else {
 			len = xprt->xpt_ops->xpo_recvfrom(rqstp);
+			svc_xprt_received(xprt);
+		}
 		dprintk("svc: got len=%d\n", len);
 	}
@@ -547,7 +547,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 			dprintk("svc: recvfrom returned error %d\n", -err);
 			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		}
-		svc_xprt_received(&svsk->sk_xprt);
 		return -EAGAIN;
 	}
 	len = svc_addr_len(svc_addr(rqstp));
@@ -562,11 +561,6 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 		svsk->sk_sk->sk_stamp = skb->tstamp;
 	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
-	/*
-	 * Maybe more packets - kick another thread ASAP.
-	 */
-	svc_xprt_received(&svsk->sk_xprt);
 	len = skb->len - sizeof(struct udphdr);
 	rqstp->rq_arg.len = len;
@@ -917,7 +911,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 		if (len < want) {
 			dprintk("svc: short recvfrom while reading record "
 				"length (%d of %d)\n", len, want);
-			svc_xprt_received(&svsk->sk_xprt);
 			goto err_again; /* record header not complete */
 		}
@@ -953,7 +946,6 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	if (len < svsk->sk_reclen) {
 		dprintk("svc: incomplete TCP record (%d of %d)\n",
 			len, svsk->sk_reclen);
-		svc_xprt_received(&svsk->sk_xprt);
 		goto err_again; /* record not complete */
 	}
 	len = svsk->sk_reclen;
@@ -961,14 +953,11 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
 	return len;
 error:
-	if (len == -EAGAIN) {
+	if (len == -EAGAIN)
 		dprintk("RPC: TCP recv_record got EAGAIN\n");
-		svc_xprt_received(&svsk->sk_xprt);
-	}
 	return len;
 err_delete:
 	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
-	svc_xprt_received(&svsk->sk_xprt);
 err_again:
 	return -EAGAIN;
 }
@@ -1110,7 +1099,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	svsk->sk_tcplen = 0;
 	svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
-	svc_xprt_received(&svsk->sk_xprt);
 	if (serv->sv_stats)
 		serv->sv_stats->nettcpcnt++;
@@ -1119,7 +1107,6 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 err_again:
 	if (len == -EAGAIN) {
 		dprintk("RPC: TCP recvfrom got EAGAIN\n");
-		svc_xprt_received(&svsk->sk_xprt);
 		return len;
 	}
 error:
@@ -566,7 +566,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 		ret, rqstp->rq_arg.len, rqstp->rq_arg.head[0].iov_base,
 		rqstp->rq_arg.head[0].iov_len);
-	svc_xprt_received(rqstp->rq_xprt);
 	return ret;
 }
@@ -665,7 +664,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 		rqstp->rq_arg.head[0].iov_len);
 	rqstp->rq_prot = IPPROTO_MAX;
 	svc_xprt_copy_addrs(rqstp, xprt);
-	svc_xprt_received(xprt);
 	return ret;
 close_out:
@@ -678,6 +676,5 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	 */
 	set_bit(XPT_CLOSE, &xprt->xpt_flags);
 defer:
-	svc_xprt_received(xprt);
 	return 0;
 }