Commit 555ee3af authored by Chuck Lever, committed by Trond Myklebust

[PATCH] RPC: clean up after nocong was removed

 Clean-up:  Move some macros that are specific to the Van Jacobson
 implementation into xprt.c.  Get rid of the cong_wait field in
 rpc_xprt, which is no longer used.  Get rid of xprt_clear_backlog.

 Test-plan:
 Compile with CONFIG_NFS enabled.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Parent ed63c003
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -15,20 +15,6 @@
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xdr.h>
 
-/*
- * The transport code maintains an estimate on the maximum number of out-
- * standing RPC requests, using a smoothed version of the congestion
- * avoidance implemented in 44BSD. This is basically the Van Jacobson
- * congestion algorithm: If a retransmit occurs, the congestion window is
- * halved; otherwise, it is incremented by 1/cwnd when
- *
- * - a reply is received and
- * - a full number of requests are outstanding and
- * - the congestion window hasn't been updated recently.
- *
- * Upper procedures may check whether a request would block waiting for
- * a free RPC slot by using the RPC_CONGESTED() macro.
- */
 extern unsigned int xprt_udp_slot_table_entries;
 extern unsigned int xprt_tcp_slot_table_entries;
 
@@ -36,12 +22,6 @@ extern unsigned int xprt_tcp_slot_table_entries;
 #define RPC_DEF_SLOT_TABLE (16U)
 #define RPC_MAX_SLOT_TABLE (128U)
 
-#define RPC_CWNDSHIFT (8U)
-#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
-#define RPC_INITCWND RPC_CWNDSCALE
-#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
-#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
-
 /* Default timeout values */
 #define RPC_MAX_UDP_TIMEOUT (60*HZ)
 #define RPC_MAX_TCP_TIMEOUT (600*HZ)
@@ -213,8 +193,6 @@ struct rpc_xprt {
 	void (*old_data_ready)(struct sock *, int);
 	void (*old_state_change)(struct sock *);
 	void (*old_write_space)(struct sock *);
-
-	wait_queue_head_t cong_wait;
 };
 
 #define XPRT_LAST_FRAG (1 << 0)
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,23 @@ static inline void do_xprt_reserve(struct rpc_task *);
 static void xprt_connect_status(struct rpc_task *task);
 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 
-static int xprt_clear_backlog(struct rpc_xprt *xprt);
+/*
+ * The transport code maintains an estimate on the maximum number of out-
+ * standing RPC requests, using a smoothed version of the congestion
+ * avoidance implemented in 44BSD. This is basically the Van Jacobson
+ * congestion algorithm: If a retransmit occurs, the congestion window is
+ * halved; otherwise, it is incremented by 1/cwnd when
+ *
+ * - a reply is received and
+ * - a full number of requests are outstanding and
+ * - the congestion window hasn't been updated recently.
+ */
+#define RPC_CWNDSHIFT (8U)
+#define RPC_CWNDSCALE (1U << RPC_CWNDSHIFT)
+#define RPC_INITCWND RPC_CWNDSCALE
+#define RPC_MAXCWND(xprt) ((xprt)->max_reqs << RPC_CWNDSHIFT)
+#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)
 
 /**
  * xprt_reserve_xprt - serialize write access to transports
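For reference, a minimal sketch of how these fixed-point macros drive the window: RPC_CWNDSCALE (256) stands for one request, a retransmit timeout halves cwnd, and each timely reply grows it by roughly 1/cwnd, so a full window of replies earns one extra slot. The struct and helper names below are illustrative assumptions, loosely modeled on the kernel's xprt_adjust_cwnd; they are not part of this patch, and the "updated recently" check from the comment is omitted for brevity.

/* Illustrative sketch only; relies on the RPC_CWND* macros added above.
 * cwnd is kept in fixed point: RPC_CWNDSCALE == 256 == "one request".
 */
struct example_xprt {
	unsigned long cwnd;	/* current congestion window, fixed point */
	unsigned long cong;	/* window units held by requests in flight */
	unsigned long max_reqs;	/* slot table size; bounds the window */
};

static void example_adjust_cwnd(struct example_xprt *xprt, int timed_out)
{
	unsigned long cwnd = xprt->cwnd;

	if (timed_out) {
		/* multiplicative decrease: halve, but keep one request's worth */
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	} else if (cwnd <= xprt->cong && cwnd < RPC_MAXCWND(xprt)) {
		/* additive increase: a full window of replies adds one slot */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE) / cwnd;
	}
	xprt->cwnd = cwnd;
}

static int example_may_send(struct example_xprt *xprt)
{
	/* new requests are admitted only while cong stays below cwnd */
	return !RPCXPRT_CONGESTED(xprt);
}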
@@ -850,7 +866,7 @@ void xprt_release(struct rpc_task *task)
 
 	spin_lock(&xprt->reserve_lock);
 	list_add(&req->rq_list, &xprt->free);
-	xprt_clear_backlog(xprt);
+	rpc_wake_up_next(&xprt->backlog);
 	spin_unlock(&xprt->reserve_lock);
 }
@@ -902,7 +918,6 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
 
 	spin_lock_init(&xprt->transport_lock);
 	spin_lock_init(&xprt->reserve_lock);
-	init_waitqueue_head(&xprt->cong_wait);
 
 	INIT_LIST_HEAD(&xprt->free);
 	INIT_LIST_HEAD(&xprt->recv);
@@ -911,6 +926,7 @@ static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
 	xprt->timer.function = xprt_init_autodisconnect;
 	xprt->timer.data = (unsigned long) xprt;
 	xprt->last_used = jiffies;
+	xprt->cwnd = RPC_INITCWND;
 
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
 	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
@@ -955,16 +971,9 @@ static void xprt_shutdown(struct rpc_xprt *xprt)
 	rpc_wake_up(&xprt->resend);
 	xprt_wake_pending_tasks(xprt, -EIO);
 	rpc_wake_up(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
 	del_timer_sync(&xprt->timer);
 }
 
-static int xprt_clear_backlog(struct rpc_xprt *xprt) {
-	rpc_wake_up_next(&xprt->backlog);
-	wake_up(&xprt->cong_wait);
-	return 1;
-}
-
 /**
  * xprt_destroy - destroy an RPC transport, killing off all requests.
  * @xprt: transport to destroy
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1100,7 +1100,6 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->prot = IPPROTO_UDP;
 	xprt->port = XS_MAX_RESVPORT;
 	xprt->tsh_size = 0;
-	xprt->cwnd = RPC_INITCWND;
 	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
 	/* XXX: header size can vary due to auth type, IPv6, etc. */
 	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
@@ -1139,7 +1138,6 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
 	xprt->prot = IPPROTO_TCP;
 	xprt->port = XS_MAX_RESVPORT;
 	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
-	xprt->cwnd = RPC_MAXCWND(xprt);
 	xprt->resvport = capable(CAP_NET_BIND_SERVICE) ? 1 : 0;
 	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
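With this patch the initial window is set once in xprt_setup(), so every transport starts from RPC_INITCWND; xs_setup_tcp() previously started at the full RPC_MAXCWND, and since only congestion-controlled transports consult cwnd, starting TCP at RPC_INITCWND as well appears behaviorally neutral. The values are easy to check by hand; a standalone illustration, assuming the default 16-entry slot table (RPC_DEF_SLOT_TABLE):

#include <stdio.h>

/* Illustrative arithmetic only, not part of the patch. */
int main(void)
{
	unsigned int cwndscale = 1U << 8;	/* RPC_CWNDSCALE: 256 per request */
	unsigned int initcwnd = cwndscale;	/* RPC_INITCWND: one request */
	unsigned int maxcwnd = 16U << 8;	/* RPC_MAXCWND with max_reqs = 16 */

	printf("start at %u of %u (%u slots max)\n",
	       initcwnd, maxcwnd, maxcwnd / cwndscale);
	return 0;
}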