Commit ea3b1ea5, authored by Sowmini Varadhan, committed by David S. Miller

RDS: TCP: make ->sk_user_data point to a rds_conn_path

The socket callbacks should all operate on a struct rds_conn_path,
in preparation for an MP-capable RDS-TCP.
Acked-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: afb4164d
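The shape of the change is easiest to see outside the diff: a socket's private pointer (sk_user_data) now refers to a single rds_conn_path, so each callback services exactly the path that owns the socket instead of reaching for conn->c_path[0]. Below is a minimal user-space C sketch of that idea; struct connection, struct conn_path and struct fake_sock are hypothetical stand-ins, not the kernel types.

```c
/* User-space sketch (not kernel code) of the pointer change this patch makes:
 * the socket's private data now names one path, so a callback can service
 * that path directly.  All types below are hypothetical stand-ins. */
#include <stdio.h>

struct connection;                   /* forward declaration */

struct conn_path {                   /* stands in for struct rds_conn_path */
        int index;
        struct connection *conn;     /* back-pointer, like cp->cp_conn */
};

struct connection {                  /* stands in for struct rds_connection */
        struct conn_path paths[4];   /* several paths once MP-RDS lands */
};

struct fake_sock {
        void *user_data;             /* stands in for sk->sk_user_data */
};

/* Before the patch a callback had to assume c_path[0]; now it just uses
 * whichever path the socket was bound to. */
static void data_ready(struct fake_sock *sk)
{
        struct conn_path *cp = sk->user_data;

        printf("data ready on path %d of conn %p\n", cp->index, (void *)cp->conn);
}

int main(void)
{
        struct connection conn = { 0 };
        struct fake_sock sk;

        for (int i = 0; i < 4; i++) {
                conn.paths[i].index = i;
                conn.paths[i].conn = &conn;
        }
        sk.user_data = &conn.paths[2];   /* this socket belongs to path 2 */
        data_ready(&sk);                 /* no hard-coded c_path[0] anywhere */
        return 0;
}
```

Once RDS-TCP grows multiple paths per connection, each path gets its own socket, and this indirection is what lets the shared callbacks stay path-agnostic.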
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -136,9 +136,9 @@ void rds_tcp_restore_callbacks(struct socket *sock,
  * from being called while it isn't set.
  */
 void rds_tcp_reset_callbacks(struct socket *sock,
-                             struct rds_connection *conn)
+                             struct rds_conn_path *cp)
 {
-        struct rds_tcp_connection *tc = conn->c_transport_data;
+        struct rds_tcp_connection *tc = cp->cp_transport_data;
         struct socket *osock = tc->t_sock;
 
         if (!osock)
@@ -148,8 +148,8 @@ void rds_tcp_reset_callbacks(struct socket *sock,
          * We have an outstanding SYN to this peer, which may
          * potentially have transitioned to the RDS_CONN_UP state,
          * so we must quiesce any send threads before resetting
-         * c_transport_data. We quiesce these threads by setting
-         * c_state to something other than RDS_CONN_UP, and then
+         * cp_transport_data. We quiesce these threads by setting
+         * cp_state to something other than RDS_CONN_UP, and then
          * waiting for any existing threads in rds_send_xmit to
          * complete release_in_xmit(). (Subsequent threads entering
          * rds_send_xmit() will bail on !rds_conn_up().
@@ -164,8 +164,8 @@ void rds_tcp_reset_callbacks(struct socket *sock,
          * RDS_CONN_RESETTTING, to ensure that rds_tcp_state_change
          * cannot mark rds_conn_path_up() in the window before lock_sock()
          */
-        atomic_set(&conn->c_state, RDS_CONN_RESETTING);
-        wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags));
+        atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
+        wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
         lock_sock(osock->sk);
         /* reset receive side state for rds_tcp_data_recv() for osock */
         if (tc->t_tinc) {
@@ -186,11 +186,12 @@ void rds_tcp_reset_callbacks(struct socket *sock,
         release_sock(osock->sk);
         sock_release(osock);
 newsock:
-        rds_send_path_reset(&conn->c_path[0]);
+        rds_send_path_reset(cp);
         lock_sock(sock->sk);
         write_lock_bh(&sock->sk->sk_callback_lock);
         tc->t_sock = sock;
-        sock->sk->sk_user_data = conn;
+        tc->t_cpath = cp;
+        sock->sk->sk_user_data = cp;
         sock->sk->sk_data_ready = rds_tcp_data_ready;
         sock->sk->sk_write_space = rds_tcp_write_space;
         sock->sk->sk_state_change = rds_tcp_state_change;
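The comment in the hunks above describes a two-step quiesce before cp_transport_data is touched: move cp_state off RDS_CONN_UP so new senders bail out of rds_send_xmit(), then sleep on cp_waitq until no thread still holds RDS_IN_XMIT. A user-space sketch of that ordering, with a pthread mutex/condvar standing in for the kernel waitqueue (all names are illustrative):

```c
/* User-space sketch of the quiesce step: flip the path state away from "UP"
 * so new senders bail, then wait for any sender already inside the transmit
 * path (the RDS_IN_XMIT analogue) to drop its bit. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum path_state { PATH_UP, PATH_RESETTING };

struct path {
        enum path_state state;
        bool in_xmit;                  /* analogue of RDS_IN_XMIT in cp_flags */
        pthread_mutex_t lock;
        pthread_cond_t  waitq;         /* analogue of cp->cp_waitq */
};

static void *sender(void *arg)
{
        struct path *p = arg;

        pthread_mutex_lock(&p->lock);
        if (p->state != PATH_UP) {     /* rds_send_xmit() bails on !rds_conn_up() */
                pthread_mutex_unlock(&p->lock);
                return NULL;
        }
        p->in_xmit = true;
        pthread_mutex_unlock(&p->lock);

        /* ... transmit ... */

        pthread_mutex_lock(&p->lock);
        p->in_xmit = false;            /* release_in_xmit() analogue */
        pthread_cond_broadcast(&p->waitq);
        pthread_mutex_unlock(&p->lock);
        return NULL;
}

static void quiesce(struct path *p)
{
        pthread_mutex_lock(&p->lock);
        p->state = PATH_RESETTING;     /* like atomic_set(&cp->cp_state, RDS_CONN_RESETTING) */
        while (p->in_xmit)             /* like wait_event(cp->cp_waitq, !RDS_IN_XMIT) */
                pthread_cond_wait(&p->waitq, &p->lock);
        pthread_mutex_unlock(&p->lock);
        /* now it is safe to reset the transport data */
}

int main(void)
{
        struct path p = { PATH_UP, false,
                          PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, sender, &p);
        quiesce(&p);
        pthread_join(&t, NULL);
        puts("path quiesced");
        return 0;
}
```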
@@ -203,9 +204,9 @@ void rds_tcp_reset_callbacks(struct socket *sock,
  * above rds_tcp_reset_callbacks for notes about synchronization
  * with data path
  */
-void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
+void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
 {
-        struct rds_tcp_connection *tc = conn->c_transport_data;
+        struct rds_tcp_connection *tc = cp->cp_transport_data;
 
         rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
         write_lock_bh(&sock->sk->sk_callback_lock);
@@ -221,12 +222,12 @@ void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
                 sock->sk->sk_data_ready = sock->sk->sk_user_data;
 
         tc->t_sock = sock;
-        tc->t_cpath = &conn->c_path[0];
+        tc->t_cpath = cp;
         tc->t_orig_data_ready = sock->sk->sk_data_ready;
         tc->t_orig_write_space = sock->sk->sk_write_space;
         tc->t_orig_state_change = sock->sk->sk_state_change;
-        sock->sk->sk_user_data = conn;
+        sock->sk->sk_user_data = cp;
         sock->sk->sk_data_ready = rds_tcp_data_ready;
         sock->sk->sk_write_space = rds_tcp_write_space;
         sock->sk->sk_state_change = rds_tcp_state_change;
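rds_tcp_set_callbacks() above follows a save-and-override pattern: under the socket's callback lock it remembers the original handlers in the transport struct (so rds_tcp_restore_callbacks() can put them back later), points sk_user_data at the path, and installs the RDS handlers. A compressed user-space sketch of the same pattern, with hypothetical stand-in types and a pthread rwlock in place of sk_callback_lock:

```c
/* Sketch only: save the socket's original callback, redirect the socket's
 * private data to the path, and install the new handler under a write lock. */
#include <pthread.h>
#include <stdio.h>

struct fake_sock;
typedef void (*sk_cb)(struct fake_sock *sk);

struct fake_sock {
        pthread_rwlock_t callback_lock;    /* stands in for sk_callback_lock */
        void *user_data;                   /* stands in for sk_user_data */
        sk_cb data_ready;
};

struct tcp_conn {                          /* stands in for rds_tcp_connection */
        struct fake_sock *sock;
        sk_cb orig_data_ready;             /* like t_orig_data_ready */
};

static void rds_data_ready(struct fake_sock *sk)   /* stand-in RDS handler */
{
        printf("rds handler, path %p\n", sk->user_data);
}

static void set_callbacks(struct fake_sock *sk, struct tcp_conn *tc, void *cp)
{
        pthread_rwlock_wrlock(&sk->callback_lock);
        tc->sock = sk;
        tc->orig_data_ready = sk->data_ready;  /* keep the original for restore */
        sk->user_data = cp;                    /* callbacks now see the path */
        sk->data_ready = rds_data_ready;
        pthread_rwlock_unlock(&sk->callback_lock);
}

static void default_ready(struct fake_sock *sk) { (void)sk; puts("default"); }

int main(void)
{
        struct fake_sock sk = { PTHREAD_RWLOCK_INITIALIZER, NULL, default_ready };
        struct tcp_conn tc = { 0 };
        int path_stub;                         /* placeholder for a conn path */

        set_callbacks(&sk, &tc, &path_stub);
        sk.data_ready(&sk);                    /* dispatches to the RDS handler */
        return 0;
}
```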
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -49,8 +49,8 @@ struct rds_tcp_statistics {
 /* tcp.c */
 void rds_tcp_tune(struct socket *sock);
 void rds_tcp_nonagle(struct socket *sock);
-void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
-void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
+void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
 void rds_tcp_restore_callbacks(struct socket *sock,
                                struct rds_tcp_connection *tc);
 u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -41,16 +41,16 @@
 void rds_tcp_state_change(struct sock *sk)
 {
         void (*state_change)(struct sock *sk);
-        struct rds_connection *conn;
+        struct rds_conn_path *cp;
         struct rds_tcp_connection *tc;
 
         read_lock_bh(&sk->sk_callback_lock);
-        conn = sk->sk_user_data;
-        if (!conn) {
+        cp = sk->sk_user_data;
+        if (!cp) {
                 state_change = sk->sk_state_change;
                 goto out;
         }
-        tc = conn->c_transport_data;
+        tc = cp->cp_transport_data;
         state_change = tc->t_orig_state_change;
 
         rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
@@ -61,12 +61,11 @@ void rds_tcp_state_change(struct sock *sk)
         case TCP_SYN_RECV:
                 break;
         case TCP_ESTABLISHED:
-                rds_connect_path_complete(&conn->c_path[0],
-                                          RDS_CONN_CONNECTING);
+                rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
                 break;
         case TCP_CLOSE_WAIT:
         case TCP_CLOSE:
-                rds_conn_drop(conn);
+                rds_conn_path_drop(cp);
         default:
                 break;
         }
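With the path in hand, rds_tcp_state_change() above becomes a per-path state machine: ESTABLISHED promotes that path from CONNECTING to UP via rds_connect_path_complete(), while CLOSE_WAIT/CLOSE drop only that path via rds_conn_path_drop() rather than the whole connection. A small user-space sketch of the dispatch, with hypothetical enums and helpers standing in for the kernel's:

```c
/* Sketch only: TCP state transitions drive the state of one path. */
#include <stdio.h>

enum tcp_state  { TCP_ESTABLISHED = 1, TCP_SYN_SENT, TCP_SYN_RECV,
                  TCP_CLOSE_WAIT, TCP_CLOSE };
enum path_state { PATH_CONNECTING, PATH_UP, PATH_DOWN };

struct conn_path { int index; enum path_state state; };

static void connect_path_complete(struct conn_path *cp) { cp->state = PATH_UP; }
static void conn_path_drop(struct conn_path *cp)        { cp->state = PATH_DOWN; }

static void state_change(struct conn_path *cp, enum tcp_state sk_state)
{
        switch (sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                break;                       /* still handshaking, nothing to do */
        case TCP_ESTABLISHED:
                connect_path_complete(cp);   /* this path, not c_path[0], comes up */
                break;
        case TCP_CLOSE_WAIT:
        case TCP_CLOSE:
                conn_path_drop(cp);          /* tear down only the affected path */
                break;
        default:
                break;
        }
}

int main(void)
{
        struct conn_path cp = { 3, PATH_CONNECTING };

        state_change(&cp, TCP_ESTABLISHED);
        printf("path %d state %d\n", cp.index, cp.state);   /* path 3 is UP */
        state_change(&cp, TCP_CLOSE);
        printf("path %d state %d\n", cp.index, cp.state);   /* path 3 is DOWN */
        return 0;
}
```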
@@ -81,6 +80,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
         struct sockaddr_in src, dest;
         int ret;
         struct rds_tcp_connection *tc = conn->c_transport_data;
+        struct rds_conn_path *cp = &conn->c_path[0];
 
         mutex_lock(&tc->t_conn_path_lock);
@@ -114,7 +114,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
          * once we call connect() we can start getting callbacks and they
          * own the socket
          */
-        rds_tcp_set_callbacks(sock, conn);
+        rds_tcp_set_callbacks(sock, cp);
         ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
                                  O_NONBLOCK);
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -79,6 +79,7 @@ int rds_tcp_accept_one(struct socket *sock)
         struct inet_sock *inet;
         struct rds_tcp_connection *rs_tcp = NULL;
         int conn_state;
+        struct rds_conn_path *cp;
 
         if (!sock) /* module unload or netns delete in progress */
                 return -ENETUNREACH;
@@ -120,6 +121,7 @@ int rds_tcp_accept_one(struct socket *sock)
          * rds_tcp_state_change() will do that cleanup
          */
         rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
+        cp = &conn->c_path[0];
         rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
         mutex_lock(&rs_tcp->t_conn_path_lock);
         conn_state = rds_conn_state(conn);
@@ -136,16 +138,14 @@ int rds_tcp_accept_one(struct socket *sock)
                     !conn->c_path[0].cp_outgoing) {
                         goto rst_nsk;
                 } else {
-                        rds_tcp_reset_callbacks(new_sock, conn);
+                        rds_tcp_reset_callbacks(new_sock, cp);
                         conn->c_path[0].cp_outgoing = 0;
                         /* rds_connect_path_complete() marks RDS_CONN_UP */
-                        rds_connect_path_complete(&conn->c_path[0],
-                                                  RDS_CONN_RESETTING);
+                        rds_connect_path_complete(cp, RDS_CONN_RESETTING);
                 }
         } else {
-                rds_tcp_set_callbacks(new_sock, conn);
-                rds_connect_path_complete(&conn->c_path[0],
-                                          RDS_CONN_CONNECTING);
+                rds_tcp_set_callbacks(new_sock, cp);
+                rds_connect_path_complete(cp, RDS_CONN_CONNECTING);
         }
         new_sock = NULL;
         ret = 0;
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -297,24 +297,24 @@ int rds_tcp_recv(struct rds_connection *conn)
 void rds_tcp_data_ready(struct sock *sk)
 {
         void (*ready)(struct sock *sk);
-        struct rds_connection *conn;
+        struct rds_conn_path *cp;
         struct rds_tcp_connection *tc;
 
         rdsdebug("data ready sk %p\n", sk);
 
         read_lock_bh(&sk->sk_callback_lock);
-        conn = sk->sk_user_data;
-        if (!conn) { /* check for teardown race */
+        cp = sk->sk_user_data;
+        if (!cp) { /* check for teardown race */
                 ready = sk->sk_data_ready;
                 goto out;
         }
-        tc = conn->c_transport_data;
+        tc = cp->cp_transport_data;
         ready = tc->t_orig_data_ready;
         rds_tcp_stats_inc(s_tcp_data_ready_calls);
 
-        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
-                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+        if (rds_tcp_read_sock(cp->cp_conn, GFP_ATOMIC) == -ENOMEM)
+                queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
 out:
         read_unlock_bh(&sk->sk_callback_lock);
         ready(sk);
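rds_tcp_data_ready() above keeps its softirq-context work cheap: it tries rds_tcp_read_sock() with GFP_ATOMIC and, if that constrained pass fails with -ENOMEM, queues the path's delayed receive work (cp_recv_w) so the rest can run later in process context. A user-space sketch of that "try cheaply now, defer on failure" shape; read_sock(), queue_recv_work() and the flag are hypothetical stand-ins for the kernel machinery:

```c
/* Sketch only: attempt the fast-path read with a constrained allocator; on
 * -ENOMEM push a deferred work item that retries with a sleeping allocator. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct conn_path { int index; bool recv_work_queued; };

/* Pretend fast-path read: 'atomic' means no blocking allocations allowed. */
static int read_sock(struct conn_path *cp, bool atomic)
{
        if (atomic)
                return -ENOMEM;          /* simulate allocation failure under pressure */
        printf("path %d: drained socket in process context\n", cp->index);
        return 0;
}

static void queue_recv_work(struct conn_path *cp)
{
        cp->recv_work_queued = true;     /* like queue_delayed_work(rds_wq, &cp->cp_recv_w, 0) */
}

static void data_ready(struct conn_path *cp)
{
        if (read_sock(cp, true) == -ENOMEM)  /* GFP_ATOMIC analogue */
                queue_recv_work(cp);         /* finish later, outside the callback */
}

int main(void)
{
        struct conn_path cp = { 1, false };

        data_ready(&cp);
        if (cp.recv_work_queued)
                read_sock(&cp, false);       /* the deferred worker retries unconstrained */
        return 0;
}
```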
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -178,27 +178,27 @@ static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
 void rds_tcp_write_space(struct sock *sk)
 {
         void (*write_space)(struct sock *sk);
-        struct rds_connection *conn;
+        struct rds_conn_path *cp;
         struct rds_tcp_connection *tc;
 
         read_lock_bh(&sk->sk_callback_lock);
-        conn = sk->sk_user_data;
-        if (!conn) {
+        cp = sk->sk_user_data;
+        if (!cp) {
                 write_space = sk->sk_write_space;
                 goto out;
         }
-        tc = conn->c_transport_data;
+        tc = cp->cp_transport_data;
         rdsdebug("write_space for tc %p\n", tc);
         write_space = tc->t_orig_write_space;
         rds_tcp_stats_inc(s_tcp_write_space_calls);
 
         rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
         tc->t_last_seen_una = rds_tcp_snd_una(tc);
-        rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
+        rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
         if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
-                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+                queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
 out:
         read_unlock_bh(&sk->sk_callback_lock);
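rds_tcp_write_space() above turns TCP's cumulative ack into per-path RDS completions: it samples snd_una, retires every queued message that rds_tcp_is_acked() reports as fully acked via rds_send_path_drop_acked(), and kicks cp_send_w once no more than half of the send buffer is still in use. A user-space sketch of just the ack-driven cleanup step; the message list and sequence bookkeeping below are simplified stand-ins:

```c
/* Sketch of "drop everything acked up to snd_una": each queued message
 * remembers the TCP sequence number just past its last byte, and a
 * write-space event retires every message whose end sequence is at or below
 * the peer's cumulative ack.  Types and fields are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define QUEUED 8

struct pending_msg { uint32_t end_seq; int live; };

struct conn_path {
        struct pending_msg q[QUEUED];    /* stand-in for the path's send queue */
};

/* Analogue of rds_tcp_is_acked(): fully acked once snd_una passed its end. */
static int is_acked(const struct pending_msg *m, uint32_t snd_una)
{
        return (int32_t)(snd_una - m->end_seq) >= 0;   /* wrap-safe compare */
}

static void drop_acked(struct conn_path *cp, uint32_t snd_una)
{
        for (int i = 0; i < QUEUED; i++)
                if (cp->q[i].live && is_acked(&cp->q[i], snd_una)) {
                        cp->q[i].live = 0;             /* complete the RDS message */
                        printf("retired msg ending at %u\n", cp->q[i].end_seq);
                }
}

int main(void)
{
        struct conn_path cp = { .q = { { 1000, 1 }, { 2000, 1 }, { 3000, 1 } } };

        drop_acked(&cp, 2500);           /* peer has acked everything below 2500 */
        return 0;
}
```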