Commit 0536fcc0, authored by Eric Dumazet, committed by David S. Miller

tcp: prepare fastopen code for upcoming listener changes

While auditing the TCP stack for the upcoming 'lockless' listener changes,
I found I had to change fastopen_init_queue() to properly initialize the
object before publishing it.

Otherwise another CPU could try to lock the spinlock before it gets
properly initialized.

Instead of adding the appropriate barriers, just remove the dynamic memory
allocation:
- The structure is 28 bytes on 64-bit arches. Spending an additional
  8 bytes to hold a pointer seems overkill.
- Two listeners could share the same cache line and performance would suffer.

If we really want to save a few bytes, we could instead dynamically allocate
the whole struct request_sock_queue in the future.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 2985aaac
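
To make the hazard concrete, here is a minimal user-space C sketch (not part of the patch: a pthread mutex stands in for the kernel spinlock, and every name is invented to mirror the diff below). Once the pointer store is globally visible, a second thread may take the lock before *_init() has run; embedding the structure and initializing it before its parent is published removes the race without extra barriers.

/* Hypothetical stand-ins for the kernel types; compile with -pthread. */
#include <pthread.h>
#include <stdlib.h>

struct fastopen_q_like {
	pthread_mutex_t lock;	/* plays the role of spinlock_t */
	int max_qlen;
};

struct fastopen_q_like *shared_q;	/* globally visible, like the old fastopenq pointer */

/* Buggy pattern described in the message: the object is published
 * (the pointer store) before its lock is initialized, so a reader
 * that loads shared_q in between locks uninitialized memory.
 */
void publish_then_init(void)
{
	shared_q = malloc(sizeof(*shared_q));		/* published here... */
	pthread_mutex_init(&shared_q->lock, NULL);	/* ...initialized too late */
}

/* The patch's alternative: embed the structure in its parent and
 * initialize it once, before the parent can be observed by other
 * CPUs, so no publication barrier is needed.
 */
struct parent_like {
	struct fastopen_q_like fastopenq;	/* embedded, not a pointer */
};

void init_before_publish(struct parent_like *p)
{
	pthread_mutex_init(&p->fastopenq.lock, NULL);
	p->fastopenq.max_qlen = 0;	/* 0 means "TFO disabled" after this patch */
}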
@@ -382,25 +382,11 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
 	       tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
-	struct request_sock_queue *queue =
-			&inet_csk(sk)->icsk_accept_queue;
-
-	if (queue->fastopenq == NULL) {
-		queue->fastopenq = kzalloc(
-			sizeof(struct fastopen_queue),
-			sk->sk_allocation);
-		if (queue->fastopenq == NULL)
-			return -ENOMEM;
-
-		sk->sk_destruct = tcp_sock_destruct;
-		spin_lock_init(&queue->fastopenq->lock);
-	}
-	queue->fastopenq->max_qlen = backlog;
-	return 0;
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	queue->fastopenq.max_qlen = backlog;
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
...
@@ -180,11 +180,8 @@ struct request_sock_queue {
 	struct request_sock	*rskq_accept_tail;
 	u8			rskq_defer_accept;
 	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
-					     */
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
+					     */
 
 	/* temporary alignment, our goal is to get rid of this lock */
...
@@ -59,6 +59,13 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 	spin_lock_init(&queue->syn_wait_lock);
+
+	spin_lock_init(&queue->fastopenq.lock);
+	queue->fastopenq.rskq_rst_head = NULL;
+	queue->fastopenq.rskq_rst_tail = NULL;
+	queue->fastopenq.qlen = 0;
+	queue->fastopenq.max_qlen = 0;
+
 	queue->rskq_accept_head = NULL;
 	lopt->nr_table_entries = nr_table_entries;
 	lopt->max_qlen_log = ilog2(nr_table_entries);
@@ -174,7 +181,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 	struct sock *lsk = req->rsk_listener;
 	struct fastopen_queue *fastopenq;
 
-	fastopenq = inet_csk(lsk)->icsk_accept_queue.fastopenq;
+	fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
 	tcp_sk(sk)->fastopen_rsk = NULL;
 	spin_lock_bh(&fastopenq->lock);
...
@@ -219,17 +219,13 @@ int inet_listen(struct socket *sock, int backlog)
 		 * shutdown() (rather than close()).
 		 */
 		if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 &&
-		    !inet_csk(sk)->icsk_accept_queue.fastopenq) {
+		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
 			if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0)
-				err = fastopen_init_queue(sk, backlog);
+				fastopen_queue_tune(sk, backlog);
 			else if ((sysctl_tcp_fastopen &
 				  TFO_SERVER_WO_SOCKOPT2) != 0)
-				err = fastopen_init_queue(sk,
+				fastopen_queue_tune(sk,
 				    ((uint)sysctl_tcp_fastopen) >> 16);
-			else
-				err = 0;
-			if (err)
-				goto out;
 
 			tcp_fastopen_init_key_once(true);
 		}
...
@@ -335,9 +335,8 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 	sk_acceptq_removed(sk);
 	if (sk->sk_protocol == IPPROTO_TCP &&
-	    tcp_rsk(req)->tfo_listener &&
-	    queue->fastopenq) {
-		spin_lock_bh(&queue->fastopenq->lock);
+	    tcp_rsk(req)->tfo_listener) {
+		spin_lock_bh(&queue->fastopenq.lock);
 		if (tcp_rsk(req)->tfo_listener) {
 			/* We are still waiting for the final ACK from 3WHS
 			 * so can't free req now. Instead, we set req->sk to
@@ -348,7 +347,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
 			req->sk = NULL;
 			req = NULL;
 		}
-		spin_unlock_bh(&queue->fastopenq->lock);
+		spin_unlock_bh(&queue->fastopenq.lock);
 	}
 out:
 	release_sock(sk);
@@ -886,12 +885,12 @@ void inet_csk_listen_stop(struct sock *sk)
 		sk_acceptq_removed(sk);
 		reqsk_put(req);
 	}
-	if (queue->fastopenq) {
+	if (queue->fastopenq.rskq_rst_head) {
 		/* Free all the reqs queued in rskq_rst_head. */
-		spin_lock_bh(&queue->fastopenq->lock);
-		acc_req = queue->fastopenq->rskq_rst_head;
-		queue->fastopenq->rskq_rst_head = NULL;
-		spin_unlock_bh(&queue->fastopenq->lock);
+		spin_lock_bh(&queue->fastopenq.lock);
+		acc_req = queue->fastopenq.rskq_rst_head;
+		queue->fastopenq.rskq_rst_head = NULL;
+		spin_unlock_bh(&queue->fastopenq.lock);
 		while ((req = acc_req) != NULL) {
 			acc_req = req->dl_next;
 			reqsk_put(req);
...
@@ -2253,13 +2253,6 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-void tcp_sock_destruct(struct sock *sk)
-{
-	inet_sock_destruct(sk);
-
-	kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
-}
-
 static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
@@ -2581,7 +2574,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		    TCPF_LISTEN))) {
 			tcp_fastopen_init_key_once(true);
 
-			err = fastopen_init_queue(sk, val);
+			fastopen_queue_tune(sk, val);
 		} else {
 			err = -EINVAL;
 		}
@@ -2849,10 +2842,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_FASTOPEN:
-		if (icsk->icsk_accept_queue.fastopenq)
-			val = icsk->icsk_accept_queue.fastopenq->max_qlen;
-		else
-			val = 0;
+		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
 		break;
 
 	case TCP_TIMESTAMP:
...
@@ -142,9 +142,9 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
 	if (!child)
 		return NULL;
 
-	spin_lock(&queue->fastopenq->lock);
-	queue->fastopenq->qlen++;
-	spin_unlock(&queue->fastopenq->lock);
+	spin_lock(&queue->fastopenq.lock);
+	queue->fastopenq.qlen++;
+	spin_unlock(&queue->fastopenq.lock);
 
 	/* Initialize the child socket. Have to fix some values to take
 	 * into account the child is a Fast Open socket and is created
@@ -237,8 +237,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 	 * between qlen overflow causing Fast Open to be disabled
 	 * temporarily vs a server not supporting Fast Open at all.
 	 */
-	fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-	if (!fastopenq || fastopenq->max_qlen == 0)
+	fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
+	if (fastopenq->max_qlen == 0)
 		return false;
 
 	if (fastopenq->qlen >= fastopenq->max_qlen) {
...
@@ -2186,7 +2186,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct inet_sock *inet = inet_sk(sk);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	__be32 dest = inet->inet_daddr;
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
...
@@ -1672,7 +1672,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	const struct inet_sock *inet = inet_sk(sp);
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
-	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
+	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 
 	dest = &sp->sk_v6_daddr;
 	src = &sp->sk_v6_rcv_saddr;
@@ -1716,7 +1716,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
 		   sp->sk_state == TCP_LISTEN ?
-			(fastopenq ? fastopenq->max_qlen : 0) :
+			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
 }
...
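
Taken together, the diff replaces a NULL-pointer test with a field test on an always-valid embedded structure. A simplified sketch of the new invariant (abridged types, not the real kernel definitions):

/* Abridged sketch; the real struct request_sock_queue carries many
 * more fields.  reqsk_queue_alloc() zeroes max_qlen at listen time,
 * and fastopen_queue_tune() later sets it when TFO is enabled.
 */
struct accept_queue_sketch {
	struct { int max_qlen; } fastopenq;	/* embedded, always initialized */
};

static int tfo_enabled(const struct accept_queue_sketch *q)
{
	/* before the patch: q->fastopenq != NULL
	 * after the patch:  q->fastopenq.max_qlen != 0
	 */
	return q->fastopenq.max_qlen != 0;
}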