Commit aac065c5 authored by Eric Dumazet, committed by David S. Miller

tcp: move qlen/young out of struct listen_sock

qlen_inc & young_inc were protected by listener lock,
while qlen_dec & young_dec were atomic fields.

Everything needs to be atomic for upcoming lockless listener.

Also move qlen/young in request_sock_queue as we'll get rid
of struct listen_sock eventually.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent fff1f300
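For illustration only, a minimal userspace C sketch of the accounting change (hypothetical struct and function names, using C11 atomics rather than the kernel's atomic_t API): the old scheme split each counter into a lock-protected increment and an atomic decrement, so the current value was qlen_inc - qlen_dec; the new scheme keeps a single atomic counter that any context may adjust without the listener lock.

#include <stdatomic.h>

/* Old scheme (sketch): increments happen under the listener lock, the
 * timer may only bump an atomic decrement counter, and the current
 * queue length is derived as qlen_inc - qlen_dec. */
struct old_counters {
        int        qlen_inc;   /* protected by the listener lock */
        atomic_int qlen_dec;   /* may be updated from timer context */
};

static int old_qlen(struct old_counters *c)
{
        return c->qlen_inc - atomic_load(&c->qlen_dec);
}

/* New scheme (sketch): a single atomic counter that any context may
 * increment or decrement, which is what a lockless listener needs. */
struct new_counters {
        atomic_int qlen;
};

static void new_added(struct new_counters *c)
{
        atomic_fetch_add(&c->qlen, 1);
}

static void new_removed(struct new_counters *c)
{
        atomic_fetch_sub(&c->qlen, 1);
}

static int new_qlen(struct new_counters *c)
{
        return atomic_load(&c->qlen);
}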
@@ -122,14 +122,7 @@ extern int sysctl_max_syn_backlog;
  * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
  */
 struct listen_sock {
-	int		qlen_inc; /* protected by listener lock */
-	int		young_inc;/* protected by listener lock */
-
-	/* following fields can be updated by timer */
-	atomic_t	qlen_dec; /* qlen = qlen_inc - qlen_dec */
-	atomic_t	young_dec;
-
-	u32		max_qlen_log ____cacheline_aligned_in_smp;
+	u32		max_qlen_log;
 	u32		synflood_warned;
 	u32		hash_rnd;
 	u32		nr_table_entries;
@@ -179,6 +172,9 @@ struct request_sock_queue {
 	spinlock_t		rskq_lock;
 	u8			rskq_defer_accept;
 
+	atomic_t		qlen;
+	atomic_t		young;
+
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
 	struct listen_sock	*listen_opt;
@@ -242,41 +238,25 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
 static inline void reqsk_queue_removed(struct request_sock_queue *queue,
 				       const struct request_sock *req)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
 	if (req->num_timeout == 0)
-		atomic_inc(&lopt->young_dec);
-	atomic_inc(&lopt->qlen_dec);
+		atomic_dec(&queue->young);
+	atomic_dec(&queue->qlen);
 }
 
 static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
-	lopt->young_inc++;
-	lopt->qlen_inc++;
-}
-
-static inline int listen_sock_qlen(const struct listen_sock *lopt)
-{
-	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
-}
-
-static inline int listen_sock_young(const struct listen_sock *lopt)
-{
-	return lopt->young_inc - atomic_read(&lopt->young_dec);
+	atomic_inc(&queue->young);
+	atomic_inc(&queue->qlen);
 }
 
 static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	const struct listen_sock *lopt = queue->listen_opt;
-
-	return lopt ? listen_sock_qlen(lopt) : 0;
+	return atomic_read(&queue->qlen);
 }
 
 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	return listen_sock_young(queue->listen_opt);
+	return atomic_read(&queue->young);
 }
 
 static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
...
@@ -102,7 +102,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 	/* make all the listen_opt local to us */
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
 
-	if (listen_sock_qlen(lopt) != 0) {
+	if (reqsk_queue_len(queue) != 0) {
 		unsigned int i;
 
 		for (i = 0; i < lopt->nr_table_entries; i++) {
@@ -116,7 +116,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 				 * or risk a dead lock.
 				 */
 				spin_unlock_bh(&queue->syn_wait_lock);
-				atomic_inc(&lopt->qlen_dec);
+				atomic_dec(&queue->qlen);
 				if (del_timer_sync(&req->rsk_timer))
 					reqsk_put(req);
 				reqsk_put(req);
@@ -126,8 +126,8 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 		}
 	}
 
-	if (WARN_ON(listen_sock_qlen(lopt) != 0))
-		pr_err("qlen %u\n", listen_sock_qlen(lopt));
+	if (WARN_ON(reqsk_queue_len(queue) != 0))
+		pr_err("qlen %u\n", reqsk_queue_len(queue));
 	kvfree(lopt);
 }
...
@@ -640,9 +640,9 @@ static void reqsk_timer_handler(unsigned long data)
 	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
-	qlen = listen_sock_qlen(lopt);
+	qlen = reqsk_queue_len(queue);
 	if (qlen >> (lopt->max_qlen_log - 1)) {
-		int young = listen_sock_young(lopt) << 1;
+		int young = reqsk_queue_len_young(queue) << 1;
 
 		while (thresh > 2) {
 			if (qlen < young)
@@ -664,7 +664,7 @@ static void reqsk_timer_handler(unsigned long data)
 		unsigned long timeo;
 
 		if (req->num_timeout++ == 0)
-			atomic_inc(&lopt->young_dec);
+			atomic_dec(&queue->young);
 		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
 		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
 		return;
...
@@ -753,7 +753,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 	spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
 
 	lopt = icsk->icsk_accept_queue.listen_opt;
-	if (!lopt || !listen_sock_qlen(lopt))
+	if (!lopt || !reqsk_queue_len(&icsk->icsk_accept_queue))
 		goto out;
 
 	if (bc) {
...