Commit 16bea70a
Author: Octavian Purdila
Committer: David S. Miller

tcp: add init_req method to tcp_request_sock_ops

Move the IPv4/IPv6-specific initializations to a new method in
tcp_request_sock_ops in preparation for unifying tcp_v4_conn_request
and tcp_v6_conn_request.
Signed-off-by: Octavian Purdila <octavian.purdila@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 476eab82
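The point of the new hook is that a single, family-independent connection-request path can later delegate all address-family specific request setup to tcp_rsk(req)->af_specific. The sketch below only illustrates that direction and is not code from this patch; the helper name tcp_conn_request_sketch and the exact surrounding steps are assumptions, since the unified helper is introduced by later patches in this series.

/* Illustrative sketch only (not part of this patch): a family-independent
 * request path that relies on the new init_req hook. Names and the exact
 * sequence of steps are assumptions.
 */
static int tcp_conn_request_sketch(struct request_sock_ops *rsk_ops,
                                   const struct tcp_request_sock_ops *af_ops,
                                   struct sock *sk, struct sk_buff *skb)
{
        struct request_sock *req;

        req = inet_reqsk_alloc(rsk_ops);
        if (!req)
                return 0;

        /* Remember the per-family ops for the rest of the request's life. */
        tcp_rsk(req)->af_specific = af_ops;

        /* ... option parsing, tcp_openreq_init(), ... */

        /* Addresses, IP options, bound interface etc. are filled in behind
         * the ops table instead of being open-coded per family.
         */
        af_ops->init_req(req, sk, skb);

        /* ... security hook, route lookup, ISN generation, SYN-ACK ... */
        return 0;
}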
@@ -111,10 +111,7 @@ struct tcp_request_sock_ops;
 struct tcp_request_sock {
         struct inet_request_sock req;
-#ifdef CONFIG_TCP_MD5SIG
-        /* Only used by TCP MD5 Signature so far. */
         const struct tcp_request_sock_ops *af_specific;
-#endif
         struct sock *listener; /* needed for TFO */
         u32 rcv_isn;
         u32 snt_isn;
@@ -1613,6 +1613,8 @@ struct tcp_request_sock_ops {
                                           const struct request_sock *req,
                                           const struct sk_buff *skb);
 #endif
+        void (*init_req)(struct request_sock *req, struct sock *sk,
+                         struct sk_buff *skb);
 };
 
 int tcpv4_offload_init(void);
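Assembled from the hunk above, the ops table now carries a family-specific request-initialization hook alongside the existing MD5 hooks. The shape below is a reconstruction for context only; members not visible in this diff are elided.

struct tcp_request_sock_ops {
#ifdef CONFIG_TCP_MD5SIG
        /* md5_lookup() and calc_md5_hash() hooks, unchanged by this patch */
#endif
        void (*init_req)(struct request_sock *req, struct sock *sk,
                         struct sk_buff *skb);
};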
@@ -1237,6 +1237,17 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 #endif
 
+static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
+                            struct sk_buff *skb)
+{
+        struct inet_request_sock *ireq = inet_rsk(req);
+
+        ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+        ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+        ireq->no_srccheck = inet_sk(sk)->transparent;
+        ireq->opt = tcp_v4_save_options(skb);
+}
+
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
         .family = PF_INET,
         .obj_size = sizeof(struct tcp_request_sock),
@@ -1247,26 +1258,26 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
         .syn_ack_timeout = tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#ifdef CONFIG_TCP_MD5SIG
         .md5_lookup = tcp_v4_reqsk_md5_lookup,
         .calc_md5_hash = tcp_v4_md5_hash_skb,
-};
 #endif
+        .init_req = tcp_v4_init_req,
+};
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_options_received tmp_opt;
         struct request_sock *req;
-        struct inet_request_sock *ireq;
         struct tcp_sock *tp = tcp_sk(sk);
         struct dst_entry *dst = NULL;
         __be32 saddr = ip_hdr(skb)->saddr;
-        __be32 daddr = ip_hdr(skb)->daddr;
         __u32 isn = TCP_SKB_CB(skb)->when;
         bool want_cookie = false, fastopen;
         struct flowi4 fl4;
         struct tcp_fastopen_cookie foc = { .len = -1 };
+        const struct tcp_request_sock_ops *af_ops;
         int err;
 
         /* Never answer to SYNs send to broadcast or multicast */
@@ -1298,9 +1309,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         if (!req)
                 goto drop;
 
-#ifdef CONFIG_TCP_MD5SIG
-        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
-#endif
+        af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
 
         tcp_clear_options(&tmp_opt);
         tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
@@ -1313,11 +1322,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
         tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
         tcp_openreq_init(req, &tmp_opt, skb, sk);
 
-        ireq = inet_rsk(req);
-        ireq->ir_loc_addr = daddr;
-        ireq->ir_rmt_addr = saddr;
-        ireq->no_srccheck = inet_sk(sk)->transparent;
-        ireq->opt = tcp_v4_save_options(skb);
+        af_ops->init_req(req, sk, skb);
 
         if (security_inet_conn_request(sk, skb, req))
                 goto drop_and_free;
@@ -720,6 +720,31 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 }
 #endif
 
+static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+                            struct sk_buff *skb)
+{
+        struct inet_request_sock *ireq = inet_rsk(req);
+        struct ipv6_pinfo *np = inet6_sk(sk);
+
+        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+        ireq->ir_iif = sk->sk_bound_dev_if;
+
+        /* So that link locals have meaning */
+        if (!sk->sk_bound_dev_if &&
+            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+                ireq->ir_iif = inet6_iif(skb);
+
+        if (!TCP_SKB_CB(skb)->when &&
+            (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+             np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
+             np->rxopt.bits.rxohlim || np->repflow)) {
+                atomic_inc(&skb->users);
+                ireq->pktopts = skb;
+        }
+}
+
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
         .family = AF_INET6,
         .obj_size = sizeof(struct tcp6_request_sock),
@@ -730,12 +755,13 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
         .syn_ack_timeout = tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+#ifdef CONFIG_TCP_MD5SIG
         .md5_lookup = tcp_v6_reqsk_md5_lookup,
         .calc_md5_hash = tcp_v6_md5_hash_skb,
-};
 #endif
+        .init_req = tcp_v6_init_req,
+};
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                                  u32 tsval, u32 tsecr, int oif,
@@ -983,13 +1009,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         struct tcp_options_received tmp_opt;
         struct request_sock *req;
         struct inet_request_sock *ireq;
-        struct ipv6_pinfo *np = inet6_sk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
         __u32 isn = TCP_SKB_CB(skb)->when;
         struct dst_entry *dst = NULL;
         struct tcp_fastopen_cookie foc = { .len = -1 };
         bool want_cookie = false, fastopen;
         struct flowi6 fl6;
+        const struct tcp_request_sock_ops *af_ops;
         int err;
 
         if (skb->protocol == htons(ETH_P_IP))
@@ -1014,9 +1040,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         if (req == NULL)
                 goto drop;
 
-#ifdef CONFIG_TCP_MD5SIG
-        tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-#endif
+        af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
 
         tcp_clear_options(&tmp_opt);
         tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
@@ -1030,27 +1054,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
         tcp_openreq_init(req, &tmp_opt, skb, sk);
 
         ireq = inet_rsk(req);
-        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
-        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+        af_ops->init_req(req, sk, skb);
+
         if (!want_cookie || tmp_opt.tstamp_ok)
                 TCP_ECN_create_request(req, skb, sock_net(sk));
 
-        ireq->ir_iif = sk->sk_bound_dev_if;
-
-        /* So that link locals have meaning */
-        if (!sk->sk_bound_dev_if &&
-            ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                ireq->ir_iif = inet6_iif(skb);
-
         if (!isn) {
-                if (ipv6_opt_accepted(sk, skb) ||
-                    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
-                    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
-                    np->repflow) {
-                        atomic_inc(&skb->users);
-                        ireq->pktopts = skb;
-                }
                 if (want_cookie) {
                         isn = cookie_v6_init_sequence(sk, skb, &req->mss);
                         req->cookie_ts = tmp_opt.tstamp_ok;