/*
 * INET        An implementation of the TCP/IP protocol suite for the LINUX
 *             operating system.  INET is implemented using the  BSD Socket
 *             interface as the means of communication with the user level.
 *
 *             Support for INET6 connection oriented protocols.
 *
 * Authors:    See the TCPv6 sources
 *
 *             This program is free software; you can redistribute it and/or
 *             modify it under the terms of the GNU General Public License
 *             as published by the Free Software Foundation; either version
 *             2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

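/*
 * Check whether sk may share the local port owned by bucket tb: walk
 * every socket already bound to that port and report a conflict when
 * the two sockets use the same (or no) bound device, address reuse is
 * not allowed for the pair (or the other socket is still listening),
 * and their IPv6 receive source addresses match.
 */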
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb)
{
	const struct sock *sk2;
	const struct hlist_node *node;

	/* We must walk the whole port owner list in this case. -DaveM */
	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
		    (!sk->sk_reuse || !sk2->sk_reuse ||
		     sk2->sk_state == TCP_LISTEN) &&
		     ipv6_rcv_saddr_equal(sk, sk2))
			break;
	}

	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

/*
 * request_sock (formerly open request) hash tables.
 */
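/*
 * Hash (remote address, remote port) into a SYN queue bucket, mixing in
 * the per-listener random seed rnd.
 */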
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u16 synq_hsize)
{
	u32 a = (__force u32)raddr->s6_addr32[0];
	u32 b = (__force u32)raddr->s6_addr32[1];
	u32 c = (__force u32)raddr->s6_addr32[2];

	a += JHASH_GOLDEN_RATIO;
	b += JHASH_GOLDEN_RATIO;
	c += rnd;
	__jhash_mix(a, b, c);

	a += (__force u32)raddr->s6_addr32[3];
	b += (__force u32)rport;
	__jhash_mix(a, b, c);

	return c & (synq_hsize - 1);
}

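/*
 * Find a pending connection request on listener sk that matches the
 * remote port and address, the local address and, when the request
 * records one, the incoming interface.  On success *prevp points at
 * the chain link so the caller can unlink the request.
 */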
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet6_request_sock *treq = inet6_rsk(req);

		if (inet_rsk(req)->rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&treq->rmt_addr, raddr) &&
		    ipv6_addr_equal(&treq->loc_addr, laddr) &&
		    (!treq->iif || treq->iif == iif)) {
			BUG_TRAP(req->sk == NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}

EXPORT_SYMBOL_GPL(inet6_csk_search_req);

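/*
 * Hash a new connection request into the listener's SYN queue and
 * update the queue accounting and SYN-ACK timer for sk.
 */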
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr,
				      inet_rsk(req)->rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

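/*
 * Fill a sockaddr_in6 with the peer's address and port; the scope id is
 * set only for link-local peers on a device-bound socket.
 */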
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
	sin6->sin6_port	= inet_sk(sk)->dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = 0;
	if (sk->sk_bound_dev_if &&
	    ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
		sin6->sin6_scope_id = sk->sk_bound_dev_if;
}

EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

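/*
 * Cache a dst entry on the socket.  With CONFIG_XFRM the current flow
 * cache generation is recorded so the entry can be invalidated when
 * IPsec policies change.
 */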
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   struct in6_addr *daddr, struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);

#ifdef CONFIG_XFRM
	if (dst) {
		struct rt6_info *rt = (struct rt6_info  *)dst;
		rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
	}
#endif
}

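/*
 * Revalidate the socket's cached dst: drop it when the routing cookie
 * or, with CONFIG_XFRM, the recorded flow cache generation is stale.
 */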
static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst;

	dst = __sk_dst_check(sk, cookie);

#ifdef CONFIG_XFRM
	if (dst) {
		struct rt6_info *rt = (struct rt6_info *)dst;
		if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
			sk->sk_dst_cache = NULL;
			dst_release(dst);
			dst = NULL;
		}
	}
#endif

	return dst;
}

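/*
 * Transmit skb on a connected socket: build the flow from the socket's
 * addresses and ports, route via the first hop of any IPv6 routing
 * header, revalidate or look up (and xfrm-resolve) the cached dst, then
 * hand the packet to ip6_xmit() with the socket's IPv6 options.
 */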
int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi fl;
	struct dst_entry *dst;
	struct in6_addr *final_p = NULL, final;

	memset(&fl, 0, sizeof(fl));
	fl.proto = sk->sk_protocol;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	fl.fl6_flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_sport = inet->sport;
	fl.fl_ip_dport = inet->dport;
	security_sk_classify_flow(sk, &fl);

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);

	if (dst == NULL) {
		int err = ip6_dst_lookup(sk, &dst, &fl);

		if (err) {
			sk->sk_err_soft = -err;
			kfree_skb(skb);
			return err;
		}

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
			sk->sk_route_caps = 0;
			kfree_skb(skb);
			return err;
		}

		__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}

	skb->dst = dst_clone(dst);

	/* Restore final destination back after routing done */
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);

	return ip6_xmit(sk, skb, &fl, np->opt, 0);
}

EXPORT_SYMBOL_GPL(inet6_csk_xmit);