/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	GRE GSO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <net/gre.h>

static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int gre_offset, outer_hlen;
	bool need_csum, ufo;

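	/* Bail out if the packet carries any GSO type this GRE path does
	 * not know how to segment.
	 */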
	if (unlikely(skb_shinfo(skb)->gso_type &
				~(SKB_GSO_TCPV4 |
				  SKB_GSO_TCPV6 |
				  SKB_GSO_UDP |
				  SKB_GSO_DODGY |
				  SKB_GSO_TCP_ECN |
				  SKB_GSO_GRE |
				  SKB_GSO_GRE_CSUM |
				  SKB_GSO_IPIP |
				  SKB_GSO_SIT)))
		goto out;

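	/* GRE segmentation only applies to packets that were marked as
	 * encapsulated when the tunnel header was built.
	 */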
	if (!skb->encapsulation)
		goto out;

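	/* The tunnel header must be at least as long as a basic GRE header. */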
	if (unlikely(tnl_hlen < sizeof(struct gre_base_hdr)))
		goto out;

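	/* Make sure the complete tunnel header sits in the linear data area. */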
	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Set up the inner skb. */
	skb->encapsulation = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = skb->inner_protocol;

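	/* Note whether each segment will need its GRE checksum filled in. */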
	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM);
	skb->encap_hdr_csum = need_csum;

	ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

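	/* Limit the inner segmentation to offloads the device supports for
	 * encapsulated packets.
	 */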
	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags based
	 * on the fact that we will be computing our checksum in software.
	 */
	if (ufo) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = skb_mac_gso_segment(skb, features);
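	/* On failure, restore the original header layout before returning. */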
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

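	/* outer_hlen spans the outer MAC, IP and GRE headers; the GRE
	 * header itself starts gre_offset bytes into that span.
	 */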
	outer_hlen = skb_tnl_header_len(skb);
	gre_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		struct gre_base_hdr *greh;
		__be32 *pcsum;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

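		/* Restore the outer headers on each segment. */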
		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, gre_offset);

		if (!need_csum)
			continue;

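		/* The checksum field follows the base GRE header; zero it,
		 * then compute the checksum over the GRE header and payload.
		 */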
		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		pcsum = (__be32 *)(greh + 1);

		*pcsum = 0;
		*(__sum16 *)pcsum = gso_make_checksum(skb, 0);
	} while ((skb = skb->next));
out:
	return segs;
}

static struct sk_buff **gre_gro_receive(struct sk_buff **head,
					struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	const struct gre_base_hdr *greh;
	unsigned int hlen, grehlen;
	unsigned int off;
	int flush = 1;
	struct packet_offload *ptype;
	__be16 type;

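	/* Fetch the base GRE header from the GRO header area, falling back
	 * to the slow path if it is not directly accessible.
	 */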
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*greh);
	greh = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out;
	}

	/* Only support version 0 and the K (key) and C (csum) flags. Note
	 * that although support for the S (seq#) flag could easily be
	 * added for GRO, it is problematic for GSO and hence cannot be
	 * enabled here, because a GRO'd pkt may end up in the forwarding
	 * path and would then need GSO support to break it up correctly.
	 */
	if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0)
		goto out;

	type = greh->protocol;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

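	/* Each optional field present (checksum, key) adds one 4-byte
	 * section to the GRE header.
	 */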
	grehlen = GRE_HEADER_SECTION;

	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	hlen = off + grehlen;
	if (skb_gro_header_hard(skb, hlen)) {
		greh = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!greh))
			goto out_unlock;
	}

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
		if (skb_gro_checksum_simple_validate(skb))
			goto out_unlock;

		skb_gro_checksum_try_convert(skb, IPPROTO_GRE, 0,
					     null_compute_pseudo);
	}

	for (p = *head; p; p = p->next) {
		const struct gre_base_hdr *greh2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		/* The following checks are needed to ensure only pkts
		 * from the same tunnel are considered for aggregation.
		 * The criteria for "the same tunnel" includes:
		 * 1) same version (we only support version 0 here)
		 * 2) same protocol (we only support ETH_P_IP for now)
		 * 3) same set of flags
		 * 4) same key if the key field is present.
		 */
		greh2 = (struct gre_base_hdr *)(p->data + off);

		if (greh2->flags != greh->flags ||
		    greh2->protocol != greh->protocol) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (greh->flags & GRE_KEY) {
			/* compare keys */
			if (*(__be32 *)(greh2+1) != *(__be32 *)(greh+1)) {
				NAPI_GRO_CB(p)->same_flow = 0;
				continue;
			}
		}
	}

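	/* Strip the GRE header and hand the inner packet to the next
	 * protocol's GRO receive handler.
	 */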
	skb_gro_pull(skb, grehlen);

	/* Adjust NAPI_GRO_CB(skb)->csum after skb_gro_pull(). */
	skb_gro_postpull_rcsum(skb, greh, grehlen);

	pp = ptype->callbacks.gro_receive(head, skb);
	flush = 0;

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}

static int gre_gro_complete(struct sk_buff *skb, int nhoff)
{
	struct gre_base_hdr *greh = (struct gre_base_hdr *)(skb->data + nhoff);
	struct packet_offload *ptype;
	unsigned int grehlen = sizeof(*greh);
	int err = -ENOENT;
	__be16 type;

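	/* The merged packet is GRE-encapsulated; mark it so that a later
	 * resegmentation (e.g. on the forwarding path) uses the GRE GSO path.
	 */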
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type = SKB_GSO_GRE;

	type = greh->protocol;
	if (greh->flags & GRE_KEY)
		grehlen += GRE_HEADER_SECTION;

	if (greh->flags & GRE_CSUM)
		grehlen += GRE_HEADER_SECTION;

	rcu_read_lock();
	ptype = gro_find_complete_by_type(type);
	if (ptype)
		err = ptype->callbacks.gro_complete(skb, nhoff + grehlen);

	rcu_read_unlock();

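	/* Record where the inner packet begins for later encapsulation
	 * offloads.
	 */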
	skb_set_inner_mac_header(skb, nhoff + grehlen);

	return err;
}

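/* GSO/GRO callbacks registered for IPPROTO_GRE. */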
static const struct net_offload gre_offload = {
	.callbacks = {
		.gso_segment = gre_gso_segment,
		.gro_receive = gre_gro_receive,
		.gro_complete = gre_gro_complete,
	},
};

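/* Register the GRE offload handlers in the IPv4 offload table at boot. */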
static int __init gre_offload_init(void)
{
	return inet_add_offload(&gre_offload, IPPROTO_GRE);
}
device_initcall(gre_offload_init);