// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

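/* Locate the checksum field inside a transport header.  Given the IP
 * protocol number and a pointer to the start of the transport header, return
 * a pointer to the TCP or UDP checksum field, or NULL for any other
 * protocol.
 */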
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	__sum16 *check = NULL;

	switch (protocol) {
	case IPPROTO_TCP:
		check = &(((struct tcphdr *)txporthdr)->check);
		break;

	case IPPROTO_UDP:
		check = &(((struct udphdr *)txporthdr)->check);
		break;

	default:
		check = NULL;
		break;
	}

	return check;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
	u16 csum_value, csum_value_final;
	struct iphdr *ip4h;
	void *txporthdr;
	__be16 addend;

	ip4h = (struct iphdr *)(skb->data);
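	/* Checksum offload is not attempted on fragments; leave them to be
	 * validated in software.
	 */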
	if ((ntohs(ip4h->frag_off) & IP_MF) ||
	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	txporthdr = skb->data + ip4h->ihl * 4;

	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
	if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

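	/* The trailer checksum appears to cover the entire IP packet (header
	 * plus payload).  Remove the IP header contribution, fold in the
	 * pseudo-header checksum, then back out the value already stored in
	 * the transport checksum field; for a good packet the result matches
	 * that stored value.
	 */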
	csum_value = ~ntohs(csum_trailer->csum_value);
	hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
	ip_payload_csum = csum16_sub((__force __sum16)csum_value,
				     (__force __be16)hdr_csum);

	pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					 ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					 ip4h->protocol, 0);
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip4h->protocol) {
		case IPPROTO_UDP:
			/* RFC 768 - DL4 1's complement rule for UDP csum 0 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL4 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	__sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
	u16 csum_value, csum_value_final;
	__be16 ip6_hdr_csum, addend;
	struct ipv6hdr *ip6h;
	void *txporthdr;
	u32 length;

	ip6h = (struct ipv6hdr *)(skb->data);

	txporthdr = skb->data + sizeof(struct ipv6hdr);
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);

	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

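	/* As in the IPv4 case: remove the contribution of the (fixed 40-byte)
	 * IPv6 header from the trailer checksum, fold in the pseudo-header
	 * checksum, and back out the stored transport checksum before
	 * comparing.  Extension headers are not handled here.
	 */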
	csum_value = ~ntohs(csum_trailer->csum_value);
	ip6_hdr_csum = (__force __be16)
			~ntohs((__force __be16)ip_compute_csum(ip6h,
			       (int)(txporthdr - (void *)(skb->data))));
	ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
				      ip6_hdr_csum);

	length = (ip6h->nexthdr == IPPROTO_UDP) ?
		 ntohs(((struct udphdr *)txporthdr)->len) :
		 ntohs(ip6h->payload_len);
	pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
			     length, ip6h->nexthdr, 0));
	addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
	pseudo_csum = csum16_add(ip6_payload_csum, addend);

	addend = (__force __be16)ntohs((__force __be16)*csum_field);
	csum_temp = ~csum16_sub(pseudo_csum, addend);
	csum_value_final = (__force u16)csum_temp;

	if (unlikely(csum_value_final == 0)) {
		switch (ip6h->nexthdr) {
		case IPPROTO_UDP:
			/* RFC 2460 section 8.1
			 * DL6 One's complement rule for UDP checksum 0
			 */
			csum_value_final = ~csum_value_final;
			break;

		case IPPROTO_TCP:
			/* DL6 Non-RFC compliant TCP checksum found */
			if (*csum_field == (__force __sum16)0xFFFF)
				csum_value_final = ~csum_value_final;
			break;
		}
	}

	if (csum_value_final == ntohs((__force __be16)*csum_field)) {
		priv->stats.csum_ok++;
		return 0;
	} else {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}
}
#endif

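/* Invert the transport checksum field in place before uplink offload.  With
 * CHECKSUM_PARTIAL the field holds the pseudo-header checksum computed by
 * the stack; the MAP offload engine presumably folds its payload checksum
 * into this complemented value.
 */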
static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
{
	struct iphdr *ip4h = (struct iphdr *)iphdr;
	void *txphdr;
	u16 *csum;

	txphdr = iphdr + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

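/* Build the MAP uplink checksum header for an IPv4 packet: record where the
 * transport header starts (the IP header length) and where the checksum
 * field sits within it, flag UDP vs TCP, then drop CHECKSUM_PARTIAL so the
 * checksum is left to the offload engine.
 */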
static void
rmnet_map_ipv4_ul_csum_header(void *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct iphdr *ip4h = iphdr;
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (ip4h->protocol == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}

#if IS_ENABLED(CONFIG_IPV6)
static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
	void *txphdr;
	u16 *csum;

	txphdr = ip6hdr + sizeof(struct ipv6hdr);

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

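/* IPv6 counterpart of rmnet_map_ipv4_ul_csum_header(). */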
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	struct ipv6hdr *ip6h = ip6hdr;
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (ip6h->nexthdr == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
}
#endif

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen, int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;
	u8 *padbytes;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
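	/* Pad the MAP payload out to a 4-byte boundary; the number of pad
	 * bytes is recorded in the low bits of the flags field.
	 */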
	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	padbytes = (u8 *)skb_put(skb, padding);
	memset(padbytes, 0, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	/* This is a data packet, so the CMD bit is 0 */
	map_header->flags = padding & MAP_PAD_LEN_MASK;

	return map_header;
}

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
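	/* A MAP frame is the MAP header plus pkt_len bytes of payload.  With
	 * downlink checksum offload, MAPv4 appends a checksum trailer and
	 * MAPv5 inserts a checksum header after the MAP header (for data
	 * packets only), so account for those when sizing the copy.
	 */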
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* Mapv5 data pkt without csum hdr is invalid */
				return NULL;
		}
	}

	if (((int)skb->len - (int)packet_len) < 0)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (!maph->pkt_len)
		return NULL;

	if (next_hdr) {
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

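	/* The checksum trailer sits immediately after the first len bytes */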
	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
#else
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
#endif
	} else {
		priv->stats.csum_err_invalid_ip_version++;
		return -EPROTONOSUPPORT;
	}

	return 0;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		iphdr = (char *)ul_header +
			sizeof(struct rmnet_map_ul_csum_header);

		if (skb->protocol == htons(ETH_P_IP)) {
			rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
			return;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
			rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
			return;
#else
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
#endif
		} else {
			priv->stats.csum_err_invalid_ip_version++;
		}
	}

sw_csum:
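	/* No offload for this packet: leave the checksum header zeroed
	 * (offload disabled) and count it as a software checksum.
	 */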
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

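	/* Without NETIF_F_RXCSUM the packet is left for software validation;
	 * otherwise trust the hardware's valid flag and skip the software
	 * checksum when it is set.
	 */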
	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Pull csum v5 header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}