// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

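/* Set skb->protocol from the IP version nibble of the first payload byte;
 * anything that is not IPv4 or IPv6 is left as a raw MAP frame.
 */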
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

/* Generic handler */

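/* Deliver a de-encapsulated packet to the stack through the virtual
 * device's GRO cell, after resetting header offsets and updating the
 * per-device RX counters.
 */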
static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

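/* Demultiplex a single MAP frame: dispatch MAP commands, look up the
 * logical endpoint by mux ID, strip the MAP header and padding, verify
 * the downlink checksum if enabled, then deliver to the endpoint device.
 */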
static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}

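/* Ingress entry for MAP data: packets from Ethernet-type real devices
 * first get their link-layer header re-exposed, then frames are either
 * deaggregated into individual MAP packets or handled as a single frame.
 */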
static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

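/* Prepend the MAP header (and the uplink checksum header when MAPv4
 * checksum offload is enabled), expanding headroom if needed, and tag
 * the frame with the endpoint's mux ID.
 */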
static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
	}

	if (skb_headroom(skb) < required_headroom) {
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			return -ENOMEM;
	}

	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		return -ENOMEM;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;
}

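/* Bridge mode: re-attach the MAC header if one was set and forward the
 * frame unmodified to the bridged device.
 */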
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (skb_mac_header_was_set(skb))
		skb_push(skb, skb->mac_len);

	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

	if (skb_linearize(skb)) {
		kfree_skb(skb);
		goto done;
	}

	if (skb->pkt_type == PACKET_LOOPBACK)
		return RX_HANDLER_PASS;

	dev = skb->dev;
	port = rmnet_get_port_rcu(dev);
	if (unlikely(!port)) {
		atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		goto done;
	}

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;

	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port_rcu(skb->dev);
	if (!port)
		goto drop;

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
		goto drop;

	rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
	return;

drop:
	this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
	kfree_skb(skb);
}