/*
 * net/8021q/vlan_core.c - core VLAN helpers shared with the networking core
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

8
bool vlan_do_receive(struct sk_buff **skbp)
9
{
10
	struct sk_buff *skb = *skbp;
11
	__be16 vlan_proto = skb->vlan_proto;
12
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
13
	struct net_device *vlan_dev;
E
Eric Dumazet 已提交
14
	struct vlan_pcpu_stats *rx_stats;
15

16
	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
17
	if (!vlan_dev)
18
		return false;
19

20 21 22
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;
H
Herbert Xu 已提交
23

24
	skb->dev = vlan_dev;
25 26 27 28
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
29
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
30 31 32
			skb->pkt_type = PACKET_HOST;
	}

33
	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
34 35 36 37 38 39 40 41
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
42 43
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_proto,
					      skb->vlan_tci);
44 45 46 47 48 49
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

50
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
51
	skb->vlan_tci = 0;
52

53
	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);
E
Eric Dumazet 已提交
54

E
Eric Dumazet 已提交
55
	u64_stats_update_begin(&rx_stats->syncp);
E
Eric Dumazet 已提交
56 57
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
58
	if (skb->pkt_type == PACKET_MULTICAST)
E
Eric Dumazet 已提交
59 60
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);
61 62

	return true;
63
}
64

65 66
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
67
					__be16 vlan_proto, u16 vlan_id)
68
{
69
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
70

71
	if (vlan_info) {
72 73
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
74 75
	} else {
		/*
76 77 78
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
79
		 */
80 81 82 83
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
84 85
			return __vlan_find_dev_deep(upper_dev,
						    vlan_proto, vlan_id);
86 87 88 89 90 91
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);

92 93
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
94
	return vlan_dev_priv(dev)->real_dev;
95
}
96
EXPORT_SYMBOL(vlan_dev_real_dev);
97 98 99

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
100
	return vlan_dev_priv(dev)->vlan_id;
101
}
102
EXPORT_SYMBOL(vlan_dev_vlan_id);
H
Herbert Xu 已提交
103

104
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
105
{
106 107 108 109
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
132
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
133 134 135 136

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

137
	skb = vlan_reorder_header(skb);
138 139 140
	if (unlikely(!skb))
		goto err_free;

141 142
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
143 144
	skb_reset_mac_len(skb);

145 146 147 148 149 150
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
151
EXPORT_SYMBOL(vlan_untag);

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

struct vlan_vid_info {
	struct list_head list;
192 193
	__be16 proto;
	u16 vid;
194 195 196 197
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
198
					       __be16 proto, u16 vid)
199 200 201 202
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
203
		if (vid_info->proto == proto && vid_info->vid == vid)
204 205 206 207 208
			return vid_info;
	}
	return NULL;
}

209
static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
210 211 212 213 214 215
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
216
	vid_info->proto = proto;
217 218 219 220 221
	vid_info->vid = vid;

	return vid_info;
}

222
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
223
			  struct vlan_vid_info **pvid_info)
224
{
225
	struct net_device *dev = vlan_info->real_dev;
226
	const struct net_device_ops *ops = dev->netdev_ops;
227 228 229
	struct vlan_vid_info *vid_info;
	int err;

230
	vid_info = vlan_vid_info_alloc(proto, vid);
231 232
	if (!vid_info)
		return -ENOMEM;
233

234 235 236
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		err =  ops->ndo_vlan_rx_add_vid(dev, proto, vid);
237 238 239 240
		if (err) {
			kfree(vid_info);
			return err;
		}
241
	}
242 243 244
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
245 246
	return 0;
}
247

248
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
249 250 251 252 253 254 255 256 257 258 259 260 261 262 263
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
264
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
265
	if (!vid_info) {
266
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
267 268 269 270 271 272 273 274 275 276 277 278 279 280 281
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
282 283
EXPORT_SYMBOL(vlan_vid_add);

284 285
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
286
{
287
	struct net_device *dev = vlan_info->real_dev;
288
	const struct net_device_ops *ops = dev->netdev_ops;
289 290
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
291
	int err;
292

293 294 295
	if (proto == htons(ETH_P_8021Q) &&
	    dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
		err = ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
296
		if (err) {
297 298
			pr_warn("failed to kill vid %04x/%d for device %s\n",
				proto, vid, dev->name);
299 300 301 302 303 304 305
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

306
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
307 308 309 310 311 312 313 314 315 316
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

317
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
318 319 320 321 322 323 324 325 326
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
327 328 329
	}
}
EXPORT_SYMBOL(vlan_vid_del);
330 331 332 333 334

int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
335
	struct vlan_info *vlan_info;
336 337 338 339
	int err;

	ASSERT_RTNL();

340 341
	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
342 343
		return 0;

344
	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
345
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
346 347 348 349 350 351 352
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
353
					     &vlan_info->vid_list,
354
					     list) {
355
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
356 357 358 359 360 361 362 363 364 365
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
366
	struct vlan_info *vlan_info;
367 368 369

	ASSERT_RTNL();

370 371
	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
372 373
		return;

374
	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
375
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
376 377
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
378 379 380

bool vlan_uses_dev(const struct net_device *dev)
{
381 382 383 384 385 386 387 388
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
389 390
}
EXPORT_SYMBOL(vlan_uses_dev);