// SPDX-License-Identifier: GPL-2.0
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

/* Steer a VLAN-tagged skb to its vlan device on the ingress path.
 *
 * Returns true when the skb was re-targeted at a vlan device (the
 * caller continues processing with *skbp).  Returns false when no
 * matching vlan device exists (caller keeps the skb on the real
 * device), or when the skb was consumed/dropped — in the drop case
 * *skbp is set to NULL so the caller can tell the difference.
 */
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	/* No vlan device for this (proto, vid) on the ingress device:
	 * not ours, let the caller continue. */
	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	/* We are about to modify the skb; make sure it is not shared. */
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	/* The vlan device is down: drop the packet and signal that it
	 * has been consumed by clearing *skbp. */
	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	/* When header reordering is disabled, re-insert the stripped tag
	 * into the packet data (unless a macvlan/bridge port will handle
	 * the frame as-is). */
	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	/* Map the ingress priority and clear the tag — it is consumed. */
	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

/* Must be invoked with rcu_read_lock. */
75
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
76
					__be16 vlan_proto, u16 vlan_id)
77
{
78
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
79

80
	if (vlan_info) {
81 82
		return vlan_group_get_device(&vlan_info->grp,
					     vlan_proto, vlan_id);
83 84
	} else {
		/*
85 86 87
		 * Lower devices of master uppers (bonding, team) do not have
		 * grp assigned to themselves. Grp is assigned to upper device
		 * instead.
88
		 */
89 90 91 92
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
93
			return __vlan_find_dev_deep_rcu(upper_dev,
94
						    vlan_proto, vlan_id);
95 96 97 98
	}

	return NULL;
}
99
EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
100

101 102
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
103 104 105 106 107 108
	struct net_device *ret = vlan_dev_priv(dev)->real_dev;

	while (is_vlan_dev(ret))
		ret = vlan_dev_priv(ret)->real_dev;

	return ret;
109
}
110
EXPORT_SYMBOL(vlan_dev_real_dev);
111 112 113

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
114
	return vlan_dev_priv(dev)->vlan_id;
115
}
116
EXPORT_SYMBOL(vlan_dev_vlan_id);
H
Herbert Xu 已提交
117

118 119 120 121 122 123
__be16 vlan_dev_vlan_proto(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_proto;
}
EXPORT_SYMBOL(vlan_dev_vlan_proto);

124 125 126 127 128 129
/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
130
	int i, j;
131

132 133 134
	for (i = 0; i < VLAN_PROTO_NUM; i++)
		for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
			kfree(grp->vlan_devices_arrays[i][j]);
135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162
}

/* Release a vlan_info: first its group's arrays, then the container. */
static void vlan_info_free(struct vlan_info *info)
{
	vlan_group_free(&info->grp);
	kfree(info);
}

/* RCU callback: free the vlan_info embedding @rcu after a grace period. */
static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	struct vlan_info *info = container_of(rcu, struct vlan_info, rcu);

	vlan_info_free(info);
}

/* Allocate and initialize a vlan_info for @dev.
 * Returns NULL on allocation failure.  The caller owns the result and
 * either publishes it via rcu_assign_pointer() or frees it. */
static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	/* sizeof(*ptr) ties the size to the variable's type (kernel
	 * coding-style preferred form). */
	vlan_info = kzalloc(sizeof(*vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

/* One (protocol, vid) pair registered on a real device.  Entries live
 * on vlan_info->vid_list and are shared between users via refcount. */
struct vlan_vid_info {
	struct list_head list;	/* node on vlan_info->vid_list */
	__be16 proto;		/* VLAN ethertype (network byte order) */
	u16 vid;		/* VLAN id */
	int refcount;		/* vlan_vid_add() users of this pair */
};

/* Does @dev offer a hardware VLAN filter for the given tag protocol? */
static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
{
	netdev_features_t needed;

	if (proto == htons(ETH_P_8021Q))
		needed = NETIF_F_HW_VLAN_CTAG_FILTER;
	else if (proto == htons(ETH_P_8021AD))
		needed = NETIF_F_HW_VLAN_STAG_FILTER;
	else
		return false;

	return (dev->features & needed) != 0;
}

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
180
					       __be16 proto, u16 vid)
181 182 183 184
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
185
		if (vid_info->proto == proto && vid_info->vid == vid)
186 187 188 189 190
			return vid_info;
	}
	return NULL;
}

191
/* Allocate a zeroed tracking entry for (proto, vid).
 * Returns NULL on allocation failure; refcount starts at 0 and is
 * bumped by the caller. */
static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
{
	struct vlan_vid_info *vid_info;

	/* sizeof(*ptr) ties the size to the variable's type (kernel
	 * coding-style preferred form). */
	vid_info = kzalloc(sizeof(*vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->proto = proto;
	vid_info->vid = vid;

	return vid_info;
}

/* Program (proto, vid) into @dev's hardware filter.
 * Silently succeeds when the device has no such filter; -ENODEV when
 * the device is not present. */
static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (!netif_device_present(dev))
		return -ENODEV;

	return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
}

/* Remove (proto, vid) from @dev's hardware filter.
 * Silently succeeds when the device has no such filter; -ENODEV when
 * the device is not present. */
static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
{
	if (!vlan_hw_filter_capable(dev, proto))
		return 0;

	if (!netif_device_present(dev))
		return -ENODEV;

	return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
}

/* Program every tracked vid of @proto into the real device's hardware
 * filter.  On failure, entries added so far are rolled back and the
 * error is returned. */
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	/* Walk back over the already-programmed entries and remove them;
	 * the cursor still points at the entry that failed. */
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);

/* Remove every tracked vid of @proto from the real device's hardware
 * filter (errors from individual removals are ignored). */
void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct vlan_vid_info *entry;

	list_for_each_entry(entry, &vlan_info->vid_list, list) {
		if (entry->proto != proto)
			continue;
		vlan_kill_rx_filter_info(vlan_info->real_dev,
					 entry->proto, entry->vid);
	}
}
EXPORT_SYMBOL(vlan_filter_drop_vids);

/* Create a tracking entry for (proto, vid), program the hardware
 * filter, and link the entry onto the vid list.  On success *pvid_info
 * points at the new entry; on failure nothing is left allocated. */
static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(proto, vid);
	if (!vid_info)
		return -ENOMEM;

	err = vlan_add_rx_filter_info(dev, proto, vid);
	if (err)
		goto free_info;

	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;

free_info:
	kfree(vid_info);
	return err;
}

/* Register (proto, vid) on @dev, allocating the per-device vlan_info
 * on first use.  Entries are refcounted: adding an existing pair only
 * bumps its count.  Caller must hold RTNL. */
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	/* Publish a freshly created vlan_info only after it is fully
	 * initialized and holds the new entry. */
	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	/* Never published, so no RCU grace period is needed. */
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
328
{
329
	struct net_device *dev = vlan_info->real_dev;
330 331
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
332
	int err;
333

334 335 336 337 338
	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err)
		pr_warn("failed to kill vid %04x/%d for device %s\n",
			proto, vid, dev->name);

339 340 341 342 343
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

344
/* Drop one reference on (proto, vid) for @dev.  The entry is removed
 * and freed at refcount zero; when no vids remain, the vlan_info is
 * unpublished and freed after an RCU grace period.  Caller must hold
 * RTNL. */
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			/* Unpublish first; readers still traversing are
			 * protected by the RCU-deferred free below. */
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

/* Replicate every (proto, vid) registered on @by_dev onto @dev,
 * unwinding the entries already added if any add fails.  Caller must
 * hold RTNL. */
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* Roll back only the entries added before the failure; the
	 * cursor still points at the entry that failed. */
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
404
	struct vlan_info *vlan_info;
405 406 407

	ASSERT_RTNL();

408 409
	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
410 411
		return;

412
	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
413
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
414 415
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);
416 417 418

bool vlan_uses_dev(const struct net_device *dev)
{
419 420 421 422 423 424 425 426
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs ? true : false;
427 428
}
EXPORT_SYMBOL(vlan_uses_dev);