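/*
 * net/8021q/vlan_core.c - core VLAN helpers: the hardware-accelerated
 * receive path (vlan_do_receive, vlan_untag) and per-real-device vid
 * bookkeeping (vlan_vid_add/vlan_vid_del and friends).
 */
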
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include <linux/export.h>
#include "vlan.h"

bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev)
		return false;

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local; let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device and still route correctly.
		 */
		if (ether_addr_equal(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

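	/* vlan_pcpu_stats is per-cpu and syncp is a u64_stats seqcount,
	 * so the 64-bit counters can be read consistently on 32-bit hosts.
	 */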
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}

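/*
 * Sketch of the assumed caller (it lives in net/core/dev.c, not in this
 * file): the receive core hands tagged skbs to vlan_do_receive() and
 * restarts delivery on the VLAN device when it succeeds, roughly:
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;
 *		else if (unlikely(!skb))
 *			goto out;
 *	}
 */
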
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep(struct net_device *dev,
					u16 vlan_id)
{
	struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);

	if (vlan_info) {
		return vlan_group_get_device(&vlan_info->grp, vlan_id);
	} else {
		/*
		 * Lower devices of master uppers (bonding, team) do not have
		 * a grp assigned to themselves; the grp is assigned to the
		 * upper device instead.
		 */
		struct net_device *upper_dev;

		upper_dev = netdev_master_upper_dev_get_rcu(dev);
		if (upper_dev)
			return __vlan_find_dev_deep(upper_dev, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
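
/*
 * Usage sketch (hypothetical caller): the deep lookup must run inside an
 * RCU read-side critical section, e.g. a driver resolving a received vid
 * to its VLAN netdevice:
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep(real_dev, vid);
 *	if (vlan_dev)
 *		... use vlan_dev ...
 *	rcu_read_unlock();
 */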

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_priv(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

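/*
 * Move the Ethernet addresses up by VLAN_HLEN so the frame looks as if it
 * had arrived untagged: the 4-byte tag that sat between the source MAC and
 * the type/length field is overwritten, and mac_header is adjusted to
 * match.
 */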
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
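
/*
 * Presumed caller context (net/core/dev.c, not this file): the receive
 * core calls vlan_untag() for an ETH_P_8021Q frame whose tag the NIC did
 * not strip in hardware, roughly:
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
 *	    !vlan_tx_tag_present(skb))
 *		skb = vlan_untag(skb);
 *
 * On return the tag lives in skb->vlan_tci and the packet data is
 * tag-free.
 */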

/*
 * vlan info and vid list
 */

static void vlan_group_free(struct vlan_group *grp)
{
	int i;

	for (i = 0; i < VLAN_GROUP_ARRAY_SPLIT_PARTS; i++)
		kfree(grp->vlan_devices_arrays[i]);
}

static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}

static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}

static struct vlan_info *vlan_info_alloc(struct net_device *dev)
{
	struct vlan_info *vlan_info;

	vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
	if (!vlan_info)
		return NULL;

	vlan_info->real_dev = dev;
	INIT_LIST_HEAD(&vlan_info->vid_list);
	return vlan_info;
}

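/* One entry per vid in use on the real device; refcount counts how many
 * callers have asked for this vid via vlan_vid_add().
 */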
struct vlan_vid_info {
	struct list_head list;
	unsigned short vid;
	int refcount;
};

static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
					       unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		if (vid_info->vid == vid)
			return vid_info;
	}
	return NULL;
}

static struct vlan_vid_info *vlan_vid_info_alloc(unsigned short vid)
{
	struct vlan_vid_info *vid_info;

	vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
	if (!vid_info)
		return NULL;
	vid_info->vid = vid;

	return vid_info;
}

static int __vlan_vid_add(struct vlan_info *vlan_info, unsigned short vid,
			  struct vlan_vid_info **pvid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	struct vlan_vid_info *vid_info;
	int err;

	vid_info = vlan_vid_info_alloc(vid);
	if (!vid_info)
		return -ENOMEM;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_add_vid) {
		err = ops->ndo_vlan_rx_add_vid(dev, vid);
		if (err) {
			kfree(vid_info);
			return err;
		}
	}
	list_add(&vid_info->list, &vlan_info->vid_list);
	vlan_info->nr_vids++;
	*pvid_info = vid_info;
	return 0;
}

int vlan_vid_add(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
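
/*
 * Usage sketch (hypothetical caller): additions are refcounted and must be
 * balanced by vlan_vid_del(), both under RTNL:
 *
 *	ASSERT_RTNL();
 *	err = vlan_vid_add(real_dev, vid);
 *	if (err)
 *		return err;
 *	...
 *	vlan_vid_del(real_dev, vid);
 */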

static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	unsigned short vid = vid_info->vid;
	int err;

	if ((dev->features & NETIF_F_HW_VLAN_FILTER) &&
	    ops->ndo_vlan_rx_kill_vid) {
		err = ops->ndo_vlan_rx_kill_vid(dev, vid);
		if (err) {
			pr_warn("failed to kill vid %d for device %s\n",
				vid, dev->name);
		}
	}
	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}

void vlan_vid_del(struct net_device *dev, unsigned short vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);

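/*
 * Copy every vid known on @by_dev onto @dev (the reverse helper follows
 * below); presumably used by master/slave setups such as bonding or team
 * to keep a lower device's VLAN filter in sync with its upper device.
 */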
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);

void vlan_vids_del_by_dev(struct net_device *dev,
			  const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list)
		vlan_vid_del(dev, vid_info->vid);
}
EXPORT_SYMBOL(vlan_vids_del_by_dev);

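/* True iff at least one VLAN device is currently stacked on @dev. */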
bool vlan_uses_dev(const struct net_device *dev)
{
	struct vlan_info *vlan_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return false;
	return vlan_info->grp.nr_vlan_devs != 0;
}
EXPORT_SYMBOL(vlan_uses_dev);