/* net/8021q/vlan_core.c */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include "vlan.h"

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
8
		      u16 vlan_tci, int polling)
9
{
H
Herbert Xu 已提交
10 11
	if (skb_bond_should_drop(skb))
		goto drop;
12

13
	skb->vlan_tci = vlan_tci;
H
Herbert Xu 已提交
14 15 16 17
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;
18 19

	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
H
Herbert Xu 已提交
20 21 22 23

drop:
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
24 25 26 27 28
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);

int vlan_hwaccel_do_receive(struct sk_buff *skb)
{
H
Herbert Xu 已提交
29
	struct net_device *dev = skb->dev;
30 31
	struct net_device_stats *stats;

H
Herbert Xu 已提交
32
	skb->dev = vlan_dev_info(dev)->real_dev;
33 34
	netif_nit_deliver(skb);

35 36
	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
37
	skb->vlan_tci = 0;
38

39
	stats = &dev->stats;
40 41 42 43 44 45 46 47 48 49 50 51 52 53
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		stats->multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
54
					dev->dev_addr))
55 56 57
			skb->pkt_type = PACKET_HOST;
		break;
	};
58
	return 0;
59
}
60 61 62 63 64

/* Return the underlying (real) device of a VLAN device. */
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

/* Return the VLAN ID (VID) configured on a VLAN device. */
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

static int vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
			   unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;

	if (skb_bond_should_drop(skb))
		goto drop;

	skb->vlan_tci = vlan_tci;
	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);

	if (!skb->dev)
		goto drop;

	for (p = napi->gro_list; p; p = p->next) {
		NAPI_GRO_CB(p)->same_flow = p->dev == skb->dev;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	return 2;
}

/* GRO receive path for a linear VLAN-tagged skb.  Translates the
 * vlan_gro_common() verdict: -1 means GRO declined (pass the skb up the
 * normal stack), 1 means GRO consumed/merged it, 2 means drop it.
 */
int vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
		     unsigned int vlan_tci, struct sk_buff *skb)
{
	int verdict = vlan_gro_common(napi, grp, vlan_tci, skb);

	if (verdict == -1)
		return netif_receive_skb(skb);

	if (verdict == 1 || verdict == 2)
		kfree_skb(skb);

	return (verdict == 2) ? NET_RX_DROP : NET_RX_SUCCESS;
}
EXPORT_SYMBOL(vlan_gro_receive);

int vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
		   unsigned int vlan_tci, struct napi_gro_fraginfo *info)
{
	struct sk_buff *skb = napi_fraginfo_skb(napi, info);
	int err = NET_RX_DROP;

	if (!skb)
		goto out;

	err = NET_RX_SUCCESS;

	switch (vlan_gro_common(napi, grp, vlan_tci, skb)) {
	case -1:
		return netif_receive_skb(skb);

	case 2:
		err = NET_RX_DROP;
		/* fall through */

	case 1:
		napi_reuse_skb(napi, skb);
		break;
	}

out:
	return err;
}
EXPORT_SYMBOL(vlan_gro_frags);