Commit eae3f88e authored by David S. Miller

net: Separate out SKB validation logic from transmit path.

dev_hard_start_xmit() does two things: it first validates and
canonicalizes the SKB, then it actually sends it.

Make a set of helper functions for doing the first part.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 95f6b3dd
@@ -2644,80 +2644,97 @@ static struct sk_buff *xmit_list(struct sk_buff *first, struct net_device *dev,
 	return skb;
 }
 
-int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			struct netdev_queue *txq)
-{
-	int rc = NETDEV_TX_OK;
-
-	if (likely(!skb->next)) {
-		netdev_features_t features;
-
-		/*
-		 * If device doesn't need skb->dst, release it right now while
-		 * its hot in this cpu cache
-		 */
-		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
-			skb_dst_drop(skb);
-
-		features = netif_skb_features(skb);
-
-		if (vlan_tx_tag_present(skb) &&
-		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
-			skb = __vlan_put_tag(skb, skb->vlan_proto,
-					     vlan_tx_tag_get(skb));
-			if (unlikely(!skb))
-				goto out;
-
-			skb->vlan_tci = 0;
-		}
-
-		/* If encapsulation offload request, verify we are testing
-		 * hardware encapsulation features instead of standard
-		 * features for the netdev
-		 */
-		if (skb->encapsulation)
-			features &= dev->hw_enc_features;
-
-		if (netif_needs_gso(skb, features)) {
-			if (unlikely(dev_gso_segment(skb, features)))
-				goto out_kfree_skb;
-			if (skb->next)
-				goto gso;
-		} else {
-			if (skb_needs_linearize(skb, features) &&
-			    __skb_linearize(skb))
-				goto out_kfree_skb;
-
-			/* If packet is not checksummed and device does not
-			 * support checksumming for this protocol, complete
-			 * checksumming here.
-			 */
-			if (skb->ip_summed == CHECKSUM_PARTIAL) {
-				if (skb->encapsulation)
-					skb_set_inner_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				else
-					skb_set_transport_header(skb,
-						skb_checksum_start_offset(skb));
-				if (!(features & NETIF_F_ALL_CSUM) &&
-				    skb_checksum_help(skb))
-					goto out_kfree_skb;
-			}
-		}
-
-		return xmit_one(skb, dev, txq, false);
-	}
-gso:
+struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features)
+{
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (skb)
+			skb->vlan_tci = 0;
+	}
+	return skb;
+}
+
+static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
+{
+	netdev_features_t features;
+
+	if (skb->next)
+		return skb;
+
+	/* If device doesn't need skb->dst, release it right now while
+	 * its hot in this cpu cache
+	 */
+	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+		skb_dst_drop(skb);
+
+	features = netif_skb_features(skb);
+	skb = validate_xmit_vlan(skb, features);
+	if (unlikely(!skb))
+		goto out_null;
+
+	/* If encapsulation offload request, verify we are testing
+	 * hardware encapsulation features instead of standard
+	 * features for the netdev
+	 */
+	if (skb->encapsulation)
+		features &= dev->hw_enc_features;
+
+	if (netif_needs_gso(skb, features)) {
+		if (unlikely(dev_gso_segment(skb, features)))
+			goto out_kfree_skb;
+	} else {
+		if (skb_needs_linearize(skb, features) &&
+		    __skb_linearize(skb))
+			goto out_kfree_skb;
+
+		/* If packet is not checksummed and device does not
+		 * support checksumming for this protocol, complete
+		 * checksumming here.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			if (skb->encapsulation)
+				skb_set_inner_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			else
+				skb_set_transport_header(skb,
+					skb_checksum_start_offset(skb));
+			if (!(features & NETIF_F_ALL_CSUM) &&
+			    skb_checksum_help(skb))
+				goto out_kfree_skb;
+		}
+	}
+
+	return skb;
+
+out_kfree_skb:
+	kfree_skb(skb);
+out_null:
+	return NULL;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			struct netdev_queue *txq)
+{
+	int rc = NETDEV_TX_OK;
+
+	skb = validate_xmit_skb(skb, dev);
+	if (!skb)
+		return rc;
+
+	if (likely(!skb->next))
+		return xmit_one(skb, dev, txq, false);
+
 	skb->next = xmit_list(skb->next, dev, txq, &rc);
 	if (likely(skb->next == NULL)) {
 		skb->destructor = DEV_GSO_CB(skb)->destructor;
 		consume_skb(skb);
 		return rc;
 	}
 
-out_kfree_skb:
 	kfree_skb(skb);
-out:
+
 	return rc;
 }
 EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
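
The structural point of the change is easier to see outside the diff: validation and canonicalization of the SKB now live in helpers (validate_xmit_vlan() and validate_xmit_skb()) that either return a packet ready for the device or return NULL with the packet already freed, and dev_hard_start_xmit() simply calls the validator before handing the result to xmit_one()/xmit_list(). Below is a minimal, hedged userspace C sketch of that control flow; the names pkt, pkt_validate() and pkt_xmit_one() are hypothetical stand-ins, not kernel APIs.

/*
 * Hedged sketch, not kernel code: a plain userspace C model of the
 * validate-then-transmit split this commit introduces.  The names pkt,
 * pkt_validate() and pkt_xmit_one() are illustrative stand-ins for
 * sk_buff, validate_xmit_skb() and xmit_one().
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	bool needs_sw_vlan;	/* device cannot offload the VLAN tag   */
	bool needs_sw_csum;	/* device cannot offload the checksum   */
	bool vlan_done;
	bool csum_done;
};

/* Analogue of validate_xmit_skb(): make the packet canonical for the
 * device, or free it and return NULL if that cannot be done. */
static struct pkt *pkt_validate(struct pkt *p)
{
	if (p->needs_sw_vlan)
		p->vlan_done = true;	/* plays the role of __vlan_put_tag()    */
	if (p->needs_sw_csum)
		p->csum_done = true;	/* plays the role of skb_checksum_help() */
	return p;	/* a real failure path would free p and return NULL */
}

/* Analogue of xmit_one(): hand an already-canonical packet to the driver. */
static int pkt_xmit_one(struct pkt *p)
{
	printf("xmit: vlan_done=%d csum_done=%d\n", p->vlan_done, p->csum_done);
	free(p);
	return 0;	/* stands in for NETDEV_TX_OK */
}

/* Analogue of the new dev_hard_start_xmit(): validation happens once,
 * up front, so the send path only ever sees canonical packets. */
static int pkt_hard_start_xmit(struct pkt *p)
{
	p = pkt_validate(p);
	if (!p)
		return 0;	/* packet was dropped during validation */
	return pkt_xmit_one(p);
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->needs_sw_vlan = true;
	return pkt_hard_start_xmit(p);
}

The kernel code in the diff has the same shape: if validate_xmit_skb() returns NULL the packet has already been freed and dev_hard_start_xmit() just returns NETDEV_TX_OK; otherwise the packet goes straight to xmit_one() or, for a segment list, xmit_list().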