提交 5b057c6b 编写于 作者: H Herbert Xu 提交者: David S. Miller

[NET]: Avoid allocating skb in skb_pad

First of all it is unnecessary to allocate a new skb in skb_pad since
the existing one is not shared.  More importantly, our hard_start_xmit
interface does not allow a new skb to be allocated since that breaks
requeueing.

This patch uses pskb_expand_head to expand the existing skb and linearize
it if needed.  Actually, someone should sift through every instance of
skb_pad on a non-linear skb as they do not fit the reasons why this was
originally created.

Incidentally, this fixes a minor bug when the skb is cloned (tcpdump,
TCP, etc.).  As it is skb_pad will simply write over a cloned skb.  Because
of the position of the write it is unlikely to cause problems but still
it's best if we don't do it.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 5fa21d82
...@@ -1031,8 +1031,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev) ...@@ -1031,8 +1031,7 @@ static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
return 1; return 1;
} }
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN)) {
if (skb == NULL) {
netif_wake_queue(dev); netif_wake_queue(dev);
return 0; return 0;
} }
......
...@@ -1070,8 +1070,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1070,8 +1070,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, (unsigned int)skb->data)); skb->len, (unsigned int)skb->data));
if (skb->len < ETH_ZLEN) { if (skb->len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -573,8 +573,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) ...@@ -573,8 +573,7 @@ static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
if (len < ETH_ZLEN) { if (len < ETH_ZLEN) {
len = ETH_ZLEN; len = ETH_ZLEN;
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
} }
......
...@@ -607,8 +607,7 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -607,8 +607,7 @@ static int ariadne_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* FIXME: is the 79C960 new enough to do its own padding right ? */ /* FIXME: is the 79C960 new enough to do its own padding right ? */
if (skb->len < ETH_ZLEN) if (skb->len < ETH_ZLEN)
{ {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
len = ETH_ZLEN; len = ETH_ZLEN;
} }
......
...@@ -700,8 +700,7 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) ...@@ -700,8 +700,7 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
} }
if (skb->len < ETH_ZLEN) { if (skb->len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
goto out; goto out;
} }
......
...@@ -518,8 +518,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev) ...@@ -518,8 +518,7 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
length = (length + 1) & ~1; length = (length + 1) & ~1;
if (length != skb->len) { if (length != skb->len) {
skb = skb_padto(skb, length); if (skb_padto(skb, length))
if (skb == NULL)
goto out; goto out;
} }
......
...@@ -804,8 +804,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) ...@@ -804,8 +804,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
++len; ++len;
if (len > skb->len) { if (len > skb->len) {
skb = skb_padto(skb, len); if (skb_padto(skb, len))
if (skb == NULL)
return 0; return 0;
} }
......
...@@ -2915,8 +2915,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2915,8 +2915,7 @@ static int cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/ */
static int ring; static int ring;
skb = skb_padto(skb, cp->min_frame_size); if (skb_padto(skb, cp->min_frame_size))
if (!skb)
return 0; return 0;
/* XXX: we need some higher-level QoS hooks to steer packets to /* XXX: we need some higher-level QoS hooks to steer packets to
......
...@@ -885,8 +885,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -885,8 +885,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skblen; len = skblen;
if (len < ETH_ZLEN) { if (len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
len = ETH_ZLEN; len = ETH_ZLEN;
} }
......
...@@ -938,11 +938,8 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -938,11 +938,8 @@ static int depca_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->len < 1) if (skb->len < 1)
goto out; goto out;
if (skb->len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN))
skb = skb_padto(skb, ETH_ZLEN); goto out;
if (skb == NULL)
goto out;
}
netif_stop_queue(dev); netif_stop_queue(dev);
......
...@@ -1154,8 +1154,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev) ...@@ -1154,8 +1154,7 @@ static int eepro_send_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name); printk(KERN_DEBUG "%s: entering eepro_send_packet routine.\n", dev->name);
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -677,8 +677,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev) ...@@ -677,8 +677,7 @@ static int eexp_xmit(struct sk_buff *buf, struct net_device *dev)
#endif #endif
if (buf->len < ETH_ZLEN) { if (buf->len < ETH_ZLEN) {
buf = skb_padto(buf, ETH_ZLEN); if (skb_padto(buf, ETH_ZLEN))
if (buf == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1027,11 +1027,8 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1027,11 +1027,8 @@ static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
u32 ctrl_word; u32 ctrl_word;
unsigned long flags; unsigned long flags;
if (skb->len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN))
skb = skb_padto(skb, ETH_ZLEN); return 0;
if (skb == NULL)
return 0;
}
/* Caution: the write order is important here, set the field with the /* Caution: the write order is important here, set the field with the
"ownership" bit last. */ "ownership" bit last. */
......
...@@ -1064,8 +1064,7 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev) ...@@ -1064,8 +1064,7 @@ static int eth16i_tx(struct sk_buff *skb, struct net_device *dev)
unsigned long flags; unsigned long flags;
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1487,11 +1487,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev) ...@@ -1487,11 +1487,8 @@ static int hp100_start_xmit_bm(struct sk_buff *skb, struct net_device *dev)
if (skb->len <= 0) if (skb->len <= 0)
return 0; return 0;
if (skb->len < ETH_ZLEN && lp->chip == HP100_CHIPID_SHASTA) { if (lp->chip == HP100_CHIPID_SHASTA && skb_padto(skb, ETH_ZLEN))
skb = skb_padto(skb, ETH_ZLEN); return 0;
if (skb == NULL)
return 0;
}
/* Get Tx ring tail pointer */ /* Get Tx ring tail pointer */
if (lp->txrtail->next == lp->txrhead) { if (lp->txrtail->next == lp->txrhead) {
......
...@@ -968,8 +968,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -968,8 +968,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* The old LANCE chips doesn't automatically pad buffers to min. size. */ /* The old LANCE chips doesn't automatically pad buffers to min. size. */
if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) { if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
if (skb->len < ETH_ZLEN) { if (skb->len < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
goto out; goto out;
lp->tx_ring[entry].length = -ETH_ZLEN; lp->tx_ring[entry].length = -ETH_ZLEN;
} }
......
...@@ -1083,8 +1083,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1083,8 +1083,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->len, skb->data)); skb->len, skb->data));
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -877,8 +877,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) { ...@@ -877,8 +877,7 @@ static int i596_start_xmit (struct sk_buff *skb, struct net_device *dev) {
length = skb->len; length = skb->len;
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1939,8 +1939,7 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1939,8 +1939,7 @@ static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
/* pad frames to at least ETH_ZLEN bytes */ /* pad frames to at least ETH_ZLEN bytes */
if (unlikely(skb->len < ETH_ZLEN)) { if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN)) {
if (skb == NULL) {
/* The packet is gone, so we must /* The packet is gone, so we must
* return 0 */ * return 0 */
mgp->stats.tx_dropped += 1; mgp->stats.tx_dropped += 1;
......
...@@ -831,8 +831,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -831,8 +831,7 @@ static int fjn_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (length < ETH_ZLEN) if (length < ETH_ZLEN)
{ {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1374,8 +1374,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1374,8 +1374,7 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/ */
if (pktlen < ETH_ZLEN) if (pktlen < ETH_ZLEN)
{ {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
pktlen = ETH_ZLEN; pktlen = ETH_ZLEN;
} }
......
...@@ -2222,8 +2222,7 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -2222,8 +2222,7 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = skb->len; len = skb->len;
if (unlikely(len < ETH_ZLEN)) { if (unlikely(len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (!skb)
goto err_update_stats; goto err_update_stats;
len = ETH_ZLEN; len = ETH_ZLEN;
} }
......
...@@ -396,8 +396,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev) ...@@ -396,8 +396,7 @@ static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev)
unsigned char *buf; unsigned char *buf;
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1156,8 +1156,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -1156,8 +1156,7 @@ static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping; dma_addr_t mapping;
if (unlikely(skb->len < ETH_ZLEN)) { if (unlikely(skb->len < ETH_ZLEN)) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN)) {
if (!skb) {
tp->stats.tx_dropped++; tp->stats.tx_dropped++;
goto out; goto out;
} }
......
...@@ -1525,7 +1525,7 @@ struct sk_buff *pMessage) /* pointer to send-message */ ...@@ -1525,7 +1525,7 @@ struct sk_buff *pMessage) /* pointer to send-message */
** This is to resolve faulty padding by the HW with 0xaa bytes. ** This is to resolve faulty padding by the HW with 0xaa bytes.
*/ */
if (BytesSend < C_LEN_ETHERNET_MINSIZE) { if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
if ((pMessage = skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) == NULL) { if (skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) {
spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags); spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
return 0; return 0;
} }
......
...@@ -2310,8 +2310,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) ...@@ -2310,8 +2310,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
u64 map; u64 map;
unsigned long flags; unsigned long flags;
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (!skb)
return NETDEV_TX_OK; return NETDEV_TX_OK;
if (!spin_trylock_irqsave(&skge->tx_lock, flags)) if (!spin_trylock_irqsave(&skge->tx_lock, flags))
......
...@@ -523,8 +523,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de ...@@ -523,8 +523,7 @@ static int smc_wait_to_send_packet( struct sk_buff * skb, struct net_device * de
length = skb->len; length = skb->len;
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN)) {
if (skb == NULL) {
netif_wake_queue(dev); netif_wake_queue(dev);
return 0; return 0;
} }
......
...@@ -231,8 +231,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) ...@@ -231,8 +231,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
length = skb->len; length = skb->len;
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -1349,8 +1349,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev) ...@@ -1349,8 +1349,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE) #if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
if (skb->ip_summed == CHECKSUM_HW) { if (skb->ip_summed == CHECKSUM_HW) {
skb = skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK); if (skb_padto(skb, (skb->len + PADDING_MASK) & ~PADDING_MASK))
if (skb == NULL)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */ #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
......
...@@ -1284,11 +1284,8 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev) ...@@ -1284,11 +1284,8 @@ static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
/* Calculate the next Tx descriptor entry. */ /* Calculate the next Tx descriptor entry. */
entry = rp->cur_tx % TX_RING_SIZE; entry = rp->cur_tx % TX_RING_SIZE;
if (skb->len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN))
skb = skb_padto(skb, ETH_ZLEN); return 0;
if (skb == NULL)
return 0;
}
rp->tx_skbuff[entry] = skb; rp->tx_skbuff[entry] = skb;
......
...@@ -924,8 +924,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -924,8 +924,7 @@ static int ray_dev_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (length < ETH_ZLEN) if (length < ETH_ZLEN)
{ {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -3194,11 +3194,8 @@ wavelan_packet_xmit(struct sk_buff * skb, ...@@ -3194,11 +3194,8 @@ wavelan_packet_xmit(struct sk_buff * skb,
* and we don't have the Ethernet specific requirement of beeing * and we don't have the Ethernet specific requirement of beeing
* able to detect collisions, therefore in theory we don't really * able to detect collisions, therefore in theory we don't really
* need to pad. Jean II */ * need to pad. Jean II */
if (skb->len < ETH_ZLEN) { if (skb_padto(skb, ETH_ZLEN))
skb = skb_padto(skb, ETH_ZLEN); return 0;
if (skb == NULL)
return 0;
}
wv_packet_write(dev, skb->data, skb->len); wv_packet_write(dev, skb->data, skb->len);
......
...@@ -862,13 +862,11 @@ static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -862,13 +862,11 @@ static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Fix GX chipset errata. */ /* Fix GX chipset errata. */
if (cacheline_end > 24 || cacheline_end == 0) { if (cacheline_end > 24 || cacheline_end == 0) {
len = skb->len + 32 - cacheline_end + 1; len = skb->len + 32 - cacheline_end + 1;
if (len != skb->len) if (skb_padto(skb, len)) {
skb = skb_padto(skb, len); yp->tx_skbuff[entry] = NULL;
} netif_wake_queue(dev);
if (skb == NULL) { return 0;
yp->tx_skbuff[entry] = NULL; }
netif_wake_queue(dev);
return 0;
} }
} }
yp->tx_skbuff[entry] = skb; yp->tx_skbuff[entry] = skb;
......
...@@ -544,8 +544,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev) ...@@ -544,8 +544,7 @@ static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name); printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);
if (length < ETH_ZLEN) { if (length < ETH_ZLEN) {
skb = skb_padto(skb, ETH_ZLEN); if (skb_padto(skb, ETH_ZLEN))
if (skb == NULL)
return 0; return 0;
length = ETH_ZLEN; length = ETH_ZLEN;
} }
......
...@@ -345,7 +345,7 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, ...@@ -345,7 +345,7 @@ extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
int newheadroom, int newtailroom, int newheadroom, int newtailroom,
gfp_t priority); gfp_t priority);
extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad); extern int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) kfree_skb(a) #define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len, extern void skb_over_panic(struct sk_buff *skb, int len,
void *here); void *here);
...@@ -1122,16 +1122,15 @@ static inline int skb_cow(struct sk_buff *skb, unsigned int headroom) ...@@ -1122,16 +1122,15 @@ static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
* *
* Pads up a buffer to ensure the trailing bytes exist and are * Pads up a buffer to ensure the trailing bytes exist and are
* blanked. If the buffer already contains sufficient data it * blanked. If the buffer already contains sufficient data it
* is untouched. Returns the buffer, which may be a replacement * is untouched. Otherwise it is extended. Returns zero on
* for the original, or NULL for out of memory - in which case * success. The skb is freed on error.
* the original buffer is still freed.
*/ */
static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len) static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{ {
unsigned int size = skb->len; unsigned int size = skb->len;
if (likely(size >= len)) if (likely(size >= len))
return skb; return 0;
return skb_pad(skb, len-size); return skb_pad(skb, len-size);
} }
......
...@@ -781,24 +781,40 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, ...@@ -781,24 +781,40 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
* filled. Used by network drivers which may DMA or transfer data * filled. Used by network drivers which may DMA or transfer data
* beyond the buffer end onto the wire. * beyond the buffer end onto the wire.
* *
* May return NULL in out of memory cases. * May return error in out of memory cases. The skb is freed on error.
*/ */
struct sk_buff *skb_pad(struct sk_buff *skb, int pad) int skb_pad(struct sk_buff *skb, int pad)
{ {
struct sk_buff *nskb; int err;
int ntail;
/* If the skbuff is non linear tailroom is always zero.. */ /* If the skbuff is non linear tailroom is always zero.. */
if (skb_tailroom(skb) >= pad) { if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
memset(skb->data+skb->len, 0, pad); memset(skb->data+skb->len, 0, pad);
return skb; return 0;
} }
nskb = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb) + pad, GFP_ATOMIC); ntail = skb->data_len + pad - (skb->end - skb->tail);
if (likely(skb_cloned(skb) || ntail > 0)) {
err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
if (unlikely(err))
goto free_skb;
}
/* FIXME: The use of this function with non-linear skb's really needs
* to be audited.
*/
err = skb_linearize(skb);
if (unlikely(err))
goto free_skb;
memset(skb->data + skb->len, 0, pad);
return 0;
free_skb:
kfree_skb(skb); kfree_skb(skb);
if (nskb) return err;
memset(nskb->data+nskb->len, 0, pad);
return nskb;
} }
/* Trims skb to length len. It can change skb pointers. /* Trims skb to length len. It can change skb pointers.
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册