Commit acb600de, authored by: Eric Dumazet, committed by: David S. Miller

net: remove skb recycling

Over time, the skb recycling infrastructure got little interest and
many bugs. Generic rx path skb allocation now uses page
fragments for efficient GRO / TCP coalescing, and recycling
a tx skb for the rx path is not worth the pain.

The last identified bug is that fat skbs can be recycled,
and they can end up using high-order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit
87151b86 (net: allow pskb_expand_head() to get maximum tailroom)
introduced this regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway,
to allocate and populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 809d5fc9
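For illustration only (not part of this commit): a minimal sketch of the build_skb() pattern the changelog points to, assuming a driver whose receive buffers are page fragments it allocated itself (for example via netdev_alloc_frag()). The function and parameter names (my_rx_one, frag_size, pkt_len) are hypothetical.

static void my_rx_one(struct net_device *dev, void *data,
		      unsigned int frag_size, unsigned int pkt_len)
{
	struct sk_buff *skb;

	/* Wrap an sk_buff around the already-filled buffer; frag_size must
	 * cover headroom, payload and the trailing struct skb_shared_info. */
	skb = build_skb(data, frag_size);
	if (unlikely(!skb)) {
		put_page(virt_to_head_page(data));	/* drop the fragment */
		dev->stats.rx_dropped++;
		return;
	}

	skb_reserve(skb, NET_SKB_PAD);	/* headroom the driver left for the stack */
	skb_put(skb, pkt_len);		/* bytes actually written by the NIC */
	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
}

The sk_buff header comes from a cache and the data area stays a page fragment, so the "hot skb" case is covered without a driver-private recycle queue.
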
@@ -375,7 +375,6 @@ struct xgmac_priv {
 	unsigned int tx_tail;
 	void __iomem *base;
-	struct sk_buff_head rx_recycle;
 	unsigned int dma_buf_sz;
 	dma_addr_t dma_rx_phy;
 	dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		p = priv->dma_rx + entry;
 		if (priv->rx_skbuff[entry] == NULL) {
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
 			if (unlikely(skb == NULL))
 				break;
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
 		}
-		/*
-		 * If there's room in the queue (limit it to size)
-		 * we add this skb back into the pool,
-		 * if it's the right size.
-		 */
-		if ((skb_queue_len(&priv->rx_recycle) <
-			DMA_RX_RING_SZ) &&
-		     skb_recycle_check(skb, priv->dma_buf_sz))
-			__skb_queue_head(&priv->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 	}
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
 			    dev->dev_addr);
 	}
-	skb_queue_head_init(&priv->rx_recycle);
 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
 	/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
 	napi_disable(&priv->napi);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
-	skb_queue_purge(&priv->rx_recycle);
 	/* Disable the MAC core */
 	xgmac_mac_disable(priv->base);
......
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
 			  priv->tx_queue[0]->tx_bd_base,
 			  priv->tx_queue[0]->tx_bd_dma_base);
-	skb_queue_purge(&priv->rx_recycle);
 }
 void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
 	enable_napi(priv);
-	skb_queue_head_init(&priv->rx_recycle);
 	/* Initialize a bunch of registers */
 	init_registers(dev);
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 		bytes_sent += skb->len;
-		/* If there's room in the queue (limit it to rx_buffer_size)
-		 * we add this skb back into the pool, if it's the right size
-		 */
-		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
-		    skb_recycle_check(skb, priv->rx_buffer_size +
-				      RXBUF_ALIGNMENT)) {
-			gfar_align_skb(skb);
-			skb_queue_head(&priv->rx_recycle, skb);
-		} else
-			dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(skb);
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 	if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 struct sk_buff *gfar_new_skb(struct net_device *dev)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
-	skb = skb_dequeue(&priv->rx_recycle);
-	if (!skb)
-		skb = gfar_alloc_skb(dev);
-	return skb;
+	return gfar_alloc_skb(dev);
 }
 static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			if (unlikely(!newskb))
 				newskb = skb;
 			else if (skb)
-				skb_queue_head(&priv->rx_recycle, skb);
+				dev_kfree_skb(skb);
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
......
@@ -1080,8 +1080,6 @@ struct gfar_private {
 	u32 cur_filer_idx;
-	struct sk_buff_head rx_recycle;
 	/* RX queue filer rule set*/
 	struct ethtool_rx_list rx_list;
 	struct mutex rx_queue_access;
......
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 		u8 __iomem *bd)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
-	skb = __skb_dequeue(&ugeth->rx_recycle);
+	skb = netdev_alloc_skb(ugeth->ndev,
+			       ugeth->ug_info->uf_info.max_rx_buf_length +
+			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (!skb)
-		skb = netdev_alloc_skb(ugeth->ndev,
-				       ugeth->ug_info->uf_info.max_rx_buf_length +
-				       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
-	if (skb == NULL)
 		return NULL;
 
 	/* We need the data buffer to be aligned properly.  We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		iounmap(ugeth->ug_regs);
 		ugeth->ug_regs = NULL;
 	}
-	skb_queue_purge(&ugeth->rx_recycle);
 }
 static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		return -ENOMEM;
 	}
-	skb_queue_head_init(&ugeth->rx_recycle);
 	return 0;
 }
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
 					  __func__, __LINE__, (u32) skb);
-			if (skb) {
-				skb->data = skb->head + NET_SKB_PAD;
-				skb->len = 0;
-				skb_reset_tail_pointer(skb);
-				__skb_queue_head(&ugeth->rx_recycle, skb);
-			}
+			dev_kfree_skb(skb);
 			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
 			dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 		dev->stats.tx_packets++;
-		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
-		    skb_recycle_check(skb,
-				    ugeth->ug_info->uf_info.max_rx_buf_length +
-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
-			__skb_queue_head(&ugeth->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 		ugeth->skb_dirtytx[txQ] =
......
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
 	/* index of the first skb which hasn't been transmitted yet. */
 	u16 skb_dirtytx[NUM_TX_QUEUES];
-	struct sk_buff_head rx_recycle;
 	struct ugeth_mii_info *mii_info;
 	struct phy_device *phydev;
 	phy_interface_t phy_interface;
......
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
 	u8 work_rx_refill;
 	int skb_size;
-	struct sk_buff_head rx_recycle;
 	/*
 	 * RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 		struct rx_desc *rx_desc;
 		int size;
-		skb = __skb_dequeue(&mp->rx_recycle);
-		if (skb == NULL)
-			skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 		if (skb == NULL) {
 			mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 				       desc->byte_cnt, DMA_TO_DEVICE);
 		}
-		if (skb != NULL) {
-			if (skb_queue_len(&mp->rx_recycle) <
-					mp->rx_ring_size &&
-			    skb_recycle_check(skb, mp->skb_size))
-				__skb_queue_head(&mp->rx_recycle, skb);
-			else
-				dev_kfree_skb(skb);
-		}
+		dev_kfree_skb(skb);
 	}
 	__netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 	napi_enable(&mp->napi);
-	skb_queue_head_init(&mp->rx_recycle);
 	mp->int_mask = INT_EXT;
 	for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	mib_counters_update(mp);
 	del_timer_sync(&mp->mib_counters_timer);
-	skb_queue_purge(&mp->rx_recycle);
 	for (i = 0; i < mp->rxq_count; i++)
 		rxq_deinit(mp->rxq + i);
 	for (i = 0; i < mp->txq_count; i++)
......
@@ -50,7 +50,6 @@ struct stmmac_priv {
 	unsigned int dirty_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_skbuff_dma;
-	struct sk_buff_head rx_recycle;
 	struct net_device *dev;
 	dma_addr_t dma_rx_phy;
......
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
 			priv->hw->ring->clean_desc3(p);
 		if (likely(skb != NULL)) {
-			/*
-			 * If there's room in the queue (limit it to size)
-			 * we add this skb back into the pool,
-			 * if it's the right size.
-			 */
-			if ((skb_queue_len(&priv->rx_recycle) <
-				priv->dma_rx_size) &&
-			    skb_recycle_check(skb, priv->dma_buf_sz))
-				__skb_queue_head(&priv->rx_recycle, skb);
-			else
-				dev_kfree_skb(skb);
+			dev_kfree_skb(skb);
 			priv->tx_skbuff[entry] = NULL;
 		}
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
 	priv->eee_enabled = stmmac_eee_init(priv);
 	napi_enable(&priv->napi);
-	skb_queue_head_init(&priv->rx_recycle);
 	netif_start_queue(dev);
 	return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
 	kfree(priv->tm);
 #endif
 	napi_disable(&priv->napi);
-	skb_queue_purge(&priv->rx_recycle);
 	/* Free the IRQ lines */
 	free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		if (likely(priv->rx_skbuff[entry] == NULL)) {
 			struct sk_buff *skb;
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb_ip_align(priv->dev,
-								bfsize);
+			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
 			if (unlikely(skb == NULL))
 				break;
......
@@ -589,9 +589,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
 }
-extern void skb_recycle(struct sk_buff *skb);
-extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
@@ -2645,27 +2642,6 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
-static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
-{
-	if (irqs_disabled())
-		return false;
-
-	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
-		return false;
-
-	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return false;
-
-	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
-	if (skb_end_offset(skb) < skb_size)
-		return false;
-
-	if (skb_shared(skb) || skb_cloned(skb))
-		return false;
-
-	return true;
-}
 /**
  * skb_head_is_locked - Determine if the skb->head is locked down
  * @skb: skb to check
......
@@ -655,53 +655,6 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
-/**
- * skb_recycle - clean up an skb for reuse
- * @skb: buffer
- *
- * Recycles the skb to be reused as a receive buffer. This
- * function does any necessary reference count dropping, and
- * cleans up the skbuff as if it just came from __alloc_skb().
- */
-void skb_recycle(struct sk_buff *skb)
-{
-	struct skb_shared_info *shinfo;
-
-	skb_release_head_state(skb);
-
-	shinfo = skb_shinfo(skb);
-	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
-	atomic_set(&shinfo->dataref, 1);
-
-	memset(skb, 0, offsetof(struct sk_buff, tail));
-	skb->data = skb->head + NET_SKB_PAD;
-	skb_reset_tail_pointer(skb);
-}
-EXPORT_SYMBOL(skb_recycle);
-
-/**
- * skb_recycle_check - check if skb can be reused for receive
- * @skb: buffer
- * @skb_size: minimum receive buffer size
- *
- * Checks that the skb passed in is not shared or cloned, and
- * that it is linear and its head portion at least as large as
- * skb_size so that it can be recycled as a receive buffer.
- * If these conditions are met, this function does any necessary
- * reference count dropping and cleans up the skbuff as if it
- * just came from __alloc_skb().
- */
-bool skb_recycle_check(struct sk_buff *skb, int skb_size)
-{
-	if (!skb_is_recycleable(skb, skb_size))
-		return false;
-
-	skb_recycle(skb);
-	return true;
-}
-EXPORT_SYMBOL(skb_recycle_check);
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp = old->tstamp;
......