Commit 7e6d06f0 authored by Ben Hutchings, committed by David S. Miller

sfc: Fix maximum number of TSO segments and minimum TX queue size

Currently an skb requiring TSO may not fit within a minimum-size TX
queue.  The TX queue selected for the skb may stall and trigger the TX
watchdog repeatedly (since the problem skb will be retried after the
TX reset).  This issue is designated as CVE-2012-3412.

Set the maximum number of TSO segments for our devices to 100.  This
should make no difference to behaviour unless the actual MSS is less
than about 700.  Increase the minimum TX queue size accordingly to
allow for 2 worst-case skbs, so that there will definitely be space
to add an skb after we wake a queue.
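As a rough check of that 700-byte figure: the stack hands the driver at most GSO_MAX_SIZE (64 KiB in kernels of this era) of payload in a single TSO skb, so 100 segments are enough whenever the MSS is at least 65536 / 100 ≈ 656 bytes. A minimal userspace sketch of the arithmetic (illustrative only, not part of the patch; the GSO_MAX_SIZE value is an assumption):

    #include <stdio.h>

    #define GSO_MAX_SIZE     65536  /* assumed max TSO payload per skb */
    #define EFX_TSO_MAX_SEGS 100

    int main(void)
    {
    	/* Segments needed to carry a maximal TSO payload at a given MSS */
    	for (unsigned int mss = 500; mss <= 800; mss += 100) {
    		unsigned int segs = (GSO_MAX_SIZE + mss - 1) / mss;
    		printf("MSS %4u -> %3u segments%s\n", mss, segs,
    		       segs > EFX_TSO_MAX_SEGS ?
    		       "  (over the 100-segment limit)" : "");
    	}
    	return 0;
    }

Below roughly 656 bytes of MSS the stack simply builds smaller TSO skbs (the parent commit caps each socket's skbs at sk_gso_max_segs segments), so the only cost is more skbs per 64 KiB of data.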

To avoid invalidating existing configurations, change
efx_ethtool_set_ringparam() to fix up values that are too small rather
than returning -EINVAL.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 30b678d8
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx)
 		goto fail2;
 	}
 
+	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+		rc = -EINVAL;
+		goto fail3;
+	}
 	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
 	rc = efx_probe_filters(efx);
@@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx)
 	net_dev->irq = efx->pci_dev->irq;
 	net_dev->netdev_ops = &efx_netdev_ops;
 	SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
 
 	rtnl_lock();
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -30,6 +30,7 @@ extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
+extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
 
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_MAX_EVQ_SIZE 16384UL
 #define EFX_MIN_EVQ_SIZE 512UL
 
-/* The smallest [rt]xq_entries that the driver supports.  Callers of
- * efx_wake_queue() assume that they can subsequently send at least one
- * skb.  Falcon/A1 may require up to three descriptors per skb_frag. */
-#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS))
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS 100
+
+/* The smallest [rt]xq_entries that the driver supports.  RX minimum
+ * is a bit arbitrary.  For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT 128U
+#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
 
 /* Filters */
 extern int efx_probe_filters(struct efx_nic *efx);
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
 				     struct ethtool_ringparam *ring)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 txq_entries;
 
 	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
 	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
 	    ring->tx_pending > EFX_MAX_DMAQ_SIZE)
 		return -EINVAL;
 
-	if (ring->rx_pending < EFX_MIN_RING_SIZE ||
-	    ring->tx_pending < EFX_MIN_RING_SIZE) {
+	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
 		netif_err(efx, drv, efx->net_dev,
-			  "TX and RX queues cannot be smaller than %ld\n",
-			  EFX_MIN_RING_SIZE);
+			  "RX queues cannot be smaller than %u\n",
+			  EFX_RXQ_MIN_ENT);
 		return -EINVAL;
 	}
 
-	return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending);
+	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+	if (txq_entries != ring->tx_pending)
+		netif_warn(efx, drv, efx->net_dev,
+			   "increasing TX queue size to minimum of %u\n",
+			   txq_entries);
+
+	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
 static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
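With this change an undersized TX ring request is fixed up instead of rejected. A hypothetical ethtool session (the interface name, PCI address, and the minimum of 436 are illustrative; 436 assumes MAX_SKB_FRAGS = 18 and no workarounds, as worked through after the tx.c hunk below):

    # ethtool -G eth0 rx 512 tx 128
    # dmesg | tail -n 1
    sfc 0000:01:00.0 eth0: increasing TX queue size to minimum of 436

Only the TX side is fixed up because only the TX minimum has grown: for typical 4 KiB pages the new EFX_RXQ_MIN_ENT of 128 matches the old EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * 18) = 128), so existing RX configurations remain valid.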
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
 	return len;
 }
 
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+	/* Header and payload descriptor for each output segment, plus
+	 * one for every input fragment boundary within a segment
+	 */
+	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+	/* Possibly one more per segment for the alignment workaround */
+	if (EFX_WORKAROUND_5391(efx))
+		max_descs += EFX_TSO_MAX_SEGS;
+
+	/* Possibly more for PCIe page boundaries within input fragments */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	return max_descs;
+}
+
 /*
  * Add a socket buffer to a TX queue
  *
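To make the worst case concrete, here is a standalone sketch of the same budget under assumed conditions (MAX_SKB_FRAGS = 18 for 4 KiB pages, workaround 5391 not active, host page size equal to the device's EFX_PAGE_SIZE); the constants are assumptions for illustration, not taken from the patch:

    #include <stdio.h>

    /* Assumed values; the real ones come from the kernel headers */
    #define EFX_TSO_MAX_SEGS 100
    #define MAX_SKB_FRAGS    18   /* 65536 / 4096 + 2 */

    int main(void)
    {
    	/* Header + payload descriptor per output segment, plus one
    	 * descriptor for each input fragment boundary within a segment
    	 */
    	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

    	/* EFX_TXQ_MIN_ENT: room for two such worst-case skbs, so the
    	 * queue can always accept one more skb after being woken
    	 */
    	unsigned int txq_min = 2 * max_descs;

    	printf("max_descs = %u, EFX_TXQ_MIN_ENT = %u\n",
    	       max_descs, txq_min);
    	return 0;  /* prints: max_descs = 218, EFX_TXQ_MIN_ENT = 436 */
    }

Doubling to two worst-case skbs is what guarantees that, after the queue is woken, there is always room for one more TSO skb — exactly the invariant whose violation triggered the TX watchdog.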