Commit f47f68cc authored by Jose Abreu, committed by Greg Kroah-Hartman

net: stmmac: Re-work the queue selection for TSO packets

[ Upstream commit 4993e5b37e8bcb55ac90f76eb6d2432647273747 ]

Ben Hutchings says:
	"This is the wrong place to change the queue mapping.
	stmmac_xmit() is called with a specific TX queue locked,
	and accessing a different TX queue results in a data race
	for all of that queue's state.

	I think this commit should be reverted upstream and in all
	stable branches.  Instead, the driver should implement the
	ndo_select_queue operation and override the queue mapping there."

Fixes: c5acdbee22a1 ("net: stmmac: Send TSO packets always from Queue 0")
Suggested-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 201d7d62
@@ -3036,17 +3036,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
         /* Manage oversized TCP frames for GMAC4 device */
         if (skb_is_gso(skb) && priv->tso) {
-                if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-                        /*
-                         * There is no way to determine the number of TSO
-                         * capable Queues. Let's use always the Queue 0
-                         * because if TSO is supported then at least this
-                         * one will be capable.
-                         */
-                        skb_set_queue_mapping(skb, 0);
-
+                if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
                         return stmmac_tso_xmit(skb, dev);
-                }
         }
 
         if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3855,6 +3846,23 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
         }
 }
 
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+                               struct net_device *sb_dev,
+                               select_queue_fallback_t fallback)
+{
+        if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+                /*
+                 * There is no way to determine the number of TSO
+                 * capable Queues. Let's use always the Queue 0
+                 * because if TSO is supported then at least this
+                 * one will be capable.
+                 */
+                return 0;
+        }
+
+        return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
 {
         struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4097,6 +4105,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
         .ndo_tx_timeout = stmmac_tx_timeout,
         .ndo_do_ioctl = stmmac_ioctl,
         .ndo_setup_tc = stmmac_setup_tc,
+        .ndo_select_queue = stmmac_select_queue,
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = stmmac_poll_controller,
 #endif
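
For context on why moving the decision into ndo_select_queue resolves the race Ben Hutchings describes: the networking core picks the TX queue (consulting the driver's ndo_select_queue callback, if one is set) before it takes that queue's lock and calls the driver's ndo_start_xmit. The sketch below is a simplified, hypothetical illustration of that ordering, not the actual net/core/dev.c code; sketch_xmit_one() does not exist in the kernel and is used only to show where stmmac_select_queue() and stmmac_xmit() run relative to the queue lock.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

/*
 * Illustrative sketch only: a heavily simplified stand-in for the core
 * transmit path.  The real logic lives in dev_queue_xmit() and friends.
 */
static netdev_tx_t sketch_xmit_one(struct sk_buff *skb, struct net_device *dev)
{
        const struct net_device_ops *ops = dev->netdev_ops;
        struct netdev_queue *txq;
        netdev_tx_t ret;

        /* By this point the stack has already set the skb's queue mapping,
         * consulting the driver's ndo_select_queue (stmmac_select_queue
         * after this patch) if provided.  No TX queue lock is held while
         * that decision is made, so steering TSO traffic to queue 0 there
         * is safe.
         */
        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* The chosen queue is locked, and only then does the driver's
         * ndo_start_xmit (stmmac_xmit) run.  Re-mapping the skb to a
         * different queue inside stmmac_xmit would therefore touch that
         * other queue's state without holding its lock.
         */
        __netif_tx_lock(txq, smp_processor_id());
        ret = ops->ndo_start_xmit(skb, dev);
        __netif_tx_unlock(txq);

        return ret;
}

As a side note on the non-TSO path of the new callback, the "% dev->real_num_tx_queues" simply keeps the fallback's choice within the range of TX queues the device currently has enabled.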