diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0209ac328e8a29494a46dcd01bde8625e4641270..608c3ac4d045bda95d5c0f252360aba97c8527d1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1403,6 +1403,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 
 extern struct netdev_queue *netdev_pick_tx(struct net_device *dev,
 					    struct sk_buff *skb);
+extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
 
 /*
  * Net namespace inlines
diff --git a/net/core/dev.c b/net/core/dev.c
index 4794cae84939d6a92bf7357e418fa346634190ff..81ff67149f620ce2bb372bba2f0537f8e5c7ae01 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2495,37 +2495,44 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
 #endif
 }
 
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
-				    struct sk_buff *skb)
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
 {
-	int queue_index;
-	const struct net_device_ops *ops = dev->netdev_ops;
-
-	if (dev->real_num_tx_queues == 1)
-		queue_index = 0;
-	else if (ops->ndo_select_queue) {
-		queue_index = ops->ndo_select_queue(dev, skb);
-		queue_index = dev_cap_txqueue(dev, queue_index);
-	} else {
-		struct sock *sk = skb->sk;
-		queue_index = sk_tx_queue_get(sk);
+	struct sock *sk = skb->sk;
+	int queue_index = sk_tx_queue_get(sk);
 
-		if (queue_index < 0 || skb->ooo_okay ||
-		    queue_index >= dev->real_num_tx_queues) {
-			int old_index = queue_index;
+	if (queue_index < 0 || skb->ooo_okay ||
+	    queue_index >= dev->real_num_tx_queues) {
+		int new_index = get_xps_queue(dev, skb);
+		if (new_index < 0)
+			new_index = skb_tx_hash(dev, skb);
 
-			queue_index = get_xps_queue(dev, skb);
-			if (queue_index < 0)
-				queue_index = skb_tx_hash(dev, skb);
-
-			if (queue_index != old_index && sk) {
-				struct dst_entry *dst =
+		if (queue_index != new_index && sk) {
+			struct dst_entry *dst =
 				    rcu_dereference_check(sk->sk_dst_cache, 1);
 
-				if (dst && skb_dst(skb) == dst)
-					sk_tx_queue_set(sk, queue_index);
-			}
-		}
+			if (dst && skb_dst(skb) == dst)
+				sk_tx_queue_set(sk, queue_index);
+		}
+
+		queue_index = new_index;
+	}
+
+	return queue_index;
+}
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+				    struct sk_buff *skb)
+{
+	int queue_index = 0;
+
+	if (dev->real_num_tx_queues != 1) {
+		const struct net_device_ops *ops = dev->netdev_ops;
+		if (ops->ndo_select_queue)
+			queue_index = ops->ndo_select_queue(dev, skb);
+		else
+			queue_index = __netdev_pick_tx(dev, skb);
+
+		queue_index = dev_cap_txqueue(dev, queue_index);
 	}
 
 	skb_set_queue_mapping(skb, queue_index);
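
Note (illustrative, not part of the patch): splitting __netdev_pick_tx() out of
netdev_pick_tx() lets a driver's ndo_select_queue callback special-case only the
traffic it cares about and fall back to the core sk/XPS/hash-based selection for
everything else. A minimal sketch of such a callback follows; the foo_* names and
the TC_PRIO_CONTROL test are invented for the example, and it assumes the new
__netdev_pick_tx() symbol is reachable from the driver (only the declaration is
shown in the hunk above).

	/* assumes <linux/netdevice.h> and <linux/pkt_sched.h> */
	static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb)
	{
		/* steer link-control frames to the last queue, for example */
		if (skb->priority == TC_PRIO_CONTROL)
			return dev->real_num_tx_queues - 1;

		/* everything else reuses the core sk/XPS/hash selection */
		return __netdev_pick_tx(dev, skb);
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_select_queue	= foo_select_queue,
		/* ... remaining callbacks ... */
	};

Whatever the callback returns is still clamped by dev_cap_txqueue() in
netdev_pick_tx(), as the second hunk above shows.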