Commit 554794de authored by Jarek Poplawski, committed by David S. Miller

pkt_sched: Fix handling of gso skbs on requeuing

Jay Cliburn noticed and diagnosed a bug triggered in
dev_gso_skb_destructor() after the last change from qdisc->gso_skb
to the qdisc->requeue list. Since GSO-segmented skbs can't be queued
to another list, this patch brings back qdisc->gso_skb for them.
Reported-by: Jay Cliburn <jcliburn@gmail.com>
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 13c1d189
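For context on why the skb->next test below is sufficient: when dev_hard_start_xmit() software-segments a GSO skb, the segment list is chained through the original skb's next pointer and freed later by the destructor named in the commit message. The sketch below is a simplified rendering of that era's net/core/dev.c mechanism, reconstructed for illustration; it is not part of this patch:

/* Simplified sketch (assumed from the 2.6.27-era net/core/dev.c, not
 * part of this patch). dev_gso_segment() stores the segment list in
 * the original skb's next pointer:
 *
 *	skb->next = skb_gso_segment(skb, features);
 *	skb->destructor = dev_gso_skb_destructor;
 *
 * Hence skb->next != NULL identifies a segmented skb, and linking such
 * an skb into an sk_buff_head via __skb_queue_head() would overwrite
 * skb->next, corrupting the chain that the destructor walks:
 */
static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;		/* unchain one segment */
		nskb->next = NULL;
		kfree_skb(nskb);		/* drop its reference */
	} while (skb->next);
	/* the real function also restores the skb's original destructor */
}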
@@ -52,6 +52,7 @@ struct Qdisc
 	u32			parent;
 	atomic_t		refcnt;
 	unsigned long		state;
+	struct sk_buff		*gso_skb;
 	struct sk_buff_head	requeue;
 	struct sk_buff_head	q;
 	struct netdev_queue	*dev_queue;
...
@@ -44,6 +44,9 @@ static inline int qdisc_qlen(struct Qdisc *q)
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
+	if (unlikely(skb->next))
+		q->gso_skb = skb;
+	else
 	__skb_queue_head(&q->requeue, skb);

 	__netif_schedule(q);
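After this hunk, dev_requeue_skb() parks a segmented skb in the dedicated slot instead of list-queuing it. A cleaned-up sketch of the resulting function (the patch itself leaves __skb_queue_head() at its old indentation, though the else binds to it either way; the untouched return path is filled in from context):

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (unlikely(skb->next))	/* GSO segment chain in skb->next */
		q->gso_skb = skb;	/* park it in the dedicated slot */
	else
		__skb_queue_head(&q->requeue, skb);	/* safe to list-queue */

	__netif_schedule(q);
	return 0;
}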
@@ -52,7 +55,10 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
-	struct sk_buff *skb = skb_peek(&q->requeue);
+	struct sk_buff *skb = q->gso_skb;
+
+	if (!skb)
+		skb = skb_peek(&q->requeue);

 	if (unlikely(skb)) {
 		struct net_device *dev = qdisc_dev(q);
@@ -60,10 +66,15 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
-			__skb_unlink(skb, &q->requeue);
-		else
-			skb = NULL;
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
+			if (q->gso_skb)
+				q->gso_skb = NULL;
+			else
+				__skb_unlink(skb, &q->requeue);
+		} else {
+			skb = NULL;
+		}
 	} else {
 		skb = q->dequeue(q);
 	}
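Together, the two hunks make dequeue_skb() drain the parked gso_skb before consulting the requeue list, so a blocked segmented skb is retried first and its skb->next chain is never disturbed. An annotated sketch of the resulting function (the txq declaration and the return statement are filled in from the surrounding, unchanged code):

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;	/* parked gso skb has priority */

	if (!skb)
		skb = skb_peek(&q->requeue);	/* then ordinary requeued skbs */

	if (unlikely(skb)) {
		struct net_device *dev = qdisc_dev(q);
		struct netdev_queue *txq;

		/* check the reason of requeuing without tx lock first */
		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
		if (!netif_tx_queue_stopped(txq) &&
		    !netif_tx_queue_frozen(txq)) {
			if (q->gso_skb)
				q->gso_skb = NULL;	/* consume the parked skb */
			else
				__skb_unlink(skb, &q->requeue);
		} else {
			skb = NULL;	/* tx queue still blocked: retry later */
		}
	} else {
		skb = q->dequeue(q);	/* normal qdisc dequeue path */
	}

	return skb;
}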
@@ -548,6 +559,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));

+	kfree_skb(qdisc->gso_skb);
 	__skb_queue_purge(&qdisc->requeue);

 	kfree((char *) qdisc - qdisc->padded);
...
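Two details in the teardown hunk are worth noting: kfree_skb() is a no-op on a NULL pointer, so the new qdisc->gso_skb slot needs no guard here, and if a segmented skb is still parked at destroy time, freeing it invokes its destructor, which releases the rest of the segment chain.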