Commit d3b753db authored by David S. Miller

pkt_sched: Move gso_skb into Qdisc.

We liberate any dangling gso_skb during qdisc destruction.

It really only matters for the root qdisc.  But when qdiscs
can be shared by multiple netdev_queue objects, we can't
have the gso_skb in the netdev_queue any more.
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent b4c21639
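
In short, the pointer to a partially-transmitted GSO segment chain moves from the per-device TX queue (struct netdev_queue) into the queueing discipline itself (struct Qdisc), and qdisc deactivation now detaches and frees any skb still stashed there. The stand-alone C sketch below only models that ownership change; the names (toy_skb, toy_qdisc, requeue_skb, dequeue_skb_pref, deactivate) are illustrative stand-ins, not kernel code, and the non-GSO requeue path via q->ops->requeue() is omitted.

/* Toy model of the change, not kernel code: the requeued GSO chain is
 * owned by the qdisc, so every TX queue sharing that qdisc sees the same
 * stash, and teardown can always find and free it. */
#include <stdio.h>
#include <stddef.h>

struct toy_skb {
        int id;
        struct toy_skb *next;            /* non-NULL: part of a GSO segment chain */
};

struct toy_qdisc {
        struct toy_skb *gso_skb;         /* after the patch: stashed per qdisc */
        struct toy_skb *(*dequeue)(struct toy_qdisc *q);
};

/* Like dev_requeue_skb(): a half-sent GSO chain is stashed on the qdisc. */
static void requeue_skb(struct toy_qdisc *q, struct toy_skb *skb)
{
        if (skb->next)
                q->gso_skb = skb;
}

/* Like dequeue_skb(): prefer the stashed chain, otherwise ask the qdisc. */
static struct toy_skb *dequeue_skb_pref(struct toy_qdisc *q)
{
        struct toy_skb *skb = q->gso_skb;

        if (skb)
                q->gso_skb = NULL;
        else
                skb = q->dequeue(q);
        return skb;
}

/* Like the dev_deactivate_queue() change: detach any dangling stash at
 * teardown so the caller can free it (the kernel kfree_skb()s it). */
static struct toy_skb *deactivate(struct toy_qdisc *q)
{
        struct toy_skb *skb = q->gso_skb;

        q->gso_skb = NULL;
        return skb;
}

static struct toy_skb *empty_dequeue(struct toy_qdisc *q)
{
        (void)q;
        return NULL;                     /* nothing queued in this toy example */
}

int main(void)
{
        struct toy_qdisc q = { .gso_skb = NULL, .dequeue = empty_dequeue };
        struct toy_skb seg2 = { .id = 2, .next = NULL };
        struct toy_skb seg1 = { .id = 1, .next = &seg2 };   /* half-sent GSO chain */

        requeue_skb(&q, &seg1);          /* driver was busy: stash on the qdisc */
        printf("dequeued id=%d\n", dequeue_skb_pref(&q)->id);   /* prints 1 */

        requeue_skb(&q, &seg1);          /* stash again, then tear the qdisc down */
        printf("dangling skb found at deactivate: %s\n",
               deactivate(&q) ? "yes" : "no");
        return 0;
}

Because the stash lives in the Qdisc rather than in one particular netdev_queue, several TX queues can share a single qdisc without losing track of a requeued GSO chain, which is exactly the situation the commit message describes.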
include/linux/netdevice.h
@@ -449,7 +449,6 @@ struct netdev_queue {
 	struct net_device *dev;
 	struct Qdisc *qdisc;
 	unsigned long state;
-	struct sk_buff *gso_skb;
 	spinlock_t _xmit_lock;
 	int xmit_lock_owner;
 	struct Qdisc *qdisc_sleeping;
include/net/sch_generic.h
@@ -36,6 +36,7 @@ struct Qdisc
 	u32 handle;
 	u32 parent;
 	atomic_t refcnt;
+	struct sk_buff *gso_skb;
 	struct sk_buff_head q;
 	struct netdev_queue *dev_queue;
 	struct list_head list;
net/sched/sch_generic.c
@@ -77,7 +77,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 				  struct Qdisc *q)
 {
 	if (unlikely(skb->next))
-		dev_queue->gso_skb = skb;
+		q->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 
@@ -85,13 +85,12 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
-					  struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
-	if ((skb = dev_queue->gso_skb))
-		dev_queue->gso_skb = NULL;
+	if ((skb = q->gso_skb))
+		q->gso_skb = NULL;
 	else
 		skb = q->dequeue(q);
 
@@ -155,10 +154,9 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
@@ -643,8 +641,8 @@ static void dev_deactivate_queue(struct net_device *dev,
 				 void *_qdisc_default)
 {
 	struct Qdisc *qdisc_default = _qdisc_default;
+	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
-	struct sk_buff *skb;
 
 	spin_lock_bh(&dev_queue->lock);
 
@@ -652,9 +650,10 @@ static void dev_deactivate_queue(struct net_device *dev,
 	if (qdisc) {
 		dev_queue->qdisc = qdisc_default;
 		qdisc_reset(qdisc);
+
+		skb = qdisc->gso_skb;
+		qdisc->gso_skb = NULL;
 	}
-	skb = dev_queue->gso_skb;
-	dev_queue->gso_skb = NULL;
 
 	spin_unlock_bh(&dev_queue->lock);