Commit fd44de7c authored by Patrick McHardy, committed by David S. Miller

[NET_SCHED]: ingress: switch back to using ingress_lock

Switch ingress queueing back to using ingress_lock. qdisc_lock_tree now locks
both the ingress and egress qdiscs on the device. All changes to data that
might be used on both ingress and egress need to be protected by using
qdisc_lock_tree instead of manually taking dev->queue_lock. Additionally,
the qdisc stats_lock needs to be initialized to point to ingress_lock for
ingress qdiscs.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 0463d4ae
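The core idea of the commit is a fixed two-lock acquisition order: qdisc_lock_tree() takes the egress queue_lock first and then ingress_lock, and every control-path update that can be observed from both directions goes through that helper. The following standalone sketch (illustrative userspace C with pthreads; the struct and function names mirror the kernel fields, but nothing here is kernel code) shows the pattern; because every caller acquires the two locks in the same order, ABBA deadlocks between concurrent tree updates cannot occur:

	#include <pthread.h>
	#include <stdio.h>

	/* Hypothetical stand-in for struct net_device; only the two
	 * locks relevant to this commit are modeled. */
	struct net_device {
		pthread_mutex_t queue_lock;   /* serializes egress enqueue/dequeue */
		pthread_mutex_t ingress_lock; /* serializes ingress filtering */
	};

	/* Mirror of the new qdisc_lock_tree(): always take queue_lock
	 * first, then ingress_lock, in that fixed order. */
	static void qdisc_lock_tree(struct net_device *dev)
	{
		pthread_mutex_lock(&dev->queue_lock);
		pthread_mutex_lock(&dev->ingress_lock);
	}

	static void qdisc_unlock_tree(struct net_device *dev)
	{
		/* Release in reverse order of acquisition. */
		pthread_mutex_unlock(&dev->ingress_lock);
		pthread_mutex_unlock(&dev->queue_lock);
	}

	int main(void)
	{
		struct net_device dev = {
			.queue_lock   = PTHREAD_MUTEX_INITIALIZER,
			.ingress_lock = PTHREAD_MUTEX_INITIALIZER,
		};

		/* Control-path pattern from the diff below: any change
		 * visible to both ingress and egress (e.g. destroying a
		 * grafted qdisc) holds both locks via the tree helpers
		 * instead of taking queue_lock by hand. */
		qdisc_lock_tree(&dev);
		printf("holding queue_lock + ingress_lock: safe to update shared state\n");
		qdisc_unlock_tree(&dev);
		return 0;
	}

The release order in qdisc_unlock_tree() is the mirror image of the acquisition order, matching the sch_generic.c hunk below.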
@@ -1747,10 +1747,10 @@ static int ing_filter(struct sk_buff *skb)
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);

-	spin_lock(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
-	spin_unlock(&dev->queue_lock);
+	spin_unlock(&dev->ingress_lock);
 }
@@ -89,9 +89,9 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
 {
-	spin_lock_bh(&dev->queue_lock);
+	qdisc_lock_tree(dev);
 	memset(head->fastmap, 0, sizeof(head->fastmap));
-	spin_unlock_bh(&dev->queue_lock);
+	qdisc_unlock_tree(dev);
 }

 static inline void
@@ -500,12 +500,16 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
+		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
-	} else if (handle == 0) {
-		handle = qdisc_alloc_handle(dev);
-		err = -ENOMEM;
-		if (handle == 0)
-			goto err_out3;
+	} else {
+		sch->stats_lock = &dev->queue_lock;
+		if (handle == 0) {
+			handle = qdisc_alloc_handle(dev);
+			err = -ENOMEM;
+			if (handle == 0)
+				goto err_out3;
+		}
 	}

 	sch->handle = handle;
@@ -654,9 +658,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			return err;
 		if (q) {
 			qdisc_notify(skb, n, clid, q, NULL);
-			spin_lock_bh(&dev->queue_lock);
+			qdisc_lock_tree(dev);
 			qdisc_destroy(q);
-			spin_unlock_bh(&dev->queue_lock);
+			qdisc_unlock_tree(dev);
 		}
 	} else {
 		qdisc_notify(skb, n, clid, NULL, q);
@@ -789,17 +793,17 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 		err = qdisc_graft(dev, p, clid, q, &old_q);
 		if (err) {
 			if (q) {
-				spin_lock_bh(&dev->queue_lock);
+				qdisc_lock_tree(dev);
 				qdisc_destroy(q);
-				spin_unlock_bh(&dev->queue_lock);
+				qdisc_unlock_tree(dev);
 			}
 			return err;
 		}
 		qdisc_notify(skb, n, clid, old_q, q);
 		if (old_q) {
-			spin_lock_bh(&dev->queue_lock);
+			qdisc_lock_tree(dev);
 			qdisc_destroy(old_q);
-			spin_unlock_bh(&dev->queue_lock);
+			qdisc_unlock_tree(dev);
 		}
 	}
 	return 0;
@@ -42,16 +42,20 @@
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
  *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */

 void qdisc_lock_tree(struct net_device *dev)
 {
 	spin_lock_bh(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 }

 void qdisc_unlock_tree(struct net_device *dev)
 {
+	spin_unlock(&dev->ingress_lock);
 	spin_unlock_bh(&dev->queue_lock);
 }
@@ -431,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->dequeue = ops->dequeue;
 	sch->dev = dev;
 	dev_hold(dev);
-	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);

 	return sch;
@@ -447,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;

 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -248,16 +248,11 @@ ing_hook(unsigned int hook, struct sk_buff **pskb,
 		skb->dev ? (*pskb)->dev->name : "(no dev)",
 		skb->len);

-/*
-revisit later: Use a private since lock dev->queue_lock is also
-used on the egress (might slow things for an iota)
-*/
-
 	if (dev->qdisc_ingress) {
-		spin_lock(&dev->queue_lock);
+		spin_lock(&dev->ingress_lock);
 		if ((q = dev->qdisc_ingress) != NULL)
 			fwres = q->enqueue(skb, q);
-		spin_unlock(&dev->queue_lock);
+		spin_unlock(&dev->ingress_lock);
 	}

 	return fwres;
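One pattern repeated in both ingress hunks above deserves a note: dev->qdisc_ingress is tested once without the lock (a cheap fast-path bailout when no ingress qdisc is attached) and then re-read under ingress_lock before being dereferenced, since the qdisc may be detached concurrently by the control path. A minimal self-contained sketch of that check-lock-recheck pattern (hypothetical userspace C with stub types; not the kernel code):

	#include <pthread.h>
	#include <stddef.h>

	struct qdisc {
		int (*enqueue)(void *skb, struct qdisc *q);
	};

	struct net_device {
		pthread_mutex_t ingress_lock;
		struct qdisc *qdisc_ingress; /* may be torn down concurrently */
	};

	/* The unlocked test skips the lock entirely on devices with no
	 * ingress qdisc; the re-read under ingress_lock guards against
	 * the qdisc being removed between the test and the enqueue. */
	static int ing_filter_sketch(struct net_device *dev, void *skb)
	{
		int result = 0;
		struct qdisc *q;

		if (dev->qdisc_ingress) {
			pthread_mutex_lock(&dev->ingress_lock);
			if ((q = dev->qdisc_ingress) != NULL)
				result = q->enqueue(skb, q);
			pthread_mutex_unlock(&dev->ingress_lock);
		}
		return result;
	}

	static int stub_enqueue(void *skb, struct qdisc *q)
	{
		(void)skb; (void)q;
		return 1; /* pretend the packet was queued */
	}

	int main(void)
	{
		struct qdisc q = { .enqueue = stub_enqueue };
		struct net_device dev = {
			.ingress_lock  = PTHREAD_MUTEX_INITIALIZER,
			.qdisc_ingress = &q,
		};
		return ing_filter_sketch(&dev, NULL) == 1 ? 0 : 1;
	}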