Commit 41b0cd36 authored by Zhu Yanjun, committed by David S. Miller

forcedeth: remove duplicate structure member in xmit

Since both first_tx_ctx and tx_skb point to the head of the tx ctx
array, it is not necessary to keep two structure members that
statically indicate the same head. So first_tx_ctx is removed.

CC: Srinivas Eeda <srinivas.eeda@oracle.com>
CC: Joe Jin <joe.jin@oracle.com>
CC: Junxiao Bi <junxiao.bi@oracle.com>
Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent e9c5a106
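As context for the commit message above, here is a minimal, hypothetical sketch (not driver code; struct ctx, ring, put, last, and RING_SIZE are all made-up names) of the pattern the change relies on: a cursor walking a ring backed by a plain array can wrap by resetting to the array base itself, so a separate "first" pointer that statically equals the base carries no extra information.

#include <stdio.h>

#define RING_SIZE 4

struct ctx { int id; };

int main(void)
{
        struct ctx ring[RING_SIZE];               /* plays the role of np->tx_skb */
        struct ctx *last = &ring[RING_SIZE - 1];  /* plays the role of np->last_tx_ctx */
        struct ctx *put = ring;                   /* plays the role of np->put_tx_ctx */

        for (int i = 0; i < 10; i++) {
                put->id = i;
                /* Wrap-around: the array base already names the head,
                 * so no duplicate "first" pointer is needed. */
                if (put++ == last)
                        put = ring;
        }
        printf("cursor at index %td after wrapping\n", put - ring);
        return 0;
}

Compiled with any C99 compiler, this prints "cursor at index 2 after wrapping": ten writes into a four-slot ring leave the cursor at 10 mod 4.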
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -824,7 +824,7 @@ struct fe_priv {
 	 */
 	union ring_type get_tx, put_tx, last_tx;
 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+	struct nv_skb_map *last_tx_ctx;
 	struct nv_skb_map *tx_skb;
 
 	union ring_type tx_ring;
@@ -1939,7 +1939,8 @@ static void nv_init_tx(struct net_device *dev)
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+	np->get_tx_ctx = np->tx_skb;
+	np->put_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
 	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
@@ -2251,7 +2252,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (unlikely(put_tx++ == np->last_tx.orig))
 			put_tx = np->tx_ring.orig;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2277,7 +2278,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			do {
 				nv_unmap_txskb(np, start_tx_ctx);
 				if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-					tmp_tx_ctx = np->first_tx_ctx;
+					tmp_tx_ctx = np->tx_skb;
 			} while (tmp_tx_ctx != np->put_tx_ctx);
 			dev_kfree_skb_any(skb);
 			np->put_tx_ctx = start_tx_ctx;
@@ -2297,7 +2298,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->tx_ring.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
@@ -2306,7 +2307,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		prev_tx = put_tx - 1;
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2409,7 +2410,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		if (unlikely(put_tx++ == np->last_tx.ex))
 			put_tx = np->tx_ring.ex;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2435,7 +2436,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			do {
 				nv_unmap_txskb(np, start_tx_ctx);
 				if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-					tmp_tx_ctx = np->first_tx_ctx;
+					tmp_tx_ctx = np->tx_skb;
 			} while (tmp_tx_ctx != np->put_tx_ctx);
 			dev_kfree_skb_any(skb);
 			np->put_tx_ctx = start_tx_ctx;
@@ -2455,7 +2456,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->tx_ring.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
@@ -2464,7 +2465,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	else
 		prev_tx = put_tx - 1;
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2600,7 +2601,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
 			np->get_tx.orig = np->tx_ring.orig;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2654,7 +2655,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->tx_ring.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);