Commit 2a90f7e1 authored by Simon Guinot, committed by David S. Miller

net: mvneta: add xmit_more support

Based on the skb's xmit_more flag, TX descriptors can be concatenated
before flushing. This commit delays the Tx descriptor flush if the queue
is running and there are more skbs to send.
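As an illustration, here is a minimal user-space sketch of that
deferred-flush decision (tx_one and tx_queue are illustrative stand-ins,
not the driver's code; the actual patch below uses skb->xmit_more,
netif_xmit_stopped() and mvneta_txq_pend_desc_add()):

    #include <stdbool.h>

    struct tx_queue {
    	int pending;	/* descriptors queued but not yet flushed to HW */
    };

    /* Called once per transmitted skb: flush immediately when no further
     * skbs are coming or the queue was stopped, otherwise accumulate the
     * descriptors so a single doorbell write covers the whole batch.
     */
    void tx_one(struct tx_queue *txq, int frags, bool xmit_more,
    	    bool queue_stopped)
    {
    	if (!xmit_more || queue_stopped) {
    		/* write txq->pending + frags to the pending-descriptor
    		 * register here, then reset the software counter
    		 */
    		txq->pending = 0;
    	} else {
    		txq->pending += frags;
    	}
    }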

Due to a limitation of the MVNETA_TXQ_UPDATE_REG(q) register, at most
255 descriptors can be flushed at once. A new macro
(MVNETA_TXQ_DEC_SENT_MASK) was therefore added to ensure that the
number of concatenated descriptors does not exceed that value.
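To make the arithmetic concrete, a small sketch of the overflow check
(user-space model; can_batch is hypothetical, only
MVNETA_TXQ_DEC_SENT_MASK comes from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    #define MVNETA_TXQ_DEC_SENT_MASK	0xff	/* 8-bit field: at most 255 */

    /* True if this skb's descriptors can still be batched without
     * overflowing the count later written to MVNETA_TXQ_UPDATE_REG(q).
     */
    static bool can_batch(int pending, int frags)
    {
    	return pending + frags <= MVNETA_TXQ_DEC_SENT_MASK;
    }

    int main(void)
    {
    	printf("%d\n", can_batch(250, 5));	/* 255 <= 255 -> 1, keep batching */
    	printf("%d\n", can_batch(250, 6));	/* 256 >  255 -> 0, flush first */
    	return 0;
    }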
Signed-off-by: Simon Guinot <simon.guinot@sequanux.org>
Signed-off-by: Marcin Wojtas <mw@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent a3308d8f
@@ -224,6 +224,7 @@
 #define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
 #define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
 #define MVNETA_TXQ_DEC_SENT_SHIFT		16
+#define MVNETA_TXQ_DEC_SENT_MASK		0xff
 #define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
 #define MVNETA_TXQ_SENT_DESC_SHIFT		16
 #define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
@@ -525,6 +526,7 @@ struct mvneta_tx_queue {
 	 * descriptor ring
 	 */
 	int count;
+	int pending;
 	int tx_stop_threshold;
 	int tx_wake_threshold;
@@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 	/* Only 255 descriptors can be added at once; assume the caller
 	 * processes TX descriptors in quanta less than 256
 	 */
-	val = pend_desc;
+	val = pend_desc + txq->pending;
 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+	txq->pending = 0;
 }

 /* Get pointer to next TX descriptor to be processed (sent) by HW */
@@ -2399,11 +2402,15 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 	struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

 	txq->count += frags;
-	mvneta_txq_pend_desc_add(pp, txq, frags);
-
 	if (txq->count >= txq->tx_stop_threshold)
 		netif_tx_stop_queue(nq);

+	if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+	    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+		mvneta_txq_pend_desc_add(pp, txq, frags);
+	else
+		txq->pending += frags;
+
 	u64_stats_update_begin(&stats->syncp);
 	stats->tx_packets++;
 	stats->tx_bytes += len;
...