Commit 5b7da515 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: combine some stats into a union to allow for Tx/Rx stats overlap

This change moved some of the RX and TX stats into separate structures and
then placed those structures in a union in order to help reduce the size of
the ring structure.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Parent b6ec895e
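
The space saving comes purely from overlapping the two stat blocks: a ring is either a transmit or a receive ring, so its TX-only and RX-only counters are never live at the same time, and an anonymous union lets them share storage. The stand-alone sketch below is illustrative only; the struct members mirror the patch, but ring_separate, ring_union and the program around them are hypothetical and not part of the driver.

#include <stdio.h>
#include <stdint.h>

/* Per-ring counters split by direction, mirroring the patch. */
struct tx_queue_stats {
	uint64_t restart_queue;
	uint64_t tx_busy;
};

struct rx_queue_stats {
	uint64_t rsc_count;
	uint64_t rsc_flush;
	uint64_t non_eop_descs;
	uint64_t alloc_rx_page_failed;
	uint64_t alloc_rx_buff_failed;
};

/* Hypothetical layouts used only to compare sizes. */
struct ring_separate {		/* both stat blocks always present */
	struct tx_queue_stats tx_stats;
	struct rx_queue_stats rx_stats;
};

struct ring_union {		/* TX and RX counters overlap */
	union {			/* anonymous union: C11 or GNU extension */
		struct tx_queue_stats tx_stats;
		struct rx_queue_stats rx_stats;
	};
};

int main(void)
{
	/* The union needs room only for the larger member (5 x u64),
	 * so each ring shrinks by sizeof(struct tx_queue_stats). */
	printf("separate: %zu bytes\n", sizeof(struct ring_separate));	/* 56 */
	printf("union:    %zu bytes\n", sizeof(struct ring_union));	/* 40 */
	return 0;
}

Accesses then go through tx_stats.* on transmit rings and rx_stats.* on receive rings, exactly as the hunks below do.
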
@@ -146,6 +146,19 @@ struct ixgbe_queue_stats {
 	u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+};
+
+struct ixgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
 	struct device *dev;		/* device for DMA mapping */
@@ -183,13 +196,12 @@ struct ixgbe_ring {
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
-	int numa_node;
+	union {
+		struct ixgbe_tx_queue_stats tx_stats;
+		struct ixgbe_rx_queue_stats rx_stats;
+	};
 	unsigned long reinit_state;
-	u64 rsc_count;			/* stat for coalesced packets */
-	u64 rsc_flush;			/* stats for flushed packets */
-	u32 restart_queue;		/* track tx queue restarts */
-	u32 non_eop_descs;		/* track hardware descriptor chaining */
+	int numa_node;
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
 	struct rcu_head rcu;
......
@@ -783,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++tx_ring->restart_queue;
+			++tx_ring->tx_stats.restart_queue;
 		}
 	}
@@ -1024,7 +1024,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			skb = netdev_alloc_skb_ip_align(adapter->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 			/* initialize queue mapping */
@@ -1038,7 +1038,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 					    rx_ring->rx_buf_len,
 					    DMA_FROM_DEVICE);
 			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
 				bi->dma = 0;
 				goto no_buffers;
 			}
@@ -1048,7 +1048,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			if (!bi->page) {
 				bi->page = netdev_alloc_page(adapter->netdev);
 				if (!bi->page) {
-					adapter->alloc_rx_page_failed++;
+					rx_ring->rx_stats.alloc_rx_page_failed++;
 					goto no_buffers;
 				}
 			}
@@ -1063,7 +1063,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 						    DMA_FROM_DEVICE);
 			if (dma_mapping_error(rx_ring->dev,
 					      bi->page_dma)) {
-				adapter->alloc_rx_page_failed++;
+				rx_ring->rx_stats.alloc_rx_page_failed++;
 				bi->page_dma = 0;
 				goto no_buffers;
 			}
@@ -1258,7 +1258,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (staterr & IXGBE_RXD_STAT_EOP) {
 			if (skb->prev)
 				skb = ixgbe_transform_rsc_queue(skb,
-						&(rx_ring->rsc_count));
+						&(rx_ring->rx_stats.rsc_count));
 			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(rx_ring->dev,
@@ -1269,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count +=
+					rx_ring->rx_stats.rsc_count +=
 						skb_shinfo(skb)->nr_frags;
 				else
-					rx_ring->rsc_count++;
-				rx_ring->rsc_flush++;
+					rx_ring->rx_stats.rsc_count++;
+				rx_ring->rx_stats.rsc_flush++;
 			}
 			u64_stats_update_begin(&rx_ring->syncp);
 			rx_ring->stats.packets++;
@@ -1289,7 +1289,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				skb->next = next_buffer->skb;
 				skb->next->prev = skb;
 			}
-			rx_ring->non_eop_descs++;
+			rx_ring->rx_stats.non_eop_descs++;
 			goto next_desc;
 		}
@@ -5406,10 +5406,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
 	u64 total_mpc = 0;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-	u64 non_eop_descs = 0, restart_queue = 0;
-	struct ixgbe_hw_stats *hwstats = &adapter->stats;
+	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 bytes = 0, packets = 0;
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5422,21 +5424,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 			adapter->hw_rx_no_dma_resources +=
 				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rsc_count += adapter->rx_ring[i]->rsc_count;
-			rsc_flush += adapter->rx_ring[i]->rsc_flush;
+			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
 		}
 		adapter->rsc_total_count = rsc_count;
 		adapter->rsc_total_flush = rsc_flush;
 	}
 
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		bytes += rx_ring->stats.bytes;
+		packets += rx_ring->stats.packets;
+	}
+	adapter->non_eop_descs = non_eop_descs;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	netdev->stats.rx_bytes = bytes;
+	netdev->stats.rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
 	/* gather some stats to the adapter struct that are per queue */
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		restart_queue += adapter->tx_ring[i]->restart_queue;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+		restart_queue += tx_ring->tx_stats.restart_queue;
+		tx_busy += tx_ring->tx_stats.tx_busy;
+		bytes += tx_ring->stats.bytes;
+		packets += tx_ring->stats.packets;
+	}
 	adapter->restart_queue = restart_queue;
-
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-	adapter->non_eop_descs = non_eop_descs;
+	adapter->tx_busy = tx_busy;
+	netdev->stats.tx_bytes = bytes;
+	netdev->stats.tx_packets = packets;
 
 	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 	for (i = 0; i < 8; i++) {
@@ -6223,7 +6245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 	/* A reprieve! - use start_queue because it doesn't call schedule */
 	netif_start_subqueue(netdev, tx_ring->queue_index);
-	++tx_ring->restart_queue;
+	++tx_ring->tx_stats.restart_queue;
 	return 0;
 }
@@ -6339,7 +6361,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-		adapter->tx_busy++;
+		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
......