Commit f7d12cdc authored by Ben Hutchings, committed by David S. Miller

sfc: Refactor channel and queue lookup and iteration

In preparation for changes to the way channels and queue structures
are allocated, revise the macros and functions used to look up and
iterate over them.

- Replace efx_for_each_tx_queue() with iteration over channels then TX
  queues
- Replace efx_for_each_rx_queue() with iteration over channels then RX
  queues (with one exception, shortly to be removed)
- Introduce efx_get_{channel,rx_queue,tx_queue}() functions to look up
  channels and queues by index
- Introduce efx_channel_get_{rx,tx}_queue() functions to look up a
  channel's queues
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent ba1e8a35
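
The helpers named above replace direct indexing of the per-NIC channel and queue arrays. A condensed before/after sketch of the calling pattern, assembled from the hunks below (efx, channel, skb, rx_queue and tx_queue are assumed to be declared by the surrounding driver code; this sketch is not itself part of the patch):

    /* Old style: callers index the per-NIC arrays directly */
    channel = &efx->channel[0];
    rx_queue = &efx->rx_queue[channel->channel];
    tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];

    /* New style: the same lookups go through the accessors added in the
     * header hunk below, so how channels and queues are allocated can
     * change later without touching every caller */
    channel = efx_get_channel(efx, 0);
    rx_queue = efx_channel_get_rx_queue(channel);     /* NULL if this channel has no RX queue */
    tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
                                EFX_TXQ_TYPE_OFFLOAD);
    tx_queue = efx_channel_get_tx_queue(channel, 0);  /* TX queue of a given type on one channel */
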
@@ -248,7 +248,7 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
     efx_rx_strategy(channel);
-    efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+    efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
     return spent;
 }
@@ -1050,7 +1050,8 @@ static void efx_probe_interrupts(struct efx_nic *efx)
             efx->n_rx_channels = efx->n_channels;
         }
         for (i = 0; i < n_channels; i++)
-            efx->channel[i].irq = xentries[i].vector;
+            efx_get_channel(efx, i)->irq =
+                xentries[i].vector;
     } else {
         /* Fall back to single channel MSI */
         efx->interrupt_mode = EFX_INT_MODE_MSI;
@@ -1066,7 +1067,7 @@ static void efx_probe_interrupts(struct efx_nic *efx)
         efx->n_tx_channels = 1;
         rc = pci_enable_msi(efx->pci_dev);
         if (rc == 0) {
-            efx->channel[0].irq = efx->pci_dev->irq;
+            efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
         } else {
             netif_err(efx, drv, efx->net_dev,
                       "could not enable MSI\n");
@@ -1355,20 +1356,20 @@ static unsigned irq_mod_ticks(int usecs, int resolution)
 void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
                              bool rx_adaptive)
 {
-    struct efx_tx_queue *tx_queue;
-    struct efx_rx_queue *rx_queue;
+    struct efx_channel *channel;
     unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION);
     unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION);
     EFX_ASSERT_RESET_SERIALISED(efx);
-    efx_for_each_tx_queue(tx_queue, efx)
-        tx_queue->channel->irq_moderation = tx_ticks;
     efx->irq_rx_adaptive = rx_adaptive;
     efx->irq_rx_moderation = rx_ticks;
-    efx_for_each_rx_queue(rx_queue, efx)
-        rx_queue->channel->irq_moderation = rx_ticks;
+    efx_for_each_channel(channel, efx) {
+        if (efx_channel_get_rx_queue(channel))
+            channel->irq_moderation = rx_ticks;
+        else if (efx_channel_get_tx_queue(channel, 0))
+            channel->irq_moderation = tx_ticks;
+    }
 }
 /**************************************************************************
@@ -1767,6 +1768,7 @@ static int efx_register_netdev(struct efx_nic *efx)
 static void efx_unregister_netdev(struct efx_nic *efx)
 {
+    struct efx_channel *channel;
     struct efx_tx_queue *tx_queue;
     if (!efx->net_dev)
@@ -1777,8 +1779,10 @@ static void efx_unregister_netdev(struct efx_nic *efx)
     /* Free up any skbs still remaining. This has to happen before
      * we try to unregister the netdev as running their destructors
      * may be needed to get the device ref. count to 0. */
-    efx_for_each_tx_queue(tx_queue, efx)
-        efx_release_tx_buffers(tx_queue);
+    efx_for_each_channel(channel, efx) {
+        efx_for_each_channel_tx_queue(tx_queue, channel)
+            efx_release_tx_buffers(tx_queue);
+    }
     if (efx_dev_registered(efx)) {
         strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
...
@@ -328,9 +328,10 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
                                   unsigned int test_index,
                                   struct ethtool_string *strings, u64 *data)
 {
+    struct efx_channel *channel = efx_get_channel(efx, 0);
     struct efx_tx_queue *tx_queue;
-    efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+    efx_for_each_channel_tx_queue(tx_queue, channel) {
         efx_fill_test(test_index++, strings, data,
                       &lb_tests->tx_sent[tx_queue->queue],
                       EFX_TX_QUEUE_NAME(tx_queue),
@@ -673,15 +674,15 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
                                     struct ethtool_coalesce *coalesce)
 {
     struct efx_nic *efx = netdev_priv(net_dev);
-    struct efx_tx_queue *tx_queue;
     struct efx_channel *channel;
     memset(coalesce, 0, sizeof(*coalesce));
     /* Find lowest IRQ moderation across all used TX queues */
     coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
-    efx_for_each_tx_queue(tx_queue, efx) {
-        channel = tx_queue->channel;
+    efx_for_each_channel(channel, efx) {
+        if (!efx_channel_get_tx_queue(channel, 0))
+            continue;
         if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
             if (channel->channel < efx->n_rx_channels)
                 coalesce->tx_coalesce_usecs_irq =
@@ -708,7 +709,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
 {
     struct efx_nic *efx = netdev_priv(net_dev);
     struct efx_channel *channel;
-    struct efx_tx_queue *tx_queue;
     unsigned tx_usecs, rx_usecs, adaptive;
     if (coalesce->use_adaptive_tx_coalesce)
@@ -725,8 +725,9 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
     adaptive = coalesce->use_adaptive_rx_coalesce;
     /* If the channel is shared only allow RX parameters to be set */
-    efx_for_each_tx_queue(tx_queue, efx) {
-        if ((tx_queue->channel->channel < efx->n_rx_channels) &&
+    efx_for_each_channel(channel, efx) {
+        if (efx_channel_get_rx_queue(channel) &&
+            efx_channel_get_tx_queue(channel, 0) &&
             tx_usecs) {
             netif_err(efx, drv, efx->net_dev, "Channel is shared. "
                       "Only RX coalescing may be set\n");
...
@@ -909,18 +909,34 @@ struct efx_nic_type {
  *
  *************************************************************************/
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+    EFX_BUG_ON_PARANOID(index >= efx->n_channels);
+    return &efx->channel[index];
+}
 /* Iterate over all used channels */
 #define efx_for_each_channel(_channel, _efx)                            \
     for (_channel = &((_efx)->channel[0]);                              \
          _channel < &((_efx)->channel[(efx)->n_channels]);              \
         _channel++)
-/* Iterate over all used TX queues */
-#define efx_for_each_tx_queue(_tx_queue, _efx)                          \
-    for (_tx_queue = &((_efx)->tx_queue[0]);                            \
-         _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES *                 \
-                                        (_efx)->n_tx_channels]);        \
-         _tx_queue++)
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
+{
+    EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
+                        type >= EFX_TXQ_TYPES);
+    return &efx->tx_queue[index * EFX_TXQ_TYPES + type];
+}
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+{
+    struct efx_tx_queue *tx_queue = channel->tx_queue;
+    EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
+    return tx_queue ? tx_queue + type : NULL;
+}
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel)              \
@@ -928,12 +944,27 @@ struct efx_nic_type {
          _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
          _tx_queue++)
+static inline struct efx_rx_queue *
+efx_get_rx_queue(struct efx_nic *efx, unsigned index)
+{
+    EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels);
+    return &efx->rx_queue[index];
+}
 /* Iterate over all used RX queues */
 #define efx_for_each_rx_queue(_rx_queue, _efx)                          \
     for (_rx_queue = &((_efx)->rx_queue[0]);                            \
          _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]);        \
          _rx_queue++)
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+    struct efx_rx_queue *rx_queue =
+        &channel->efx->rx_queue[channel->channel];
+    return rx_queue->channel == channel ? rx_queue : NULL;
+}
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel)              \
     for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
...
@@ -682,7 +682,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
         /* Transmit completion */
         tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
         tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-        tx_queue = &efx->tx_queue[tx_ev_q_label];
+        tx_queue = efx_channel_get_tx_queue(
+            channel, tx_ev_q_label % EFX_TXQ_TYPES);
         tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
                       EFX_TXQ_MASK);
         channel->irq_mod_score += tx_packets;
@@ -690,7 +691,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
     } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
         /* Rewrite the FIFO write pointer */
         tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
-        tx_queue = &efx->tx_queue[tx_ev_q_label];
+        tx_queue = efx_channel_get_tx_queue(
+            channel, tx_ev_q_label % EFX_TXQ_TYPES);
         if (efx_dev_registered(efx))
             netif_tx_lock(efx->net_dev);
@@ -830,7 +832,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
     WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
             channel->channel);
-    rx_queue = &efx->rx_queue[channel->channel];
+    rx_queue = efx_channel_get_rx_queue(channel);
     rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
     expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
@@ -882,7 +884,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
         /* The queue must be empty, so we won't receive any rx
          * events, so efx_process_channel() won't refill the
          * queue. Refill it here */
-        efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+        efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
     else
         netif_dbg(efx, hw, efx->net_dev, "channel %d received "
                   "generated event "EFX_QWORD_FMT"\n",
@@ -1166,7 +1168,7 @@ void efx_nic_generate_fill_event(struct efx_channel *channel)
 static void efx_poll_flush_events(struct efx_nic *efx)
 {
-    struct efx_channel *channel = &efx->channel[0];
+    struct efx_channel *channel = efx_get_channel(efx, 0);
     struct efx_tx_queue *tx_queue;
     struct efx_rx_queue *rx_queue;
     unsigned int read_ptr = channel->eventq_read_ptr;
@@ -1188,7 +1190,9 @@ static void efx_poll_flush_events(struct efx_nic *efx)
             ev_queue = EFX_QWORD_FIELD(*event,
                                        FSF_AZ_DRIVER_EV_SUBDATA);
             if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
-                tx_queue = efx->tx_queue + ev_queue;
+                tx_queue = efx_get_tx_queue(
+                    efx, ev_queue / EFX_TXQ_TYPES,
+                    ev_queue % EFX_TXQ_TYPES);
                 tx_queue->flushed = FLUSH_DONE;
             }
         } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
@@ -1198,7 +1202,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
             ev_failed = EFX_QWORD_FIELD(
                 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
             if (ev_queue < efx->n_rx_channels) {
-                rx_queue = efx->rx_queue + ev_queue;
+                rx_queue = efx_get_rx_queue(efx, ev_queue);
                 rx_queue->flushed =
                     ev_failed ? FLUSH_FAILED : FLUSH_DONE;
             }
@@ -1219,6 +1223,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
  * serialise them */
 int efx_nic_flush_queues(struct efx_nic *efx)
 {
+    struct efx_channel *channel;
     struct efx_rx_queue *rx_queue;
     struct efx_tx_queue *tx_queue;
     int i, tx_pending, rx_pending;
@@ -1227,29 +1232,35 @@ int efx_nic_flush_queues(struct efx_nic *efx)
     efx->type->prepare_flush(efx);
     /* Flush all tx queues in parallel */
-    efx_for_each_tx_queue(tx_queue, efx)
-        efx_flush_tx_queue(tx_queue);
+    efx_for_each_channel(channel, efx) {
+        efx_for_each_channel_tx_queue(tx_queue, channel)
+            efx_flush_tx_queue(tx_queue);
+    }
     /* The hardware supports four concurrent rx flushes, each of which may
      * need to be retried if there is an outstanding descriptor fetch */
     for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
         rx_pending = tx_pending = 0;
-        efx_for_each_rx_queue(rx_queue, efx) {
-            if (rx_queue->flushed == FLUSH_PENDING)
-                ++rx_pending;
-        }
-        efx_for_each_rx_queue(rx_queue, efx) {
-            if (rx_pending == EFX_RX_FLUSH_COUNT)
-                break;
-            if (rx_queue->flushed == FLUSH_FAILED ||
-                rx_queue->flushed == FLUSH_NONE) {
-                efx_flush_rx_queue(rx_queue);
-                ++rx_pending;
+        efx_for_each_channel(channel, efx) {
+            efx_for_each_channel_rx_queue(rx_queue, channel) {
+                if (rx_queue->flushed == FLUSH_PENDING)
+                    ++rx_pending;
             }
         }
-        efx_for_each_tx_queue(tx_queue, efx) {
-            if (tx_queue->flushed != FLUSH_DONE)
-                ++tx_pending;
+        efx_for_each_channel(channel, efx) {
+            efx_for_each_channel_rx_queue(rx_queue, channel) {
+                if (rx_pending == EFX_RX_FLUSH_COUNT)
+                    break;
+                if (rx_queue->flushed == FLUSH_FAILED ||
+                    rx_queue->flushed == FLUSH_NONE) {
+                    efx_flush_rx_queue(rx_queue);
+                    ++rx_pending;
+                }
+            }
+            efx_for_each_channel_tx_queue(tx_queue, channel) {
+                if (tx_queue->flushed != FLUSH_DONE)
+                    ++tx_pending;
+            }
         }
         if (rx_pending == 0 && tx_pending == 0)
@@ -1261,19 +1272,21 @@ int efx_nic_flush_queues(struct efx_nic *efx)
     /* Mark the queues as all flushed. We're going to return failure
      * leading to a reset, or fake up success anyway */
-    efx_for_each_tx_queue(tx_queue, efx) {
-        if (tx_queue->flushed != FLUSH_DONE)
-            netif_err(efx, hw, efx->net_dev,
-                      "tx queue %d flush command timed out\n",
-                      tx_queue->queue);
-        tx_queue->flushed = FLUSH_DONE;
-    }
-    efx_for_each_rx_queue(rx_queue, efx) {
-        if (rx_queue->flushed != FLUSH_DONE)
-            netif_err(efx, hw, efx->net_dev,
-                      "rx queue %d flush command timed out\n",
-                      efx_rx_queue_index(rx_queue));
-        rx_queue->flushed = FLUSH_DONE;
+    efx_for_each_channel(channel, efx) {
+        efx_for_each_channel_tx_queue(tx_queue, channel) {
+            if (tx_queue->flushed != FLUSH_DONE)
+                netif_err(efx, hw, efx->net_dev,
+                          "tx queue %d flush command timed out\n",
+                          tx_queue->queue);
+            tx_queue->flushed = FLUSH_DONE;
+        }
+        efx_for_each_channel_rx_queue(rx_queue, channel) {
+            if (rx_queue->flushed != FLUSH_DONE)
+                netif_err(efx, hw, efx->net_dev,
+                          "rx queue %d flush command timed out\n",
+                          efx_rx_queue_index(rx_queue));
+            rx_queue->flushed = FLUSH_DONE;
+        }
     }
     return -ETIMEDOUT;
...
@@ -311,7 +311,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
                                   struct efx_rx_buffer *rx_buf)
 {
     struct efx_nic *efx = channel->efx;
-    struct efx_rx_queue *rx_queue = &efx->rx_queue[channel->channel];
+    struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
     struct efx_rx_buffer *new_buf;
     unsigned index;
...
@@ -567,7 +567,7 @@ static int efx_wait_for_link(struct efx_nic *efx)
         efx->type->monitor(efx);
         mutex_unlock(&efx->mac_lock);
     } else {
-        struct efx_channel *channel = &efx->channel[0];
+        struct efx_channel *channel = efx_get_channel(efx, 0);
         if (channel->work_pending)
             efx_process_channel_now(channel);
     }
@@ -594,6 +594,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 {
     enum efx_loopback_mode mode;
     struct efx_loopback_state *state;
+    struct efx_channel *channel = efx_get_channel(efx, 0);
     struct efx_tx_queue *tx_queue;
     int rc = 0;
@@ -634,7 +635,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
     }
     /* Test both types of TX queue */
-    efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
+    efx_for_each_channel_tx_queue(tx_queue, channel) {
         state->offload_csum = (tx_queue->queue &
                                EFX_TXQ_TYPE_OFFLOAD);
         rc = efx_test_loopback(tx_queue,
...
@@ -37,8 +37,9 @@
 void efx_stop_queue(struct efx_channel *channel)
 {
     struct efx_nic *efx = channel->efx;
+    struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-    if (!channel->tx_queue)
+    if (!tx_queue)
         return;
     spin_lock_bh(&channel->tx_stop_lock);
@@ -46,9 +47,8 @@ void efx_stop_queue(struct efx_channel *channel)
     atomic_inc(&channel->tx_stop_count);
     netif_tx_stop_queue(
-        netdev_get_tx_queue(
-            efx->net_dev,
-            channel->tx_queue->queue / EFX_TXQ_TYPES));
+        netdev_get_tx_queue(efx->net_dev,
+                            tx_queue->queue / EFX_TXQ_TYPES));
     spin_unlock_bh(&channel->tx_stop_lock);
 }
@@ -57,8 +57,9 @@ void efx_stop_queue(struct efx_channel *channel)
 void efx_wake_queue(struct efx_channel *channel)
 {
     struct efx_nic *efx = channel->efx;
+    struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
-    if (!channel->tx_queue)
+    if (!tx_queue)
         return;
     local_bh_disable();
@@ -66,9 +67,8 @@ void efx_wake_queue(struct efx_channel *channel)
                             &channel->tx_stop_lock)) {
         netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
         netif_tx_wake_queue(
-            netdev_get_tx_queue(
-                efx->net_dev,
-                channel->tx_queue->queue / EFX_TXQ_TYPES));
+            netdev_get_tx_queue(efx->net_dev,
+                                tx_queue->queue / EFX_TXQ_TYPES));
         spin_unlock(&channel->tx_stop_lock);
     }
     local_bh_enable();
@@ -390,9 +390,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
     if (unlikely(efx->port_inhibited))
         return NETDEV_TX_BUSY;
-    tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
-    if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
-        tx_queue += EFX_TXQ_TYPE_OFFLOAD;
+    tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
+                                skb->ip_summed == CHECKSUM_PARTIAL ?
+                                EFX_TXQ_TYPE_OFFLOAD : 0);
     return efx_enqueue_skb(tx_queue, skb);
 }
...