Commit 1646a6f3 authored by Ben Hutchings

sfc: Clean up test interrupt handling

Interrupts are normally generated by the event queues, moderated by
timers.  However, they may also be triggered by detection of a 'fatal'
error condition (e.g. memory parity error) or by the host writing to
certain CSR fields as part of a self-test.

The IRQ level/index used for these on Falcon rev B0 and Siena is set
by the KER_INT_LEVE_SEL field and cached by the driver in
efx_nic::fatal_irq_level.  Since this value is also relevant to
self-tests, rename the field to just 'irq_level'.
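
For orientation, a sketch condensed from the nic.c hunks below: the two
interrupt paths consume the cached level differently.  Under legacy
interrupts it is a bit number in the INT_ISR0 queues mask; under MSI-X
it is the channel/vector index.

	/* Condensed sketch, not a complete handler; see the diff below */
	if (queues & (1U << efx->irq_level)) {		/* legacy INTx */
		/* fatal-error check and test-interrupt bookkeeping */
	}
	...
	if (channel->channel == efx->irq_level) {	/* MSI-X */
		/* same handling, keyed on the vector number */
	}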

Avoid unnecessary cache traffic by using a per-channel 'last_irq_cpu'
field and only writing to the per-controller field when the interrupt
matches efx_nic::irq_level.  Remove the volatile qualifier and use
ACCESS_ONCE in the places we read these fields.
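
The pairing this yields is a plain store in the interrupt handlers and
a single marked load in the self-test; a minimal sketch using the names
from the diff below:

	/* IRQ path: record the handling CPU with a plain store */
	channel->last_irq_cpu = raw_smp_processor_id();

	/* Self-test path: reset the marker, trigger a test event, then
	 * read the marker exactly once.  ACCESS_ONCE() keeps the
	 * compiler from caching or refetching the load, so the field
	 * itself need not be volatile (which would pessimise every
	 * access, not just the racy ones).
	 */
	channel->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_generate_test_event(channel);
	/* ... wait for the interrupt ... */
	int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;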
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Parent f70d1847
@@ -145,6 +145,12 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
 	napi_schedule(&channel->napi_str);
 }
 
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+	channel->last_irq_cpu = raw_smp_processor_id();
+	efx_schedule_channel(channel);
+}
+
 extern void efx_link_status_changed(struct efx_nic *efx);
 extern void efx_link_set_advertising(struct efx_nic *efx, u32);
 extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
...
@@ -189,9 +189,9 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 	falcon_irq_ack_a1(efx);
 
 	if (queues & 1)
-		efx_schedule_channel(efx_get_channel(efx, 0));
+		efx_schedule_channel_irq(efx_get_channel(efx, 0));
 	if (queues & 2)
-		efx_schedule_channel(efx_get_channel(efx, 1));
+		efx_schedule_channel_irq(efx_get_channel(efx, 1));
 	return IRQ_HANDLED;
 }
 
 /**************************************************************************
...
@@ -325,6 +325,7 @@ enum efx_rx_alloc_method {
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
+ * @last_irq_cpu: Last CPU to handle interrupt for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -355,6 +356,7 @@ struct efx_channel {
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
 
+	int last_irq_cpu;
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
 #ifdef CONFIG_RFS_ACCEL
@@ -648,7 +650,7 @@ struct efx_filter_state;
  * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
  * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
- * @fatal_irq_level: IRQ level (bit number) used for serious errors
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
  * @mtd_list: List of MTDs attached to the NIC
  * @nic_data: Hardware dependent state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
@@ -679,10 +681,9 @@ struct efx_filter_state;
  * @loopback_selftest: Offline self-test private state
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
- * @last_irq_cpu: Last CPU to handle interrupt.
- *	This register is written with the SMP processor ID whenever an
- *	interrupt is handled.  It is used by efx_nic_test_interrupt()
- *	to verify that an interrupt has occurred.
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
+ *	field is used by efx_test_interrupts() to verify that an
+ *	interrupt has occurred.
  * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
  * @mac_stats: MAC statistics. These include all statistics the MACs
  *	can provide. Generic code converts these into a standard
@@ -735,7 +736,7 @@ struct efx_nic {
 	struct efx_buffer irq_status;
 	unsigned irq_zero_count;
-	unsigned fatal_irq_level;
+	unsigned irq_level;
 
 #ifdef CONFIG_SFC_MTD
 	struct list_head mtd_list;
@@ -779,7 +780,7 @@ struct efx_nic {
 	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
 	spinlock_t biu_lock;
-	volatile signed int last_irq_cpu;
+	int last_irq_cpu;
 	unsigned n_rx_nodesc_drop_cnt;
 	struct efx_mac_stats mac_stats;
 	spinlock_t stats_lock;
...
@@ -1311,7 +1311,7 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
 	efx_oword_t int_en_reg_ker;
 
 	EFX_POPULATE_OWORD_3(int_en_reg_ker,
-			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
+			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
 			     FRF_AZ_KER_INT_KER, force,
 			     FRF_AZ_DRV_INT_EN_KER, enabled);
 	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1427,11 +1427,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
 	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
 	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
 
-	/* Check to see if we have a serious error condition */
-	if (queues & (1U << efx->fatal_irq_level)) {
+	/* Handle non-event-queue sources */
+	if (queues & (1U << efx->irq_level)) {
 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 		if (unlikely(syserr))
 			return efx_nic_fatal_interrupt(efx);
+		efx->last_irq_cpu = raw_smp_processor_id();
 	}
 
 	if (queues != 0) {
@@ -1441,7 +1442,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
 		/* Schedule processing of any interrupting queues */
 		efx_for_each_channel(channel, efx) {
 			if (queues & 1)
-				efx_schedule_channel(channel);
+				efx_schedule_channel_irq(channel);
 			queues >>= 1;
 		}
 		result = IRQ_HANDLED;
@@ -1458,18 +1459,16 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
 		efx_for_each_channel(channel, efx) {
 			event = efx_event(channel, channel->eventq_read_ptr);
 			if (efx_event_present(event))
-				efx_schedule_channel(channel);
+				efx_schedule_channel_irq(channel);
 			else
 				efx_nic_eventq_read_ack(channel);
 		}
 	}
 
-	if (result == IRQ_HANDLED) {
-		efx->last_irq_cpu = raw_smp_processor_id();
+	if (result == IRQ_HANDLED)
 		netif_vdbg(efx, intr, efx->net_dev,
 			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
 			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-	}
 
 	return result;
 }
@@ -1488,20 +1487,20 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
 	efx_oword_t *int_ker = efx->irq_status.addr;
 	int syserr;
 
-	efx->last_irq_cpu = raw_smp_processor_id();
 	netif_vdbg(efx, intr, efx->net_dev,
 		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
 		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
 
-	/* Check to see if we have a serious error condition */
-	if (channel->channel == efx->fatal_irq_level) {
+	/* Handle non-event-queue sources */
+	if (channel->channel == efx->irq_level) {
 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
 		if (unlikely(syserr))
 			return efx_nic_fatal_interrupt(efx);
+		efx->last_irq_cpu = raw_smp_processor_id();
 	}
 
 	/* Schedule processing of the channel */
-	efx_schedule_channel(channel);
+	efx_schedule_channel_irq(channel);
 
 	return IRQ_HANDLED;
 }
@@ -1640,10 +1639,10 @@ void efx_nic_init_common(struct efx_nic *efx)
 	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
 		/* Use an interrupt level unused by event queues */
-		efx->fatal_irq_level = 0x1f;
+		efx->irq_level = 0x1f;
 	else
 		/* Use a valid MSI-X vector */
-		efx->fatal_irq_level = 0;
+		efx->irq_level = 0;
 
 	/* Enable all the genuinely fatal interrupts.  (They are still
 	 * masked by the overall interrupt mask, controlled by
...
@@ -130,6 +130,8 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
+	int cpu;
+
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
@@ -142,7 +144,8 @@ static int efx_test_interrupts(struct efx_nic *efx,
 	/* Wait for arrival of test interrupt. */
 	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
 	schedule_timeout_uninterruptible(HZ / 10);
-	if (efx->last_irq_cpu >= 0)
+	cpu = ACCESS_ONCE(efx->last_irq_cpu);
+	if (cpu >= 0)
 		goto success;
 
 	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
@@ -150,8 +153,7 @@ static int efx_test_interrupts(struct efx_nic *efx,
 success:
 	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
-		  INT_MODE(efx),
-		  efx->last_irq_cpu);
+		  INT_MODE(efx), cpu);
 	tests->interrupt = 1;
 	return 0;
 }
@@ -165,7 +167,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	bool napi_ran, dma_seen, int_seen;
 
 	read_ptr = channel->eventq_read_ptr;
-	channel->efx->last_irq_cpu = -1;
+	channel->last_irq_cpu = -1;
 	smp_wmb();
 
 	efx_nic_generate_test_event(channel);
@@ -182,7 +184,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
 	} else {
 		napi_ran = false;
 		dma_seen = efx_nic_event_present(channel);
-		int_seen = efx->last_irq_cpu >= 0;
+		int_seen = ACCESS_ONCE(channel->last_irq_cpu) >= 0;
 	}
 
 	napi_enable(&channel->napi_str);
 	efx_nic_eventq_read_ack(channel);
...