author	Ben Hutchings <bhutchings@solarflare.com>	2012-10-05 23:35:41 +0100
committer	Ben Hutchings <bhutchings@solarflare.com>	2013-08-21 19:47:23 +0100
commit	d829118705f8213ffeffa4fefa8931dea6b7f016 (patch)
tree	a6e8f3cb560c2e113af0574363cb197b1b938345 /drivers/net/ethernet/sfc/nic.c
parent	sfc: Remove efx_process_channel_now() (diff)
sfc: Rework IRQ enable/disable
There are many problems with the current efx_stop_interrupts() and
efx_start_interrupts():

1. On Siena, it is unsafe to disable the master IRQ enable bit
   (DRV_INT_EN_KER) while any IRQ sources are enabled.

2. On EF10 there is no master IRQ enable bit, so we cannot expect to
   defer IRQs without tearing down event queues.  (Though I don't think
   we will need to keep any event queues around while the device is
   down, as we do for VFDI on Siena.)

3. synchronize_irq() only waits for a running IRQ handler to finish,
   not for any propagation through IRQ controllers.  Therefore an IRQ
   may still be received and handled after efx_stop_interrupts()
   returns.  IRQ handlers can then race with channel reallocation.

To fix this:

a. Introduce a software IRQ enable flag.  So long as this is clear,
   IRQ handlers will only acknowledge IRQs and not touch the channel
   structures.

b. Define a new struct efx_msi_context as the context for MSIs.  This
   is never reallocated and is sufficient to find the software enable
   flag and the channel structure.  It also includes the channel/IRQ
   name, which was previously separated out as it must also not be
   reallocated.

c. Split efx_{start,stop}_interrupts() into
   efx_{,soft_}_{enable,disable}_interrupts().  The 'soft' functions
   don't touch the hardware master enable flag (if it exists) and don't
   reinitialise or tear down channels with the keep_eventq flag set.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
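As an aid to reading the patch below, here is a minimal sketch of points
(a) and (b): the stable MSI context and the soft-enable check in the MSI
handler.  It is reconstructed only from the identifiers used in this
file (efx_msi_context, irq_soft_enabled, context->efx, context->index,
the .name buffer); the real definitions live elsewhere in the series,
and the field types and name-buffer size shown are assumptions.

	/* Illustrative sketch, not the exact definition from the series.
	 * Field types and the name-buffer size are assumptions.
	 */
	struct efx_msi_context {
		struct efx_nic *efx;	/* back-pointer; never reallocated */
		unsigned int index;	/* index of the channel this MSI serves */
		char name[32];		/* IRQ name, stable across channel realloc */
	};

	/* The handler resolves everything through the stable context and
	 * returns early whenever the software enable flag is clear, so it
	 * never touches channel structures that may be mid-reallocation.
	 */
	static irqreturn_t efx_msi_interrupt_sketch(int irq, void *dev_id)
	{
		struct efx_msi_context *context = dev_id;
		struct efx_nic *efx = context->efx;

		if (!ACCESS_ONCE(efx->irq_soft_enabled))
			return IRQ_HANDLED;	/* acknowledge only */

		efx_schedule_channel_irq(efx->channel[context->index]);
		return IRQ_HANDLED;
	}

The same pattern appears in the legacy handler below, which reads the
flag once at entry and skips scheduling work on any channel while it is
clear.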
Diffstat (limited to 'drivers/net/ethernet/sfc/nic.c')
-rw-r--r--	drivers/net/ethernet/sfc/nic.c	53
1 file changed, 28 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index f758333b4d85..c17d659f4525 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1567,6 +1567,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
@@ -1574,12 +1575,6 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
u32 queues;
int syserr;
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1595,7 +1590,7 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
}
/* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
@@ -1607,10 +1602,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx->irq_zero_count = 0;
/* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
}
result = IRQ_HANDLED;
@@ -1623,12 +1620,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_nic_eventq_read_ack(channel);
+ }
}
}
@@ -1649,8 +1649,8 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
@@ -1658,8 +1658,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
+ if (context->index == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
@@ -1667,7 +1670,7 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
}
/* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
+ efx_schedule_channel_irq(efx->channel[context->index]);
return IRQ_HANDLED;
}
@@ -1739,8 +1742,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1769,7 +1772,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
@@ -1787,7 +1790,7 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* ACK legacy interrupt */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)