Diffstat (limited to 'drivers/net/forcedeth.c')
-rw-r--r--  drivers/net/forcedeth.c  98
1 file changed, 60 insertions(+), 38 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 78c2fe185281..6b6765431eaf 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3440,25 +3440,22 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
+#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
- nv_tx_done(dev, np->tx_ring_size);
- spin_unlock(&np->lock);
+ napi_schedule(&np->napi);
-#ifdef CONFIG_FORCEDETH_NAPI
- if (np->events & NVREG_IRQ_RX_ALL) {
- spin_lock(&np->lock);
- napi_schedule(&np->napi);
+ /* Disable further irqs
+ (msix not enabled with napi) */
+ writel(0, base + NvRegIrqMask);
- /* Disable furthur receive irq's */
- np->irqmask &= ~NVREG_IRQ_RX_ALL;
+ spin_unlock(&np->lock);
- if (np->msi_flags & NV_MSI_X_ENABLED)
- writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- spin_unlock(&np->lock);
- }
+ return IRQ_HANDLED;
#else
+ spin_lock(&np->lock);
+ nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock(&np->lock);
+
if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx(dev))) {
spin_lock(&np->lock);
@@ -3467,7 +3464,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
spin_unlock(&np->lock);
}
}
-#endif
+
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3513,7 +3510,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}
-
+#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
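
Taken together, the two hunks above reduce the CONFIG_FORCEDETH_NAPI interrupt path to "schedule the poll routine and mask everything". A condensed sketch of that branch follows; the name nv_nic_irq_napi_sketch and the trimmed declarations are illustrative only, and the event read/ack, the IRQ_NONE early exit and the non-NAPI #else branch are omitted.

static irqreturn_t nv_nic_irq_napi_sketch(int foo, void *data)
{
        struct net_device *dev = (struct net_device *) data;
        struct fe_priv *np = netdev_priv(dev);
        u8 __iomem *base = get_hwbase(dev);

        nv_msi_workaround(np);

        spin_lock(&np->lock);
        /* hand rx, tx and link work to the poll routine */
        napi_schedule(&np->napi);
        /* mask every interrupt source until nv_napi_poll() restores
           np->irqmask; one write suffices because MSI-X is not enabled
           together with NAPI */
        writel(0, base + NvRegIrqMask);
        spin_unlock(&np->lock);

        return IRQ_HANDLED;
}

Masking with a single writel(0, base + NvRegIrqMask) replaces the old per-source bookkeeping precisely because, as the added comment notes, MSI-X is not enabled in the NAPI configuration, so one register covers every source.
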
@@ -3548,25 +3545,22 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
+#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
- nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
- spin_unlock(&np->lock);
+ napi_schedule(&np->napi);
-#ifdef CONFIG_FORCEDETH_NAPI
- if (np->events & NVREG_IRQ_RX_ALL) {
- spin_lock(&np->lock);
- napi_schedule(&np->napi);
+ /* Disable further irqs
+ (msix not enabled with napi) */
+ writel(0, base + NvRegIrqMask);
- /* Disable furthur receive irq's */
- np->irqmask &= ~NVREG_IRQ_RX_ALL;
+ spin_unlock(&np->lock);
- if (np->msi_flags & NV_MSI_X_ENABLED)
- writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
- spin_unlock(&np->lock);
- }
+ return IRQ_HANDLED;
#else
+ spin_lock(&np->lock);
+ nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
+ spin_unlock(&np->lock);
+
if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx_optimized(dev))) {
spin_lock(&np->lock);
@@ -3575,7 +3569,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
spin_unlock(&np->lock);
}
}
-#endif
+
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3622,7 +3616,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}
-
+#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
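
With CONFIG_FORCEDETH_NAPI disabled, the same restructuring keeps tx completion and rx processing inside the interrupt handler, now grouped at the top of the #else branch. The fragment below sketches only that branch of nv_nic_irq_optimized(); the helper name is hypothetical, the surrounding iteration loop and the link/recovery handling are left out, and the refill-retry body is elided because it lies outside the hunks shown.

static void nv_nic_irq_optimized_no_napi_sketch(struct net_device *dev)
{
        struct fe_priv *np = netdev_priv(dev);

        /* tx completion still runs in hard-irq context here,
           under the ring lock */
        spin_lock(&np->lock);
        nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
        spin_unlock(&np->lock);

        /* rx processing and ring refill, as in the unchanged code */
        if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
                if (unlikely(nv_alloc_rx_optimized(dev))) {
                        spin_lock(&np->lock);
                        /* ... refill-retry handling from the surrounding,
                           unchanged driver code ... */
                        spin_unlock(&np->lock);
                }
        }
}
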
@@ -3682,9 +3676,17 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
int pkts, retcode;
if (!nv_optimized(np)) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
+
pkts = nv_rx_process(dev, budget);
retcode = nv_alloc_rx(dev);
} else {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_tx_done_optimized(dev, np->tx_ring_size);
+ spin_unlock_irqrestore(&np->lock, flags);
+
pkts = nv_rx_process_optimized(dev, budget);
retcode = nv_alloc_rx_optimized(dev);
}
@@ -3696,17 +3698,37 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&np->lock, flags);
}
+ if (unlikely(np->events & NVREG_IRQ_LINK)) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_link_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ }
+ if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
+ spin_lock_irqsave(&np->lock, flags);
+ nv_linkchange(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+ np->link_timeout = jiffies + LINK_TIMEOUT;
+ }
+ if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
+ spin_lock_irqsave(&np->lock, flags);
+ if (!np->in_shutdown) {
+ np->nic_poll_irq = np->irqmask;
+ np->recover_error = 1;
+ mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ __napi_complete(napi);
+ return pkts;
+ }
+
if (pkts < budget) {
- /* re-enable receive interrupts */
+ /* re-enable interrupts
+ (msix not enabled in napi) */
spin_lock_irqsave(&np->lock, flags);
__napi_complete(napi);
- np->irqmask |= NVREG_IRQ_RX_ALL;
- if (np->msi_flags & NV_MSI_X_ENABLED)
- writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
- else
- writel(np->irqmask, base + NvRegIrqMask);
+ writel(np->irqmask, base + NvRegIrqMask);
spin_unlock_irqrestore(&np->lock, flags);
}
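
On the nv_napi_poll() side, the hunks above move tx completion into poll context, add link-change, link-timer and recover-error handling there, and restore the full np->irqmask only when the rx work stayed under budget. The sketch below condenses that flow; the name nv_napi_poll_sketch and the np->dev back pointer are assumptions made for illustration, and the refill/oom and recover-error branches are summarized in comments.

static int nv_napi_poll_sketch(struct napi_struct *napi, int budget)
{
        /* assumes fe_priv carries a back pointer to its net_device,
           as the driver's own poll routine relies on */
        struct fe_priv *np = container_of(napi, struct fe_priv, napi);
        struct net_device *dev = np->dev;
        u8 __iomem *base = get_hwbase(dev);
        unsigned long flags;
        int pkts;

        /* tx completion now happens in poll context, under the ring lock */
        spin_lock_irqsave(&np->lock, flags);
        nv_tx_done(dev, np->tx_ring_size);      /* nv_tx_done_optimized() on the optimized path */
        spin_unlock_irqrestore(&np->lock, flags);

        pkts = nv_rx_process(dev, budget);      /* nv_rx_process_optimized() likewise */

        /* rx refill retry, link change, link timer and recover-error
           handling follow here exactly as in the hunk above; the
           recover-error case completes NAPI and returns early */

        if (pkts < budget) {
                /* under budget: finish polling and restore the full
                   interrupt mask in one write (MSI-X is not enabled
                   together with NAPI) */
                spin_lock_irqsave(&np->lock, flags);
                __napi_complete(napi);
                writel(np->irqmask, base + NvRegIrqMask);
                spin_unlock_irqrestore(&np->lock, flags);
        }
        return pkts;
}

Re-enabling with writel(np->irqmask, ...) rather than just NVREG_IRQ_RX_ALL mirrors the interrupt side: the handler masked every source, so the poll routine has to unmask every source once its work is done.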