author	Nicolas Ferre <nicolas.ferre@microchip.com>	2020-07-20 10:56:53 +0200
committer	David S. Miller <davem@davemloft.net>	2020-07-20 17:01:45 -0700
commit	9d45c8e89079dfe5489efaae9cea12a539ebc3ff (patch)
tree	0b5f0169a4addfde4b7e9868a3fac25c45ec5c4d /drivers/net/ethernet/cadence
parent	net: macb: WoL support for GEM type of Ethernet controller (diff)
net: macb: Add WoL interrupt support for MACB type of Ethernet controller
Handle the Wake-on-LAN interrupt for the Cadence MACB Ethernet controller. As we do for the GEM version, we handle the WoL interrupt in a specialized interrupt handler for the MACB version that is positioned just between the suspend() and resume() calls.

Cc: Claudiu Beznea <claudiu.beznea@microchip.com>
Cc: Harini Katakam <harini.katakam@xilinx.com>
Signed-off-by: Nicolas Ferre <nicolas.ferre@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
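The wake is reported through pm_wakeup_event(), which only has an effect if the platform device was previously enabled as a wakeup source; in the macb driver that is driven from the ethtool WoL path added by the parent commit. Below is a minimal, illustrative sketch of that precondition, assuming the driver's existing bp->wol / MACB_WOL_ENABLED bookkeeping; the helper name is hypothetical and not part of this patch.

	#include <linux/pm_wakeup.h>

	/*
	 * Illustrative only: pm_wakeup_event() in the new macb_wol_interrupt()
	 * reports a wake event only when the device has been enabled as a
	 * wakeup source, e.g. after userspace requests magic-packet WoL.
	 */
	static void example_enable_magic_packet_wake(struct macb *bp, bool enable)
	{
		if (enable)
			bp->wol |= MACB_WOL_ENABLED;	/* consulted later by macb_suspend() */
		else
			bp->wol &= ~MACB_WOL_ENABLED;

		/* Arm (or disarm) the device as a system wakeup source. */
		device_set_wakeup_enable(&bp->pdev->dev, enable);
	}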
Diffstat (limited to 'drivers/net/ethernet/cadence')
-rw-r--r--	drivers/net/ethernet/cadence/macb_main.c	40
1 file changed, 39 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 0f2417f09514..a6a35e1b0115 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1517,6 +1517,35 @@ static void macb_tx_restart(struct macb_queue *queue)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
}
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+{
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ u32 status;
+
+ status = queue_readl(queue, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
+ if (status & MACB_BIT(WOL)) {
+ queue_writel(queue, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ pm_wakeup_event(&bp->pdev->dev, 0);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
@@ -4619,8 +4648,8 @@ static int __maybe_unused macb_suspend(struct device *dev)
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
if (macb_is_gem(bp)) {
- devm_free_irq(dev, bp->queues[0].irq, bp->queues);
err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
@@ -4633,6 +4662,15 @@ static int __maybe_unused macb_suspend(struct device *dev)
queue_writel(bp->queues, IER, GEM_BIT(WOL));
gem_writel(bp, WOL, MACB_BIT(MAG));
} else {
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
queue_writel(bp->queues, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
}
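Taken together, the two macb_suspend() hunks implement a simple handler swap: queue 0's runtime IRQ handler is released unconditionally, then either gem_wol_interrupt() or the new macb_wol_interrupt() is installed for the duration of the sleep state. A condensed sketch of that pattern follows, using the same fields and calls as the patch; the helper name example_arm_wol_irq is illustrative, not part of the driver.

	/* Condensed, illustrative view of the suspend-time IRQ handler swap. */
	static int example_arm_wol_irq(struct device *dev, struct macb *bp,
				       const char *name)
	{
		int err;

		/* Drop the normal per-queue handler on queue 0... */
		devm_free_irq(dev, bp->queues[0].irq, bp->queues);

		/* ...and install the WoL-only handler matching the controller type. */
		err = devm_request_irq(dev, bp->queues[0].irq,
				       macb_is_gem(bp) ? gem_wol_interrupt
						       : macb_wol_interrupt,
				       IRQF_SHARED, name, bp->queues);
		if (err)
			dev_err(dev, "Unable to request IRQ %d (error %d)\n",
				bp->queues[0].irq, err);
		return err;
	}

Note the design choice visible in the diff: the devm_free_irq() call is hoisted out of the GEM-only branch so that both controller types release the runtime handler before arming their respective WoL handlers.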