Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/idt77252.c                          |   8
-rw-r--r--  drivers/net/can/Kconfig                         |  14
-rw-r--r--  drivers/net/can/Makefile                        |   1
-rw-r--r--  drivers/net/can/c_can/c_can.c                   |  15
-rw-r--r--  drivers/net/can/c_can/c_can.h                   |   8
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c               |  31
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c          |  84
-rw-r--r--  drivers/net/can/mscan/Kconfig                   |   2
-rw-r--r--  drivers/net/can/rcar_can.c                      | 876
-rw-r--r--  drivers/net/can/spi/mcp251x.c                   |   2
-rw-r--r--  drivers/net/can/usb/Kconfig                     |   8
-rw-r--r--  drivers/net/can/usb/Makefile                    |   1
-rw-r--r--  drivers/net/can/usb/gs_usb.c                    | 971
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h          |  30
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c  |  61
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c     | 150
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_cq.h       |   9
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c  |   4
-rw-r--r--  drivers/net/ieee802154/fakelb.c                 |   6
-rw-r--r--  drivers/net/tun.c                               |  54
-rw-r--r--  drivers/net/usb/cdc_ncm.c                       |   4
-rw-r--r--  drivers/net/wimax/i2400m/driver.c               |   7
22 files changed, 2255 insertions, 91 deletions
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1bdf104e90bb..b621f56a36be 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2551,12 +2551,12 @@ done:
timeout = 5 * 1000;
while (atomic_read(&vc->scq->used) > 0) {
timeout = msleep_interruptible(timeout);
- if (!timeout)
+ if (!timeout) {
+ pr_warn("%s: SCQ drain timeout: %u used\n",
+ card->name, atomic_read(&vc->scq->used));
break;
+ }
}
- if (!timeout)
- printk("%s: SCQ drain timeout: %u used\n",
- card->name, atomic_read(&vc->scq->used));
writel(TCMDQ_HALT | vc->index, SAR_REG_TCMDQ);
clear_scd(card, vc->scq, vc->class);
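The rework above relies on msleep_interruptible() returning the number of milliseconds still outstanding when a signal cuts the sleep short, and 0 once the requested period has fully elapsed; feeding the return value back into the next iteration keeps the overall drain budget intact across interruptions. A minimal, hypothetical sketch of the same drain-and-warn idiom (struct my_card and its fields are illustrative, not from the driver):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct my_card {			/* hypothetical device structure */
	const char *name;
	atomic_t queue_used;
};

static int drain_queue(struct my_card *card, unsigned int budget_ms)
{
	while (atomic_read(&card->queue_used) > 0) {
		/* Returns the time left if interrupted, 0 after a full sleep. */
		budget_ms = msleep_interruptible(budget_ms);
		if (!budget_ms) {
			pr_warn("%s: drain timeout: %d entries still queued\n",
				card->name, atomic_read(&card->queue_used));
			return -ETIMEDOUT;
		}
	}
	return 0;
}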
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 4aacaa9b478a..714b18790caf 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
config CAN_AT91
tristate "Atmel AT91 onchip CAN controller"
- depends on ARM
+ depends on ARCH_AT91 || COMPILE_TEST
---help---
This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
and AT91SAM9X5 processors.
@@ -104,7 +104,7 @@ config CAN_FLEXCAN
config PCH_CAN
tristate "Intel EG20T PCH CAN controller"
- depends on PCI
+ depends on PCI && (X86_32 || COMPILE_TEST)
---help---
This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which
is an IOH for x86 embedded processor (Intel Atom E6xx series).
@@ -119,6 +119,16 @@ config CAN_GRCAN
endian syntheses of the cores would need some modifications on
the hardware level to work.
+config CAN_RCAR
+ tristate "Renesas R-Car CAN controller"
+ depends on ARM
+ ---help---
+ Say Y here if you want to use the CAN controller found on Renesas R-Car
+ SoCs.
+
+ To compile this driver as a module, choose M here: the module will
+ be called rcar_can.
+
source "drivers/net/can/mscan/Kconfig"
source "drivers/net/can/sja1000/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index c42058868b0f..90f538c73f8c 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -25,5 +25,6 @@ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o
obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o
obj-$(CONFIG_PCH_CAN) += pch_can.o
obj-$(CONFIG_CAN_GRCAN) += grcan.o
+obj-$(CONFIG_CAN_RCAR) += rcar_can.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index a2ca820b5373..e154b4cb0f1a 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -252,8 +252,7 @@ static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj
struct c_can_priv *priv = netdev_priv(dev);
int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface);
- priv->write_reg(priv, reg + 1, cmd);
- priv->write_reg(priv, reg, obj);
+ priv->write_reg32(priv, reg, (cmd << 16) | obj);
for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) {
if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY))
@@ -328,8 +327,7 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
change_bit(idx, &priv->tx_dir);
}
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
@@ -391,8 +389,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
- arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
+ arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface));
if (arb & IF_ARB_MSGXTD)
frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -424,12 +421,10 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
struct c_can_priv *priv = netdev_priv(dev);
mask |= BIT(29);
- priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
- priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask);
id |= IF_ARB_MSGVAL;
- priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id);
- priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16);
+ priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id);
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index c56f1b1c11ca..99ad1aa576b0 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -78,6 +78,7 @@ enum reg {
C_CAN_INTPND2_REG,
C_CAN_MSGVAL1_REG,
C_CAN_MSGVAL2_REG,
+ C_CAN_FUNCTION_REG,
};
static const u16 reg_map_c_can[] = {
@@ -129,6 +130,7 @@ static const u16 reg_map_d_can[] = {
[C_CAN_BRPEXT_REG] = 0x0E,
[C_CAN_INT_REG] = 0x10,
[C_CAN_TEST_REG] = 0x14,
+ [C_CAN_FUNCTION_REG] = 0x18,
[C_CAN_TXRQST1_REG] = 0x88,
[C_CAN_TXRQST2_REG] = 0x8A,
[C_CAN_NEWDAT1_REG] = 0x9C,
@@ -176,8 +178,10 @@ struct c_can_priv {
atomic_t tx_active;
unsigned long tx_dir;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
- void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (const struct c_can_priv *priv, enum reg index, u16 val);
+ u32 (*read_reg32) (const struct c_can_priv *priv, enum reg index);
+ void (*write_reg32) (const struct c_can_priv *priv, enum reg index, u32 val);
void __iomem *base;
const u16 *regs;
void *priv; /* for board-specific data */
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 58f71e1fcc4e..5d11e0e4225b 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -47,42 +47,59 @@ struct c_can_pci_data {
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
-static u16 c_can_pci_read_reg_32bit(struct c_can_priv *priv,
+static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv,
enum reg index)
{
return (u16)ioread32(priv->base + 2 * priv->regs[index]);
}
-static void c_can_pci_write_reg_32bit(struct c_can_priv *priv,
+static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
iowrite32((u32)val, priv->base + 2 * priv->regs[index]);
}
+static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable)
{
if (enable) {
@@ -187,6 +204,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
ret = -EINVAL;
goto out_free_c_can;
}
+ priv->read_reg32 = c_can_pci_read_reg32;
+ priv->write_reg32 = c_can_pci_write_reg32;
priv->raminit = c_can_pci_data->init;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 1df0b322d1e4..824108cd9fd5 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -40,6 +40,7 @@
#define CAN_RAMINIT_START_MASK(i) (0x001 << (i))
#define CAN_RAMINIT_DONE_MASK(i) (0x100 << (i))
#define CAN_RAMINIT_ALL_MASK(i) (0x101 << (i))
+#define DCAN_RAM_INIT_BIT (1 << 3)
static DEFINE_SPINLOCK(raminit_lock);
/*
* 16-bit c_can registers can be arranged differently in the memory
@@ -47,31 +48,31 @@ static DEFINE_SPINLOCK(raminit_lock);
* registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
* Handle the same by providing a common read/write interface.
*/
-static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + priv->regs[index]);
}
-static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index)
{
return readw(priv->base + 2 * priv->regs[index]);
}
-static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv,
enum reg index, u16 val)
{
writew(val, priv->base + 2 * priv->regs[index]);
}
-static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
+static void c_can_hw_raminit_wait_ti(const struct c_can_priv *priv, u32 mask,
u32 val)
{
/* We look only at the bits of our instance. */
@@ -80,7 +81,7 @@ static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask,
udelay(1);
}
-static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
{
u32 mask = CAN_RAMINIT_ALL_MASK(priv->instance);
u32 ctrl;
@@ -96,18 +97,68 @@ static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
if (enable) {
/* Set start bit and wait for the done bit. */
ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
writel(ctrl, priv->raminit_ctrlreg);
ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
- c_can_hw_raminit_wait(priv, ctrl, mask);
+ c_can_hw_raminit_wait_ti(priv, ctrl, mask);
}
spin_unlock(&raminit_lock);
}
+static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ u32 val;
+
+ val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
+
+ return val;
+}
+
+static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ priv->write_reg(priv, index + 1, val >> 16);
+ priv->write_reg(priv, index, val);
+}
+
+static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index)
+{
+ return readl(priv->base + priv->regs[index]);
+}
+
+static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index,
+ u32 val)
+{
+ writel(val, priv->base + priv->regs[index]);
+}
+
+static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask)
+{
+ while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask)
+ udelay(1);
+}
+
+static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable)
+{
+ u32 ctrl;
+
+ ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG);
+ ctrl &= ~DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+
+ if (enable) {
+ ctrl |= DCAN_RAM_INIT_BIT;
+ priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl);
+ c_can_hw_raminit_wait(priv, ctrl);
+ }
+}
+
static struct platform_device_id c_can_id_table[] = {
[BOSCH_C_CAN_PLATFORM] = {
.name = KBUILD_MODNAME,
@@ -201,11 +252,15 @@ static int c_can_plat_probe(struct platform_device *pdev)
case IORESOURCE_MEM_32BIT:
priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
case IORESOURCE_MEM_16BIT:
default:
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = c_can_plat_read_reg32;
+ priv->write_reg32 = c_can_plat_write_reg32;
break;
}
break;
@@ -214,6 +269,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ priv->read_reg32 = d_can_plat_read_reg32;
+ priv->write_reg32 = d_can_plat_write_reg32;
if (pdev->dev.of_node)
priv->instance = of_alias_get_id(pdev->dev.of_node, "d_can");
@@ -221,11 +278,20 @@ static int c_can_plat_probe(struct platform_device *pdev)
priv->instance = pdev->id;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /* Not all D_CAN modules have a separate register for the D_CAN
+ * RAM initialization. Use default RAM init bit in D_CAN module
+ * if not specified in DT.
+ */
+ if (!res) {
+ priv->raminit = c_can_hw_raminit;
+ break;
+ }
+
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
- priv->raminit = c_can_hw_raminit;
+ priv->raminit = c_can_hw_raminit_ti;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index f19be5269e7b..81c711719490 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
config CAN_MSCAN
- depends on PPC || M68K
+ depends on PPC
tristate "Support for Freescale MSCAN based chips"
---help---
The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
new file mode 100644
index 000000000000..5268d216ecfa
--- /dev/null
+++ b/drivers/net/can/rcar_can.c
@@ -0,0 +1,876 @@
+/* Renesas R-Car CAN device driver
+ *
+ * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/can/led.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
+#include <linux/can/platform/rcar_can.h>
+
+#define RCAR_CAN_DRV_NAME "rcar_can"
+
+/* Mailbox configuration:
+ * mailbox 60 - 63 - Rx FIFO mailboxes
+ * mailbox 56 - 59 - Tx FIFO mailboxes
+ * non-FIFO mailboxes are not used
+ */
+#define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */
+#define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */
+#define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */
+#define RCAR_CAN_FIFO_DEPTH 4
+
+/* Mailbox registers structure */
+struct rcar_can_mbox_regs {
+ u32 id; /* IDE and RTR bits, SID and EID */
+ u8 stub; /* Not used */
+ u8 dlc; /* Data Length Code - bits [0..3] */
+ u8 data[8]; /* Data Bytes */
+ u8 tsh; /* Time Stamp Higher Byte */
+ u8 tsl; /* Time Stamp Lower Byte */
+};
+
+struct rcar_can_regs {
+ struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */
+ u32 mkr_2_9[8]; /* Mask Registers 2-9 */
+ u32 fidcr[2]; /* FIFO Received ID Compare Register */
+ u32 mkivlr1; /* Mask Invalid Register 1 */
+ u32 mier1; /* Mailbox Interrupt Enable Register 1 */
+ u32 mkr_0_1[2]; /* Mask Registers 0-1 */
+ u32 mkivlr0; /* Mask Invalid Register 0 */
+ u32 mier0; /* Mailbox Interrupt Enable Register 0 */
+ u8 pad_440[0x3c0];
+ u8 mctl[64]; /* Message Control Registers */
+ u16 ctlr; /* Control Register */
+ u16 str; /* Status register */
+ u8 bcr[3]; /* Bit Configuration Register */
+ u8 clkr; /* Clock Select Register */
+ u8 rfcr; /* Receive FIFO Control Register */
+ u8 rfpcr; /* Receive FIFO Pointer Control Register */
+ u8 tfcr; /* Transmit FIFO Control Register */
+ u8 tfpcr; /* Transmit FIFO Pointer Control Register */
+ u8 eier; /* Error Interrupt Enable Register */
+ u8 eifr; /* Error Interrupt Factor Judge Register */
+ u8 recr; /* Receive Error Count Register */
+ u8 tecr; /* Transmit Error Count Register */
+ u8 ecsr; /* Error Code Store Register */
+ u8 cssr; /* Channel Search Support Register */
+ u8 mssr; /* Mailbox Search Status Register */
+ u8 msmr; /* Mailbox Search Mode Register */
+ u16 tsr; /* Time Stamp Register */
+ u8 afsr; /* Acceptance Filter Support Register */
+ u8 pad_857;
+ u8 tcr; /* Test Control Register */
+ u8 pad_859[7];
+ u8 ier; /* Interrupt Enable Register */
+ u8 isr; /* Interrupt Status Register */
+ u8 pad_862;
+ u8 mbsmr; /* Mailbox Search Mask Register */
+};
+
+struct rcar_can_priv {
+ struct can_priv can; /* Must be the first member! */
+ struct net_device *ndev;
+ struct napi_struct napi;
+ struct rcar_can_regs __iomem *regs;
+ struct clk *clk;
+ u8 tx_dlc[RCAR_CAN_FIFO_DEPTH];
+ u32 tx_head;
+ u32 tx_tail;
+ u8 clock_select;
+ u8 ier;
+};
+
+static const struct can_bittiming_const rcar_can_bittiming_const = {
+ .name = RCAR_CAN_DRV_NAME,
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+/* Control Register bits */
+#define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */
+#define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */
+ /* at bus-off entry */
+#define RCAR_CAN_CTLR_SLPM (1 << 10)
+#define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */
+#define RCAR_CAN_CTLR_CANM_HALT (1 << 9)
+#define RCAR_CAN_CTLR_CANM_RESET (1 << 8)
+#define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8)
+#define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */
+#define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */
+#define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */
+#define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */
+
+/* Status Register bits */
+#define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */
+
+/* FIFO Received ID Compare Registers 0 and 1 bits */
+#define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */
+#define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */
+
+/* Receive FIFO Control Register bits */
+#define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */
+#define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */
+
+/* Transmit FIFO Control Register bits */
+#define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */
+ /* Number Status Bits */
+#define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */
+ /* Message Number Status Bits */
+#define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */
+
+#define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */
+ /* for Rx mailboxes 0-31 */
+#define RCAR_CAN_N_RX_MKREGS2 8
+
+/* Bit Configuration Register settings */
+#define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20)
+#define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8)
+#define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4)
+#define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07)
+
+/* Mailbox and Mask Registers bits */
+#define RCAR_CAN_IDE (1 << 31)
+#define RCAR_CAN_RTR (1 << 30)
+#define RCAR_CAN_SID_SHIFT 18
+
+/* Mailbox Interrupt Enable Register 1 bits */
+#define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */
+#define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */
+
+/* Interrupt Enable Register bits */
+#define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */
+#define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */
+ /* Enable Bit */
+#define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */
+ /* Enable Bit */
+/* Interrupt Status Register bits */
+#define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */
+#define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */
+ /* Status Bit */
+#define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */
+ /* Status Bit */
+
+/* Error Interrupt Enable Register bits */
+#define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */
+#define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */
+ /* Interrupt Enable */
+#define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */
+#define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */
+#define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */
+#define RCAR_CAN_EIER_EPIE (1 << 2) /* Error Passive Interrupt Enable */
+#define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */
+#define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */
+
+/* Error Interrupt Factor Judge Register bits */
+#define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */
+#define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */
+ /* Detect Flag */
+#define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */
+#define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */
+#define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */
+#define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */
+#define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */
+#define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */
+
+/* Error Code Store Register bits */
+#define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */
+#define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */
+#define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */
+#define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */
+#define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */
+#define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */
+#define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */
+#define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */
+
+#define RCAR_CAN_NAPI_WEIGHT 4
+#define MAX_STR_READS 0x100
+
+static void tx_failure_cleanup(struct net_device *ndev)
+{
+ int i;
+
+ for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++)
+ can_free_echo_skb(ndev, i);
+}
+
+static void rcar_can_error(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 eifr, txerr = 0, rxerr = 0;
+
+ /* Propagate the error condition to the CAN stack */
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ eifr = readb(&priv->regs->eifr);
+ if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) {
+ txerr = readb(&priv->regs->tecr);
+ rxerr = readb(&priv->regs->recr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_BEIF) {
+ int rx_errors = 0, tx_errors = 0;
+ u8 ecsr;
+
+ netdev_dbg(priv->ndev, "Bus error interrupt:\n");
+ if (skb) {
+ cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_UNSPEC;
+ }
+ ecsr = readb(&priv->regs->ecsr);
+ if (ecsr & RCAR_CAN_ECSR_ADEF) {
+ netdev_dbg(priv->ndev, "ACK Delimiter Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE0F) {
+ netdev_dbg(priv->ndev, "Bit Error (dominant)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ }
+ if (ecsr & RCAR_CAN_ECSR_BE1F) {
+ netdev_dbg(priv->ndev, "Bit Error (recessive)\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ }
+ if (ecsr & RCAR_CAN_ECSR_CEF) {
+ netdev_dbg(priv->ndev, "CRC Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (ecsr & RCAR_CAN_ECSR_AEF) {
+ netdev_dbg(priv->ndev, "ACK Error\n");
+ tx_errors++;
+ writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[3] |= CAN_ERR_PROT_LOC_ACK;
+ }
+ }
+ if (ecsr & RCAR_CAN_ECSR_FEF) {
+ netdev_dbg(priv->ndev, "Form Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+ if (ecsr & RCAR_CAN_ECSR_SEF) {
+ netdev_dbg(priv->ndev, "Stuff Error\n");
+ rx_errors++;
+ writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr);
+ if (skb)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+
+ priv->can.can_stats.bus_error++;
+ ndev->stats.rx_errors += rx_errors;
+ ndev->stats.tx_errors += tx_errors;
+ writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr);
+ }
+ if (eifr & RCAR_CAN_EIFR_EWIF) {
+ netdev_dbg(priv->ndev, "Error warning interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ }
+ if (eifr & RCAR_CAN_EIFR_EPIF) {
+ netdev_dbg(priv->ndev, "Error passive interrupt\n");
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr);
+ if (skb)
+ cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ }
+ if (eifr & RCAR_CAN_EIFR_BOEIF) {
+ netdev_dbg(priv->ndev, "Bus-off entry interrupt\n");
+ tx_failure_cleanup(ndev);
+ priv->ier = RCAR_CAN_IER_ERSIE;
+ writeb(priv->ier, &priv->regs->ier);
+ priv->can.state = CAN_STATE_BUS_OFF;
+ /* Clear interrupt condition */
+ writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr);
+ can_bus_off(ndev);
+ if (skb)
+ cf->can_id |= CAN_ERR_BUSOFF;
+ }
+ if (eifr & RCAR_CAN_EIFR_ORIF) {
+ netdev_dbg(priv->ndev, "Receive overrun error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ }
+ }
+ if (eifr & RCAR_CAN_EIFR_OLIF) {
+ netdev_dbg(priv->ndev,
+ "Overload Frame Transmission error interrupt\n");
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_errors++;
+ writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr);
+ if (skb) {
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+ }
+ }
+
+ if (skb) {
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+}
+
+static void rcar_can_tx_done(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u8 isr;
+
+ while (1) {
+ u8 unsent = readb(&priv->regs->tfcr);
+
+ unsent = (unsent & RCAR_CAN_TFCR_TFUST) >>
+ RCAR_CAN_TFCR_TFUST_SHIFT;
+ if (priv->tx_head - priv->tx_tail <= unsent)
+ break;
+ stats->tx_packets++;
+ stats->tx_bytes += priv->tx_dlc[priv->tx_tail %
+ RCAR_CAN_FIFO_DEPTH];
+ priv->tx_dlc[priv->tx_tail % RCAR_CAN_FIFO_DEPTH] = 0;
+ can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_tail++;
+ netif_wake_queue(ndev);
+ }
+ /* Clear interrupt */
+ isr = readb(&priv->regs->isr);
+ writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr);
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+}
+
+static irqreturn_t rcar_can_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u8 isr;
+
+ isr = readb(&priv->regs->isr);
+ if (!(isr & priv->ier))
+ return IRQ_NONE;
+
+ if (isr & RCAR_CAN_ISR_ERSF)
+ rcar_can_error(ndev);
+
+ if (isr & RCAR_CAN_ISR_TXFF)
+ rcar_can_tx_done(ndev);
+
+ if (isr & RCAR_CAN_ISR_RXFF) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable Rx FIFO interrupts */
+ priv->ier &= ~RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void rcar_can_set_bittiming(struct net_device *dev)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u32 bcr;
+
+ bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) |
+ RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) |
+ RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1);
+ /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access.
+ * All the registers are big-endian but they get byte-swapped on 32-bit
+ * read/write (but not on 8-bit, contrary to the manuals)...
+ */
+ writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
+}
+
+static void rcar_can_start(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Set controller to known mode:
+ * - FIFO mailbox mode
+ * - accept all messages
+ * - overrun mode
+ * CAN is in sleep mode after MCU hardware or software reset.
+ */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ /* Go to reset mode */
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ rcar_can_set_bittiming(ndev);
+ ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */
+ ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */
+ /* at bus-off */
+ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */
+ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */
+ writew(ctlr, &priv->regs->ctlr);
+
+ /* Accept all SID and EID */
+ writel(0, &priv->regs->mkr_2_9[6]);
+ writel(0, &priv->regs->mkr_2_9[7]);
+ /* In FIFO mailbox mode, write "0" to bits 24 to 31 */
+ writel(0, &priv->regs->mkivlr1);
+ /* Accept all frames */
+ writel(0, &priv->regs->fidcr[0]);
+ writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]);
+ /* Enable and configure FIFO mailbox interrupts */
+ writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1);
+
+ priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE |
+ RCAR_CAN_IER_TXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+
+ /* Accumulate error codes */
+ writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr);
+ /* Enable error interrupts */
+ writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE |
+ (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ?
+ RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE |
+ RCAR_CAN_EIER_OLIE, &priv->regs->eier);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ /* Go to operation mode */
+ writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST))
+ break;
+ }
+ /* Enable Rx and Tx FIFO */
+ writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr);
+ writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr);
+}
+
+static int rcar_can_open(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_prepare_enable() failed, error %d\n",
+ err);
+ goto out;
+ }
+ err = open_candev(ndev);
+ if (err) {
+ netdev_err(ndev, "open_candev() failed, error %d\n", err);
+ goto out_clock;
+ }
+ napi_enable(&priv->napi);
+ err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
+ goto out_close;
+ }
+ can_led_event(ndev, CAN_LED_EVENT_OPEN);
+ rcar_can_start(ndev);
+ netif_start_queue(ndev);
+ return 0;
+out_close:
+ napi_disable(&priv->napi);
+ close_candev(ndev);
+out_clock:
+ clk_disable_unprepare(priv->clk);
+out:
+ return err;
+}
+
+static void rcar_can_stop(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int i;
+
+ /* Go to (force) reset mode */
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET;
+ writew(ctlr, &priv->regs->ctlr);
+ for (i = 0; i < MAX_STR_READS; i++) {
+ if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)
+ break;
+ }
+ writel(0, &priv->regs->mier0);
+ writel(0, &priv->regs->mier1);
+ writeb(0, &priv->regs->ier);
+ writeb(0, &priv->regs->eier);
+ /* Go to sleep mode */
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_STOPPED;
+}
+
+static int rcar_can_close(struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ rcar_can_stop(ndev);
+ free_irq(ndev->irq, ndev);
+ napi_disable(&priv->napi);
+ clk_disable_unprepare(priv->clk);
+ close_candev(ndev);
+ can_led_event(ndev, CAN_LED_EVENT_STOP);
+ return 0;
+}
+
+static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ u32 data, i;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
+ data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE;
+ else /* Standard frame format */
+ data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT;
+
+ if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */
+ data |= RCAR_CAN_RTR;
+ } else {
+ for (i = 0; i < cf->can_dlc; i++)
+ writeb(cf->data[i],
+ &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]);
+ }
+
+ writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id);
+
+ writeb(cf->can_dlc, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc);
+
+ priv->tx_dlc[priv->tx_head % RCAR_CAN_FIFO_DEPTH] = cf->can_dlc;
+ can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH);
+ priv->tx_head++;
+ /* Start Tx: write 0xff to the TFPCR register to increment
+ * the CPU-side pointer for the transmit FIFO to the next
+ * mailbox location
+ */
+ writeb(0xff, &priv->regs->tfpcr);
+ /* Stop the queue if we've filled all FIFO entries */
+ if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH)
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops rcar_can_netdev_ops = {
+ .ndo_open = rcar_can_open,
+ .ndo_stop = rcar_can_close,
+ .ndo_start_xmit = rcar_can_start_xmit,
+};
+
+static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u32 data;
+ u8 dlc;
+
+ skb = alloc_can_skb(priv->ndev, &cf);
+ if (!skb) {
+ stats->rx_dropped++;
+ return;
+ }
+
+ data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id);
+ if (data & RCAR_CAN_IDE)
+ cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ else
+ cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK;
+
+ dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc);
+ cf->can_dlc = get_can_dlc(dlc);
+ if (data & RCAR_CAN_RTR) {
+ cf->can_id |= CAN_RTR_FLAG;
+ } else {
+ for (dlc = 0; dlc < cf->can_dlc; dlc++)
+ cf->data[dlc] =
+ readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]);
+ }
+
+ can_led_event(priv->ndev, CAN_LED_EVENT_RX);
+
+ stats->rx_bytes += cf->can_dlc;
+ stats->rx_packets++;
+ netif_receive_skb(skb);
+}
+
+static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
+{
+ struct rcar_can_priv *priv = container_of(napi,
+ struct rcar_can_priv, napi);
+ int num_pkts;
+
+ for (num_pkts = 0; num_pkts < quota; num_pkts++) {
+ u8 rfcr, isr;
+
+ isr = readb(&priv->regs->isr);
+ /* Clear interrupt bit */
+ if (isr & RCAR_CAN_ISR_RXFF)
+ writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr);
+ rfcr = readb(&priv->regs->rfcr);
+ if (rfcr & RCAR_CAN_RFCR_RFEST)
+ break;
+ rcar_can_rx_pkt(priv);
+ /* Write 0xff to the RFPCR register to increment
+ * the CPU-side pointer for the receive FIFO
+ * to the next mailbox location
+ */
+ writeb(0xff, &priv->regs->rfpcr);
+ }
+ /* All packets processed */
+ if (num_pkts < quota) {
+ napi_complete(napi);
+ priv->ier |= RCAR_CAN_IER_RXFIE;
+ writeb(priv->ier, &priv->regs->ier);
+ }
+ return num_pkts;
+}
+
+static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ rcar_can_start(ndev);
+ netif_wake_queue(ndev);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int rcar_can_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct rcar_can_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+ bec->txerr = readb(&priv->regs->tecr);
+ bec->rxerr = readb(&priv->regs->recr);
+ clk_disable_unprepare(priv->clk);
+ return 0;
+}
+
+static int rcar_can_probe(struct platform_device *pdev)
+{
+ struct rcar_can_platform_data *pdata;
+ struct rcar_can_priv *priv;
+ struct net_device *ndev;
+ struct resource *mem;
+ void __iomem *addr;
+ int err = -ENODEV;
+ int irq;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ goto fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "No IRQ resource\n");
+ goto fail;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ addr = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(addr)) {
+ err = PTR_ERR(addr);
+ goto fail;
+ }
+
+ ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH);
+ if (!ndev) {
+ dev_err(&pdev->dev, "alloc_candev() failed\n");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ priv = netdev_priv(ndev);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ err = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "cannot get clock: %d\n", err);
+ goto fail_clk;
+ }
+
+ ndev->netdev_ops = &rcar_can_netdev_ops;
+ ndev->irq = irq;
+ ndev->flags |= IFF_ECHO;
+ priv->ndev = ndev;
+ priv->regs = addr;
+ priv->clock_select = pdata->clock_select;
+ priv->can.clock.freq = clk_get_rate(priv->clk);
+ priv->can.bittiming_const = &rcar_can_bittiming_const;
+ priv->can.do_set_mode = rcar_can_do_set_mode;
+ priv->can.do_get_berr_counter = rcar_can_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rcar_can_rx_poll,
+ RCAR_CAN_NAPI_WEIGHT);
+ err = register_candev(ndev);
+ if (err) {
+ dev_err(&pdev->dev, "register_candev() failed, error %d\n",
+ err);
+ goto fail_candev;
+ }
+
+ devm_can_led_init(ndev);
+
+ dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
+ priv->regs, ndev->irq);
+
+ return 0;
+fail_candev:
+ netif_napi_del(&priv->napi);
+fail_clk:
+ free_candev(ndev);
+fail:
+ return err;
+}
+
+static int rcar_can_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+
+ unregister_candev(ndev);
+ netif_napi_del(&priv->napi);
+ free_candev(ndev);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ }
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_CANM_HALT;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr |= RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+ return 0;
+}
+
+static int __maybe_unused rcar_can_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct rcar_can_priv *priv = netdev_priv(ndev);
+ u16 ctlr;
+ int err;
+
+ err = clk_enable(priv->clk);
+ if (err) {
+ netdev_err(ndev, "clk_enable() failed, error %d\n", err);
+ return err;
+ }
+
+ ctlr = readw(&priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_SLPM;
+ writew(ctlr, &priv->regs->ctlr);
+ ctlr &= ~RCAR_CAN_CTLR_CANM;
+ writew(ctlr, &priv->regs->ctlr);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(ndev)) {
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume);
+
+static struct platform_driver rcar_can_driver = {
+ .driver = {
+ .name = RCAR_CAN_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = &rcar_can_pm_ops,
+ },
+ .probe = rcar_can_probe,
+ .remove = rcar_can_remove,
+};
+
+module_platform_driver(rcar_can_driver);
+
+MODULE_AUTHOR("Cogent Embedded, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC");
+MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
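As a worked illustration of the BCR packing done in rcar_can_set_bittiming() above (the numbers are made up, not taken from any real bus configuration): with prop_seg = 2, phase_seg1 = 7, phase_seg2 = 6, sjw = 1 and brp = 10, the register fields become TSEG1 = 8, BPR = 9, SJW = 0 and TSEG2 = 5, i.e.

	bcr = RCAR_CAN_BCR_TSEG1(8) | RCAR_CAN_BCR_BPR(9) |
	      RCAR_CAN_BCR_SJW(0) | RCAR_CAN_BCR_TSEG2(5);
	/* = (8 << 20) | (9 << 8) | (0 << 4) | 5 = 0x00800905 */
	writel((bcr << 8) | priv->clock_select, &priv->regs->bcr);
	/* 32-bit value written: 0x80090500 | clock_select */

so the three BCR bytes and the adjacent CLKR byte are updated in a single 32-bit access, as the comment in the driver explains.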
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bc235f9dc754..5df239e68812 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -951,7 +951,7 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags, DEVICE_NAME, priv);
+ flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
mcp251x_power_enable(priv->transceiver, 0);
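A NULL primary handler passed to request_threaded_irq(), as mcp251x_open() does here, is rejected (-EINVAL) by the genirq core on most interrupt controllers unless IRQF_ONESHOT is set, because the line must stay masked until the IRQ thread completes. A minimal sketch of that pattern, with hypothetical names (my_dev_thread_fn, my_dev_request_irq):

#include <linux/interrupt.h>

static irqreturn_t my_dev_thread_fn(int irq, void *dev_id)
{
	/* Sleeping work (e.g. SPI register access) runs in thread context. */
	return IRQ_HANDLED;
}

static int my_dev_request_irq(unsigned int irq, void *dev_id)
{
	/* NULL hard-IRQ handler: the core keeps the line masked until the
	 * thread returns, which is only allowed with IRQF_ONESHOT.
	 */
	return request_threaded_irq(irq, NULL, my_dev_thread_fn,
				    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				    "my_dev", dev_id);
}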
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 0b918ebad76b..a77db919363c 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -13,6 +13,14 @@ config CAN_ESD_USB2
This driver supports the CAN-USB/2 interface
from esd electronic system design gmbh (http://www.esd.eu).
+config CAN_GS_USB
+ tristate "Geschwister Schneider UG interfaces"
+ ---help---
+ This driver supports the Geschwister Schneider USB/CAN devices.
+ If unsure, choose N. Otherwise, choose Y for built-in support, or
+ M to compile this driver as a module (the module will be
+ named gs_usb).
+
config CAN_KVASER_USB
tristate "Kvaser CAN/USB interface"
---help---
diff --git a/drivers/net/can/usb/Makefile b/drivers/net/can/usb/Makefile
index becef460a91a..7b9a393b1ac8 100644
--- a/drivers/net/can/usb/Makefile
+++ b/drivers/net/can/usb/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
obj-$(CONFIG_CAN_ESD_USB2) += esd_usb2.o
+obj-$(CONFIG_CAN_GS_USB) += gs_usb.o
obj-$(CONFIG_CAN_KVASER_USB) += kvaser_usb.o
obj-$(CONFIG_CAN_PEAK_USB) += peak_usb/
obj-$(CONFIG_CAN_8DEV_USB) += usb_8dev.o
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
new file mode 100644
index 000000000000..c9f1f99682b1
--- /dev/null
+++ b/drivers/net/can/usb/gs_usb.c
@@ -0,0 +1,971 @@
+/* CAN driver for Geschwister Schneider USB/CAN devices.
+ *
+ * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ *
+ * Many thanks to all socketcan devs!
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/usb.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+/* Device specific constants */
+#define USB_GSUSB_1_VENDOR_ID 0x1d50
+#define USB_GSUSB_1_PRODUCT_ID 0x606f
+
+#define GSUSB_ENDPOINT_IN 1
+#define GSUSB_ENDPOINT_OUT 2
+
+/* Device specific constants */
+enum gs_usb_breq {
+ GS_USB_BREQ_HOST_FORMAT = 0,
+ GS_USB_BREQ_BITTIMING,
+ GS_USB_BREQ_MODE,
+ GS_USB_BREQ_BERR,
+ GS_USB_BREQ_BT_CONST,
+ GS_USB_BREQ_DEVICE_CONFIG
+};
+
+enum gs_can_mode {
+ /* reset a channel. turns it off */
+ GS_CAN_MODE_RESET = 0,
+ /* starts a channel */
+ GS_CAN_MODE_START
+};
+
+enum gs_can_state {
+ GS_CAN_STATE_ERROR_ACTIVE = 0,
+ GS_CAN_STATE_ERROR_WARNING,
+ GS_CAN_STATE_ERROR_PASSIVE,
+ GS_CAN_STATE_BUS_OFF,
+ GS_CAN_STATE_STOPPED,
+ GS_CAN_STATE_SLEEPING
+};
+
+/* data types passed between host and device */
+struct gs_host_config {
+ u32 byte_order;
+} __packed;
+/* All data exchanged between host and device is exchanged in host byte order,
+ * thanks to the struct gs_host_config byte_order member, which is sent first
+ * to indicate the desired byte order.
+ */
+
+struct gs_device_config {
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ u8 icount;
+ u32 sw_version;
+ u32 hw_version;
+} __packed;
+
+#define GS_CAN_MODE_NORMAL 0
+#define GS_CAN_MODE_LISTEN_ONLY (1<<0)
+#define GS_CAN_MODE_LOOP_BACK (1<<1)
+#define GS_CAN_MODE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_MODE_ONE_SHOT (1<<3)
+
+struct gs_device_mode {
+ u32 mode;
+ u32 flags;
+} __packed;
+
+struct gs_device_state {
+ u32 state;
+ u32 rxerr;
+ u32 txerr;
+} __packed;
+
+struct gs_device_bittiming {
+ u32 prop_seg;
+ u32 phase_seg1;
+ u32 phase_seg2;
+ u32 sjw;
+ u32 brp;
+} __packed;
+
+#define GS_CAN_FEATURE_LISTEN_ONLY (1<<0)
+#define GS_CAN_FEATURE_LOOP_BACK (1<<1)
+#define GS_CAN_FEATURE_TRIPLE_SAMPLE (1<<2)
+#define GS_CAN_FEATURE_ONE_SHOT (1<<3)
+
+struct gs_device_bt_const {
+ u32 feature;
+ u32 fclk_can;
+ u32 tseg1_min;
+ u32 tseg1_max;
+ u32 tseg2_min;
+ u32 tseg2_max;
+ u32 sjw_max;
+ u32 brp_min;
+ u32 brp_max;
+ u32 brp_inc;
+} __packed;
+
+#define GS_CAN_FLAG_OVERFLOW 1
+
+struct gs_host_frame {
+ u32 echo_id;
+ u32 can_id;
+
+ u8 can_dlc;
+ u8 channel;
+ u8 flags;
+ u8 reserved;
+
+ u8 data[8];
+} __packed;
+/* The GS USB devices make use of the same flags and masks as in
+ * linux/can.h and linux/can/error.h, and no additional mapping is necessary.
+ */
+
+/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
+#define GS_MAX_TX_URBS 10
+/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
+#define GS_MAX_RX_URBS 30
+/* Maximum number of interfaces the driver supports per device.
+ * Current hardware only supports 2 interfaces; future hardware may support more.
+ */
+#define GS_MAX_INTF 2
+
+struct gs_tx_context {
+ struct gs_can *dev;
+ unsigned int echo_id;
+};
+
+struct gs_can {
+ struct can_priv can; /* must be the first member */
+
+ struct gs_usb *parent;
+
+ struct net_device *netdev;
+ struct usb_device *udev;
+ struct usb_interface *iface;
+
+ struct can_bittiming_const bt_const;
+ unsigned int channel; /* channel number */
+
+ /* This lock prevents a race condition between xmit and receive. */
+ spinlock_t tx_ctx_lock;
+ struct gs_tx_context tx_context[GS_MAX_TX_URBS];
+
+ struct usb_anchor tx_submitted;
+ atomic_t active_tx_urbs;
+};
+
+/* usb interface struct */
+struct gs_usb {
+ struct gs_can *canch[GS_MAX_INTF];
+ struct usb_anchor rx_submitted;
+ atomic_t active_channels;
+ struct usb_device *udev;
+};
+
+/* 'allocate' a tx context.
+ * returns a valid tx context or NULL if there is no space.
+ */
+static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
+{
+ int i = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+
+ for (; i < GS_MAX_TX_URBS; i++) {
+ if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
+ dev->tx_context[i].echo_id = i;
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[i];
+ }
+ }
+
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return NULL;
+}
+
+/* releases a tx context
+ */
+static void gs_free_tx_context(struct gs_tx_context *txc)
+{
+ txc->echo_id = GS_MAX_TX_URBS;
+}
+
+/* Get a tx context by id.
+ */
+static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev, unsigned int id)
+{
+ unsigned long flags;
+
+ if (id < GS_MAX_TX_URBS) {
+ spin_lock_irqsave(&dev->tx_ctx_lock, flags);
+ if (dev->tx_context[id].echo_id == id) {
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ return &dev->tx_context[id];
+ }
+ spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
+ }
+ return NULL;
+}
+
+static int gs_cmd_reset(struct gs_usb *gsusb, struct gs_can *gsdev)
+{
+ struct gs_device_mode *dm;
+ struct usb_interface *intf = gsdev->iface;
+ int rc;
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ dm->mode = GS_CAN_MODE_RESET;
+
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ gsdev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ return rc;
+}
+
+static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
+{
+ struct can_device_stats *can_stats = &dev->can.can_stats;
+
+ if (cf->can_id & CAN_ERR_RESTARTED) {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ can_stats->restarts++;
+ } else if (cf->can_id & CAN_ERR_BUSOFF) {
+ dev->can.state = CAN_STATE_BUS_OFF;
+ can_stats->bus_off++;
+ } else if (cf->can_id & CAN_ERR_CRTL) {
+ if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
+ dev->can.state = CAN_STATE_ERROR_WARNING;
+ can_stats->error_warning++;
+ } else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
+ (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
+ dev->can.state = CAN_STATE_ERROR_PASSIVE;
+ can_stats->error_passive++;
+ } else {
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+ }
+}
+
+static void gs_usb_recieve_bulk_callback(struct urb *urb)
+{
+ struct gs_usb *usbcan = urb->context;
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct net_device_stats *stats;
+ struct gs_host_frame *hf = urb->transfer_buffer;
+ struct gs_tx_context *txc;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+
+ BUG_ON(!usbcan);
+
+ switch (urb->status) {
+ case 0: /* success */
+ break;
+ case -ENOENT:
+ case -ESHUTDOWN:
+ return;
+ default:
+ /* do not resubmit aborted urbs, e.g. when the device goes down */
+ return;
+ }
+
+ /* device reports out of range channel id */
+ if (hf->channel >= GS_MAX_INTF)
+ goto resubmit_urb;
+
+ dev = usbcan->canch[hf->channel];
+
+ netdev = dev->netdev;
+ stats = &netdev->stats;
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (hf->echo_id == -1) { /* normal rx */
+ skb = alloc_can_skb(dev->netdev, &cf);
+ if (!skb)
+ return;
+
+ cf->can_id = hf->can_id;
+
+ cf->can_dlc = get_can_dlc(hf->can_dlc);
+ memcpy(cf->data, hf->data, 8);
+
+ /* ERROR frames tell us information about the controller */
+ if (hf->can_id & CAN_ERR_FLAG)
+ gs_update_state(dev, cf);
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += hf->can_dlc;
+
+ netif_rx(skb);
+ } else { /* echo of a frame we sent earlier */
+ if (hf->echo_id >= GS_MAX_TX_URBS) {
+ netdev_err(netdev,
+ "Unexpected out of range echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += hf->can_dlc;
+
+ txc = gs_get_tx_context(dev, hf->echo_id);
+
+ /* bad devices send bad echo_ids. */
+ if (!txc) {
+ netdev_err(netdev,
+ "Unexpected unused echo id %d\n",
+ hf->echo_id);
+ goto resubmit_urb;
+ }
+
+ can_get_echo_skb(netdev, hf->echo_id);
+
+ gs_free_tx_context(txc);
+
+ netif_wake_queue(netdev);
+ }
+
+ if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
+ skb = alloc_can_err_skb(netdev, &cf);
+ if (!skb)
+ goto resubmit_urb;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->can_dlc = CAN_ERR_DLC;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ netif_rx(skb);
+ }
+
+ resubmit_urb:
+ usb_fill_bulk_urb(urb,
+ usbcan->udev,
+ usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ hf,
+ sizeof(struct gs_host_frame),
+ gs_usb_recieve_bulk_callback,
+ usbcan
+ );
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+
+ /* On a USB failure, take down all interfaces */
+ if (rc == -ENODEV) {
+ for (rc = 0; rc < GS_MAX_INTF; rc++) {
+ if (usbcan->canch[rc])
+ netif_device_detach(usbcan->canch[rc]->netdev);
+ }
+ }
+}
+
+static int gs_usb_set_bittiming(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ struct usb_interface *intf = dev->iface;
+ int rc;
+ struct gs_device_bittiming *dbt;
+
+ dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
+ if (!dbt)
+ return -ENOMEM;
+
+ dbt->prop_seg = bt->prop_seg;
+ dbt->phase_seg1 = bt->phase_seg1;
+ dbt->phase_seg2 = bt->phase_seg2;
+ dbt->sjw = bt->sjw;
+ dbt->brp = bt->brp;
+
+ /* request bit timings */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dbt,
+ sizeof(*dbt),
+ 1000);
+
+ kfree(dbt);
+
+ if (rc < 0)
+ dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
+ rc);
+
+ return rc;
+}
+
+static void gs_usb_xmit_callback(struct urb *urb)
+{
+ struct gs_tx_context *txc = urb->context;
+ struct gs_can *dev = txc->dev;
+ struct net_device *netdev = dev->netdev;
+
+ if (urb->status)
+ netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+
+ usb_free_coherent(urb->dev,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer,
+ urb->transfer_dma);
+
+ atomic_dec(&dev->active_tx_urbs);
+
+ if (!netif_device_present(netdev))
+ return;
+
+ if (netif_queue_stopped(netdev))
+ netif_wake_queue(netdev);
+}
+
+static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct net_device_stats *stats = &dev->netdev->stats;
+ struct urb *urb;
+ struct gs_host_frame *hf;
+ struct can_frame *cf;
+ int rc;
+ unsigned int idx;
+ struct gs_tx_context *txc;
+
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
+ /* find an empty context to keep track of transmission */
+ txc = gs_alloc_tx_context(dev);
+ if (!txc)
+ return NETDEV_TX_BUSY;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(netdev, "No memory left for URB\n");
+ goto nomem_urb;
+ }
+
+ hf = usb_alloc_coherent(dev->udev, sizeof(*hf), GFP_ATOMIC,
+ &urb->transfer_dma);
+ if (!hf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ goto nomem_hf;
+ }
+
+ idx = txc->echo_id;
+
+ if (idx >= GS_MAX_TX_URBS) {
+ netdev_err(netdev, "Invalid tx context %d\n", idx);
+ goto badidx;
+ }
+
+ hf->echo_id = idx;
+ hf->channel = dev->channel;
+
+ cf = (struct can_frame *)skb->data;
+
+ hf->can_id = cf->can_id;
+ hf->can_dlc = cf->can_dlc;
+ memcpy(hf->data, cf->data, cf->can_dlc);
+
+ usb_fill_bulk_urb(urb, dev->udev,
+ usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ hf,
+ sizeof(*hf),
+ gs_usb_xmit_callback,
+ txc);
+
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ usb_anchor_urb(urb, &dev->tx_submitted);
+
+ can_put_echo_skb(skb, netdev, idx);
+
+ atomic_inc(&dev->active_tx_urbs);
+
+ rc = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(rc)) { /* usb send failed */
+ atomic_dec(&dev->active_tx_urbs);
+
+ can_free_echo_skb(netdev, idx);
+ gs_free_tx_context(txc);
+
+ usb_unanchor_urb(urb);
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+
+
+ if (rc == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
+ stats->tx_dropped++;
+ }
+ } else {
+ /* Slow down tx path */
+ if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
+ netif_stop_queue(netdev);
+ }
+
+ /* let usb core take care of this urb */
+ usb_free_urb(urb);
+
+ return NETDEV_TX_OK;
+
+ badidx:
+ usb_free_coherent(dev->udev,
+ sizeof(*hf),
+ hf,
+ urb->transfer_dma);
+ nomem_hf:
+ usb_free_urb(urb);
+
+ nomem_urb:
+ gs_free_tx_context(txc);
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static int gs_can_open(struct net_device *netdev)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+ int rc, i;
+ struct gs_device_mode *dm;
+ u32 ctrlmode;
+
+ rc = open_candev(netdev);
+ if (rc)
+ return rc;
+
+ if (atomic_add_return(1, &parent->active_channels) == 1) {
+ for (i = 0; i < GS_MAX_RX_URBS; i++) {
+ struct urb *urb;
+ u8 *buf;
+
+ /* alloc rx urb */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ netdev_err(netdev,
+ "No memory left for URB\n");
+ return -ENOMEM;
+ }
+
+ /* alloc rx buffer */
+ buf = usb_alloc_coherent(dev->udev,
+ sizeof(struct gs_host_frame),
+ GFP_KERNEL,
+ &urb->transfer_dma);
+ if (!buf) {
+ netdev_err(netdev,
+ "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+
+ /* fill, anchor, and submit rx urb */
+ usb_fill_bulk_urb(urb,
+ dev->udev,
+ usb_rcvbulkpipe(dev->udev,
+ GSUSB_ENDPOINT_IN),
+ buf,
+ sizeof(struct gs_host_frame),
+ gs_usb_recieve_bulk_callback,
+ parent);
+ urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+
+ usb_anchor_urb(urb, &parent->rx_submitted);
+
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+ if (rc) {
+ if (rc == -ENODEV)
+ netif_device_detach(dev->netdev);
+
+ netdev_err(netdev,
+ "usb_submit failed (err=%d)\n",
+ rc);
+
+ usb_unanchor_urb(urb);
+ break;
+ }
+
+ /* Drop reference,
+ * USB core will take care of freeing it
+ */
+ usb_free_urb(urb);
+ }
+ }
+
+ dm = kmalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return -ENOMEM;
+
+ /* flags */
+ ctrlmode = dev->can.ctrlmode;
+ dm->flags = 0;
+
+ if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
+ dm->flags |= GS_CAN_MODE_LOOP_BACK;
+ else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+
+ /* Controller is not allowed to retry TX;
+ * this mode is not available on Atmel's UC3C hardware.
+ */
+ if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ dm->flags |= GS_CAN_MODE_ONE_SHOT;
+
+ if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+
+ /* finally start device */
+ dm->mode = GS_CAN_MODE_START;
+ rc = usb_control_msg(interface_to_usbdev(dev->iface),
+ usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ dev->channel,
+ 0,
+ dm,
+ sizeof(*dm),
+ 1000);
+
+ if (rc < 0) {
+ netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
+ kfree(dm);
+ return rc;
+ }
+
+ kfree(dm);
+
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
+ netif_start_queue(netdev);
+
+ return 0;
+}
+
+static int gs_can_close(struct net_device *netdev)
+{
+ int rc;
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_usb *parent = dev->parent;
+
+ netif_stop_queue(netdev);
+
+ /* Stop polling */
+ if (atomic_dec_and_test(&parent->active_channels))
+ usb_kill_anchored_urbs(&parent->rx_submitted);
+
+ /* Stop sending URBs */
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+
+ /* reset the device */
+ rc = gs_cmd_reset(parent, dev);
+ if (rc < 0)
+ netdev_warn(netdev, "Couldn't shut down device (err=%d)\n", rc);
+
+ /* reset tx contexts */
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+ /* close the netdev */
+ close_candev(netdev);
+
+ return 0;
+}
+
+static const struct net_device_ops gs_usb_netdev_ops = {
+ .ndo_open = gs_can_open,
+ .ndo_stop = gs_can_close,
+ .ndo_start_xmit = gs_can_start_xmit,
+};
+
+static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
+{
+ struct gs_can *dev;
+ struct net_device *netdev;
+ int rc;
+ struct gs_device_bt_const *bt_const;
+
+ bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
+ if (!bt_const)
+ return ERR_PTR(-ENOMEM);
+
+ /* fetch bit timing constants */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ channel,
+ 0,
+ bt_const,
+ sizeof(*bt_const),
+ 1000);
+
+ if (rc < 0) {
+ dev_err(&intf->dev,
+ "Couldn't get bit timing const for channel (err=%d)\n",
+ rc);
+ kfree(bt_const);
+ return ERR_PTR(rc);
+ }
+
+ /* create netdev */
+ netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
+ if (!netdev) {
+ dev_err(&intf->dev, "Couldn't allocate candev\n");
+ kfree(bt_const);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dev = netdev_priv(netdev);
+
+ netdev->netdev_ops = &gs_usb_netdev_ops;
+
+ netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
+
+ /* dev setup */
+ strcpy(dev->bt_const.name, "gs_usb");
+ dev->bt_const.tseg1_min = bt_const->tseg1_min;
+ dev->bt_const.tseg1_max = bt_const->tseg1_max;
+ dev->bt_const.tseg2_min = bt_const->tseg2_min;
+ dev->bt_const.tseg2_max = bt_const->tseg2_max;
+ dev->bt_const.sjw_max = bt_const->sjw_max;
+ dev->bt_const.brp_min = bt_const->brp_min;
+ dev->bt_const.brp_max = bt_const->brp_max;
+ dev->bt_const.brp_inc = bt_const->brp_inc;
+
+ dev->udev = interface_to_usbdev(intf);
+ dev->iface = intf;
+ dev->netdev = netdev;
+ dev->channel = channel;
+
+ init_usb_anchor(&dev->tx_submitted);
+ atomic_set(&dev->active_tx_urbs, 0);
+ spin_lock_init(&dev->tx_ctx_lock);
+ for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
+ dev->tx_context[rc].dev = dev;
+ dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
+ }
+
+ /* can setup */
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = bt_const->fclk_can;
+ dev->can.bittiming_const = &dev->bt_const;
+ dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+ dev->can.ctrlmode_supported = 0;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
+
+ if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
+
+ if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+
+ if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+ dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
+
+ kfree(bt_const);
+
+ SET_NETDEV_DEV(netdev, &intf->dev);
+
+ rc = register_candev(dev->netdev);
+ if (rc) {
+ free_candev(dev->netdev);
+ dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
+ return ERR_PTR(rc);
+ }
+
+ return dev;
+}
+
+static void gs_destroy_candev(struct gs_can *dev)
+{
+ unregister_candev(dev->netdev);
+ usb_kill_anchored_urbs(&dev->tx_submitted);
+ /* dev is the candev's private data; free_candev() releases it */
+ free_candev(dev->netdev);
+}
+
+static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct gs_usb *dev;
+ int rc = -ENOMEM;
+ unsigned int icount, i;
+ struct gs_host_config *hconf;
+ struct gs_device_config *dconf;
+
+ hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
+ if (!hconf)
+ return -ENOMEM;
+
+ hconf->byte_order = 0x0000beef;
+
+ /* send host config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_sndctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ hconf,
+ sizeof(*hconf),
+ 1000);
+
+ kfree(hconf);
+
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
+ rc);
+ return rc;
+ }
+
+ dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
+ if (!dconf)
+ return -ENOMEM;
+
+ /* read device config */
+ rc = usb_control_msg(interface_to_usbdev(intf),
+ usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN|USB_TYPE_VENDOR|USB_RECIP_INTERFACE,
+ 1,
+ intf->altsetting[0].desc.bInterfaceNumber,
+ dconf,
+ sizeof(*dconf),
+ 1000);
+ if (rc < 0) {
+ dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
+ rc);
+
+ kfree(dconf);
+
+ return rc;
+ }
+
+ icount = dconf->icount+1;
+
+ kfree(dconf);
+
+ dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+
+ if (icount > GS_MAX_INTF) {
+ dev_err(&intf->dev,
+ "Driver cannot handle more that %d CAN interfaces\n",
+ GS_MAX_INTF);
+ return -EINVAL;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ init_usb_anchor(&dev->rx_submitted);
+
+ atomic_set(&dev->active_channels, 0);
+
+ usb_set_intfdata(intf, dev);
+ dev->udev = interface_to_usbdev(intf);
+
+ for (i = 0; i < icount; i++) {
+ dev->canch[i] = gs_make_candev(i, intf);
+ if (IS_ERR_OR_NULL(dev->canch[i])) {
+ rc = dev->canch[i] ? PTR_ERR(dev->canch[i]) : -ENOMEM;
+
+ /* on failure destroy previously created candevs */
+ icount = i;
+ for (i = 0; i < icount; i++) {
+ gs_destroy_candev(dev->canch[i]);
+ dev->canch[i] = NULL;
+ }
+ kfree(dev);
+ return rc;
+ }
+ dev->canch[i]->parent = dev;
+ }
+
+ return 0;
+}
+
+static void gs_usb_disconnect(struct usb_interface *intf)
+{
+ unsigned int i;
+ struct gs_usb *dev = usb_get_intfdata(intf);
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!dev) {
+ dev_err(&intf->dev, "Disconnect (nodata)\n");
+ return;
+ }
+
+ for (i = 0; i < GS_MAX_INTF; i++) {
+ struct gs_can *can = dev->canch[i];
+
+ if (!can)
+ continue;
+
+ gs_destroy_candev(can);
+ }
+
+ usb_kill_anchored_urbs(&dev->rx_submitted);
+}
+
+static const struct usb_device_id gs_usb_table[] = {
+ {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, gs_usb_table);
+
+static struct usb_driver gs_usb_driver = {
+ .name = "gs_usb",
+ .probe = gs_usb_probe,
+ .disconnect = gs_usb_disconnect,
+ .id_table = gs_usb_table,
+};
+
+module_usb_driver(gs_usb_driver);
+
+MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
+MODULE_DESCRIPTION(
+"Socket CAN device driver for Geschwister Schneider Technologie-, "
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0202ad..f23ef321606c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_AIC_LARGE_PKT_DIFF 3
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
void *devid;
};
+/* Store only the lower range. Higher range is given by fw. */
+struct enic_intr_mod_range {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+ u32 rx_rate;
+ u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS 3
+#define ENIC_LINK_SPEED_10G 10000
+#define ENIC_LINK_SPEED_4G 4000
+#define ENIC_LINK_40G_INDEX 2
+#define ENIC_LINK_10G_INDEX 1
+#define ENIC_LINK_4G_INDEX 0
+#define ENIC_RX_COALESCE_RANGE_END 125
+#define ENIC_AIC_TS_BREAK 100
+
+struct enic_rx_coal {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+ u32 range_end;
+ u32 use_adaptive_rx_coalesce;
+};
+
/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)
@@ -92,6 +121,7 @@ struct enic {
unsigned int mc_count;
unsigned int uc_count;
u32 port_mtu;
+ struct enic_rx_coal rx_coalesce_setting;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 58a8c67638e3..1882db230e13 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+ int i;
+ int intr;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ }
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+ if (rxcoal->use_adaptive_rx_coalesce)
+ ecmd->use_adaptive_rx_coalesce = 1;
+ ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+ ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
return 0;
}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_coalesce_usecs_high;
+ u32 coalesce_usecs_max;
unsigned int i, intr;
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
+
+ rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+ coalesce_usecs_max);
+ rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+ coalesce_usecs_max);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
intr = enic_legacy_io_intr();
vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
case VNIC_DEV_INTR_MODE_MSI:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
vnic_intr_coalescing_timer_set(&enic->intr[0],
tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
tx_coalesce_usecs);
}
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
+ if (rxcoal->use_adaptive_rx_coalesce) {
+ if (!ecmd->use_adaptive_rx_coalesce) {
+ rxcoal->use_adaptive_rx_coalesce = 0;
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ }
+ } else {
+ if (ecmd->use_adaptive_rx_coalesce)
+ rxcoal->use_adaptive_rx_coalesce = 1;
+ else
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
}
+ if (ecmd->rx_coalesce_usecs_high) {
+ if (rx_coalesce_usecs_high <
+ (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718ce806..0d8995cc92ed 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
+#include <linux/ktime.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+#define ENIC_MAX_COALESCE_TIMERS 10
+/* Interrupt moderation table, which will be used to decide the
+ * coalescing timer values
+ * {rx_rate in Mbps, mapping percentage of the range}
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+ {4000, 0},
+ {4400, 10},
+ {5060, 20},
+ {5230, 30},
+ {5540, 40},
+ {5820, 50},
+ {6120, 60},
+ {6435, 70},
+ {6745, 80},
+ {7000, 90},
+ {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver to pick different ranges for rx coalescing
+ * timer depending on the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+ {0, 0}, /* 0 - 4 Gbps */
+ {0, 3}, /* 4 - 10 Gbps */
+ {3, 6}, /* 10 - 40 Gbps */
+};
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -979,6 +1009,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -986,6 +1025,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1096,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
napi_gro_receive(&enic->napi[q_number], skb);
else
netif_receive_skb(skb);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
} else {
/* Buffer overflow
@@ -1134,6 +1177,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
return rq_work_done;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic /= delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
+
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1171,6 +1272,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes
+ * the intr coalescing timer value based on
+ * the traffic. This is supported only in
+ * the case of MSI-x mode
+ */
+ enic_calc_int_moderation(enic, &enic->rq[rq]);
if (work_done < work_to_do) {
@@ -1179,6 +1287,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
@@ -1314,6 +1424,42 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* If intr mode is not MSIX, do not do adaptive coalescing */
+ if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+ netdev_info(enic->netdev, "INTR mode is not MSI-X, not initializing adaptive coalescing\n");
+ return;
+ }
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (ENIC_LINK_SPEED_10G < speed)
+ index = ENIC_LINK_40G_INDEX;
+ else if (ENIC_LINK_SPEED_4G < speed)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -2231,6 +2377,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
@@ -2250,6 +2397,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ /* rx coalesce time was already initialized. It is used
+ * if adaptive coalescing is turned off
+ */
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315cbe803..4e6aa65857f7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
u32 pad10;
};
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
unsigned int to_clean;
unsigned int last_color;
unsigned int interrupt_offset;
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
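
The adaptive RX coalescing introduced for enic above reduces to one calculation per NAPI poll: bytes received since the last sample are turned into Mbit/s (bytes per microsecond times eight, per the driver comment), that rate picks a percentage from mod_table[], the percentage is mapped into the [range_start, range_end] window chosen by small- vs. large-packet dominance, and the result is damped against the previous timer. A self-contained sketch of that arithmetic, reusing the table and the 3/125 usec range constants visible in the patch (everything else here is demo scaffolding, not the driver API):

/* Standalone model of enic_calc_int_moderation()'s timer math. */
#include <stdio.h>
#include <stdint.h>

struct mod_entry { uint32_t rx_rate_mbps; uint32_t range_percent; };

static const struct mod_entry mod_table[] = {
	{4000, 0}, {4400, 10}, {5060, 20}, {5230, 30}, {5540, 40},
	{5820, 50}, {6120, 60}, {6435, 70}, {6745, 80}, {7000, 90},
	{0xFFFFFFFF, 100},
};

static uint32_t calc_timer(uint64_t bytes, uint64_t delta_us,
			   uint32_t range_start, uint32_t range_end,
			   uint32_t prev_timer)
{
	/* bytes/us * 8 = Mbit/s, the same shortcut as the driver comment */
	uint64_t mbps = (bytes << 3) / delta_us;
	unsigned int i;
	uint32_t timer;

	for (i = 0; i < sizeof(mod_table) / sizeof(mod_table[0]) - 1; i++)
		if (mbps < mod_table[i].rx_rate_mbps)
			break;

	timer = range_start +
		(range_end - range_start) * mod_table[i].range_percent / 100;

	return (timer + prev_timer) >> 1;	/* damping, as in the driver */
}

int main(void)
{
	/* 75 MiB over 100 ms -> ~6291 Mbit/s -> 70% of [3, 125] usec -> 88,
	 * damped against a previous value of 80 -> prints 84
	 */
	printf("%u usec\n", (unsigned int)calc_timer(75ULL << 20, 100000, 3, 125, 80));
	return 0;
}
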
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcbb8543..7a0deadd53bf 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
fifo->interrupt_count++;
- if (jiffies > fifo->jiffies + HZ / 100) {
+ if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_fifo *hw_fifo = fifo->handle;
fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
ring->interrupt_count++;
- if (jiffies > ring->jiffies + HZ / 100) {
+ if (time_before(ring->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_ring *hw_ring = ring->handle;
ring->jiffies = jiffies;
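
The two vxge hunks above replace a raw "jiffies > x + HZ / 100" test with time_before(), which stays correct across a jiffies wrap because it compares through a signed difference instead of absolute values. A tiny user-space illustration of the same idiom (the kernel's real macros live in <linux/jiffies.h>; the 32-bit counter here is only for the demo):

/* Demo of wrap-safe time comparison, mirroring the kernel's time_before(). */
#include <stdio.h>
#include <stdint.h>

#define demo_time_before(a, b)	((int32_t)((a) - (b)) < 0)

int main(void)
{
	uint32_t deadline = 0xFFFFFFF0u;	/* e.g. old jiffies + HZ/100, just before wrap */
	uint32_t now      = 0x00000010u;	/* jiffies shortly after the wrap */

	/* the raw comparison thinks the deadline has not passed yet */
	printf("raw  '>' : deadline passed? %s (wrong)\n",
	       now > deadline ? "yes" : "no");
	/* the signed-difference test still sees the deadline as earlier */
	printf("wrap-safe: deadline passed? %s (right)\n",
	       demo_time_before(deadline, now) ? "yes" : "no");
	return 0;
}
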
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index b8d22173925d..27d83207d24c 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -26,6 +26,7 @@
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
+#include <linux/device.h>
#include <linux/spinlock.h>
#include <net/mac802154.h>
#include <net/wpan-phy.h>
@@ -228,7 +229,8 @@ static int fakelb_probe(struct platform_device *pdev)
int err = -ENOMEM;
int i;
- priv = kzalloc(sizeof(struct fakelb_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
+ GFP_KERNEL);
if (!priv)
goto err_alloc;
@@ -248,7 +250,6 @@ static int fakelb_probe(struct platform_device *pdev)
err_slave:
list_for_each_entry(dp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
err_alloc:
return err;
}
@@ -260,7 +261,6 @@ static int fakelb_remove(struct platform_device *pdev)
list_for_each_entry_safe(dp, temp, &priv->list, list)
fakelb_del(dp);
- kfree(priv);
return 0;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ee328ba101e7..98bad1fb1bfb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -498,12 +498,12 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
--tun->numqueues;
}
list_for_each_entry(tfile, &tun->disabled, next) {
- wake_up_all(&tfile->wq.wait);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
}
BUG_ON(tun->numqueues != 0);
@@ -807,8 +807,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tfile->flags & TUN_FASYNC)
kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
- POLLRDNORM | POLLRDBAND);
+ tfile->socket.sk->sk_data_ready(tfile->socket.sk);
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -965,7 +964,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
- poll_wait(file, &tfile->wq.wait, wait);
+ poll_wait(file, sk_sleep(sk), wait);
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
@@ -1330,47 +1329,26 @@ done:
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
const struct iovec *iv, ssize_t len, int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
ssize_t ret = 0;
+ int peeked, err, off = 0;
tun_debug(KERN_INFO, tun, "tun_do_read\n");
- if (unlikely(!noblock))
- add_wait_queue(&tfile->wq.wait, &wait);
- while (len) {
- if (unlikely(!noblock))
- current->state = TASK_INTERRUPTIBLE;
+ if (!len)
+ return ret;
- /* Read frames from the queue */
- if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
- if (noblock) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- if (tun->dev->reg_state != NETREG_REGISTERED) {
- ret = -EIO;
- break;
- }
-
- /* Nothing to read, let's sleep */
- schedule();
- continue;
- }
+ if (tun->dev->reg_state != NETREG_REGISTERED)
+ return -EIO;
+ /* Read frames from queue */
+ skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ &peeked, &off, &err);
+ if (skb) {
ret = tun_put_user(tun, tfile, skb, iv, len);
kfree_skb(skb);
- break;
- }
-
- if (unlikely(!noblock)) {
- current->state = TASK_RUNNING;
- remove_wait_queue(&tfile->wq.wait, &wait);
- }
+ } else
+ ret = err;
return ret;
}
@@ -2199,8 +2177,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->flags = 0;
tfile->ifindex = 0;
- rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
+ RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2d0caf1eea25..ad2a386a6e92 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -138,7 +138,7 @@ static int cdc_ncm_get_coalesce(struct net_device *netdev,
ec->tx_max_coalesced_frames = ctx->tx_max / ctx->max_datagram_size;
/* the timer will fire CDC_NCM_TIMER_PENDING_CNT times in a row */
- ec->tx_coalesce_usecs = (ctx->timer_interval * CDC_NCM_TIMER_PENDING_CNT) / NSEC_PER_USEC;
+ ec->tx_coalesce_usecs = ctx->timer_interval / (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
return 0;
}
@@ -164,7 +164,7 @@ static int cdc_ncm_set_coalesce(struct net_device *netdev,
return -EINVAL;
spin_lock_bh(&ctx->mtx);
- ctx->timer_interval = ec->tx_coalesce_usecs * NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT;
+ ctx->timer_interval = ec->tx_coalesce_usecs * (NSEC_PER_USEC / CDC_NCM_TIMER_PENDING_CNT);
if (!ctx->timer_interval)
ctx->tx_timer_pending = 0;
spin_unlock_bh(&ctx->mtx);
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 9c34d2fccfac..9c78090e72f8 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -500,26 +500,23 @@ int i2400m_pm_notifier(struct notifier_block *notifier,
*/
int i2400m_pre_reset(struct i2400m *i2400m)
{
- int result;
struct device *dev = i2400m_dev(i2400m);
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
d_printf(1, dev, "pre-reset shut down\n");
- result = 0;
mutex_lock(&i2400m->init_mutex);
if (i2400m->updown) {
netif_tx_disable(i2400m->wimax_dev.net_dev);
__i2400m_dev_stop(i2400m);
- result = 0;
/* down't set updown to zero -- this way
* post_reset can restore properly */
}
mutex_unlock(&i2400m->init_mutex);
if (i2400m->bus_release)
i2400m->bus_release(i2400m);
- d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
- return result;
+ d_fnend(3, dev, "(i2400m %p) = 0\n", i2400m);
+ return 0;
}
EXPORT_SYMBOL_GPL(i2400m_pre_reset);