Diffstat (limited to 'drivers/net')
268 files changed, 21378 insertions, 4195 deletions
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index d94dae216820..c7d05027a7a0 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -79,7 +79,7 @@ static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota) static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new, int (*compare)(struct sk_buff *a, struct sk_buff *b)) { - struct sk_buff *pos, *insert = (struct sk_buff *)head; + struct sk_buff *pos, *insert = NULL; skb_queue_reverse_walk(head, pos) { const struct can_rx_offload_cb *cb_pos, *cb_new; @@ -99,8 +99,10 @@ static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buf insert = pos; break; } - - __skb_queue_after(head, insert, new); + if (!insert) + __skb_queue_head(head, new); + else + __skb_queue_after(head, insert, new); } static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index d3ce1e4cb4d3..7c09d8f195ae 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -23,6 +23,14 @@ config NET_DSA_LOOP This enables support for a fake mock-up switch chip which exercises the DSA APIs. +config NET_DSA_LANTIQ_GSWIP + tristate "Lantiq / Intel GSWIP" + depends on NET_DSA + select NET_DSA_TAG_GSWIP + ---help--- + This enables support for the Lantiq / Intel GSWIP 2.1 found in + the xrx200 / VR9 SoC. + config NET_DSA_MT7530 tristate "Mediatek MT7530 Ethernet switch support" depends on NET_DSA diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index 46c1cba91ffe..82e5d794c41f 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o ifdef CONFIG_NET_DSA_LOOP obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o endif +obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o diff --git a/drivers/net/dsa/b53/Kconfig b/drivers/net/dsa/b53/Kconfig index 2f988216dab9..e83ebfafd881 100644 --- a/drivers/net/dsa/b53/Kconfig +++ b/drivers/net/dsa/b53/Kconfig @@ -23,6 +23,7 @@ config B53_MDIO_DRIVER config B53_MMAP_DRIVER tristate "B53 MMAP connected switch driver" depends on B53 && HAS_IOMEM + default BCM63XX || BMIPS_GENERIC help Select to enable support for memory-mapped switches like the BCM63XX integrated switches. @@ -30,6 +31,14 @@ config B53_MMAP_DRIVER config B53_SRAB_DRIVER tristate "B53 SRAB connected switch driver" depends on B53 && HAS_IOMEM + default ARCH_BCM_IPROC help Select to enable support for memory-mapped Switch Register Access Bridge Registers (SRAB) like it is found on the BCM53010 + +config B53_SERDES + tristate "B53 SerDes support" + depends on B53 + default ARCH_BCM_NSP + help + Select to enable support for SerDes on e.g. Northstar Plus SoCs.
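The rx-offload hunk above replaces a fragile idiom: casting the sk_buff_head to a struct sk_buff only works because the head shares the next/prev layout of a list element, so __skb_queue_after() silently treats the head as an element. A NULL sentinel makes the "insert at the front" case explicit. A minimal sketch of the resulting insert-sorted pattern; the compare callback is a hypothetical stand-in for can_rx_offload_compare():

/* Keep @head sorted: walk from the tail and insert @new behind the first
 * element that should precede it; if nothing precedes it, @new becomes
 * the new queue head. compare(a, b) < 0 means @a sorts before @b.
 */
static void skb_queue_add_sorted(struct sk_buff_head *head, struct sk_buff *new,
				 int (*compare)(struct sk_buff *a,
						struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		if (compare(pos, new) < 0) {
			insert = pos;
			break;
		}
	}

	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}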
diff --git a/drivers/net/dsa/b53/Makefile b/drivers/net/dsa/b53/Makefile index 4256fb42a4dd..b1be13023ae4 100644 --- a/drivers/net/dsa/b53/Makefile +++ b/drivers/net/dsa/b53/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o obj-$(CONFIG_B53_MDIO_DRIVER) += b53_mdio.o obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o +obj-$(CONFIG_B53_SERDES) += b53_serdes.o diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index d93c790bfbe8..dbf5b86a07fe 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -26,6 +26,7 @@ #include <linux/module.h> #include <linux/platform_data/b53.h> #include <linux/phy.h> +#include <linux/phylink.h> #include <linux/etherdevice.h> #include <linux/if_bridge.h> #include <net/dsa.h> @@ -502,8 +503,14 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct b53_device *dev = ds->priv; unsigned int cpu_port = ds->ports[port].cpu_dp->index; + int ret = 0; u16 pvlan; + if (dev->ops->irq_enable) + ret = dev->ops->irq_enable(dev, port); + if (ret) + return ret; + /* Clear the Rx and Tx disable bits and set to no spanning tree */ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), 0); @@ -536,6 +543,9 @@ void b53_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) b53_read8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), &reg); reg |= PORT_CTRL_RX_DISABLE | PORT_CTRL_TX_DISABLE; b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), reg); + + if (dev->ops->irq_disable) + dev->ops->irq_disable(dev, port); } EXPORT_SYMBOL(b53_disable_port); @@ -755,6 +765,8 @@ static int b53_reset_switch(struct b53_device *priv) memset(priv->vlans, 0, sizeof(*priv->vlans) * priv->num_vlans); memset(priv->ports, 0, sizeof(*priv->ports) * priv->num_ports); + priv->serdes_lane = B53_INVALID_LANE; + return b53_switch_reset(priv); } @@ -938,33 +950,50 @@ static int b53_setup(struct dsa_switch *ds) return ret; } -static void b53_adjust_link(struct dsa_switch *ds, int port, - struct phy_device *phydev) +static void b53_force_link(struct b53_device *dev, int port, int link) { - struct b53_device *dev = ds->priv; - struct ethtool_eee *p = &dev->ports[port].eee; - u8 rgmii_ctrl = 0, reg = 0, off; - - if (!phy_is_pseudo_fixed_link(phydev)) - return; + u8 reg, val, off; /* Override the port settings */ if (port == dev->cpu_port) { off = B53_PORT_OVERRIDE_CTRL; - reg = PORT_OVERRIDE_EN; + val = PORT_OVERRIDE_EN; } else { off = B53_GMII_PORT_OVERRIDE_CTRL(port); - reg = GMII_PO_EN; + val = GMII_PO_EN; } - /* Set the link UP */ - if (phydev->link) + b53_read8(dev, B53_CTRL_PAGE, off, &reg); + reg |= val; + if (link) reg |= PORT_OVERRIDE_LINK; + else + reg &= ~PORT_OVERRIDE_LINK; + b53_write8(dev, B53_CTRL_PAGE, off, reg); +} - if (phydev->duplex == DUPLEX_FULL) +static void b53_force_port_config(struct b53_device *dev, int port, + int speed, int duplex, int pause) +{ + u8 reg, val, off; + + /* Override the port settings */ + if (port == dev->cpu_port) { + off = B53_PORT_OVERRIDE_CTRL; + val = PORT_OVERRIDE_EN; + } else { + off = B53_GMII_PORT_OVERRIDE_CTRL(port); + val = GMII_PO_EN; + } + + b53_read8(dev, B53_CTRL_PAGE, off, &reg); + reg |= val; + if (duplex == DUPLEX_FULL) reg |= PORT_OVERRIDE_FULL_DUPLEX; + else + reg &= ~PORT_OVERRIDE_FULL_DUPLEX; - switch (phydev->speed) { + switch (speed) { case 2000: reg |= PORT_OVERRIDE_SPEED_2000M; /* fallthrough */ @@ -978,21 +1007,41 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, reg |= 
PORT_OVERRIDE_SPEED_10M; break; default: - dev_err(ds->dev, "unknown speed: %d\n", phydev->speed); + dev_err(dev->dev, "unknown speed: %d\n", speed); return; } + if (pause & MLO_PAUSE_RX) + reg |= PORT_OVERRIDE_RX_FLOW; + if (pause & MLO_PAUSE_TX) + reg |= PORT_OVERRIDE_TX_FLOW; + + b53_write8(dev, B53_CTRL_PAGE, off, reg); +} + +static void b53_adjust_link(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct b53_device *dev = ds->priv; + struct ethtool_eee *p = &dev->ports[port].eee; + u8 rgmii_ctrl = 0, reg = 0, off; + int pause = 0; + + if (!phy_is_pseudo_fixed_link(phydev)) + return; + /* Enable flow control on BCM5301x's CPU port */ if (is5301x(dev) && port == dev->cpu_port) - reg |= PORT_OVERRIDE_RX_FLOW | PORT_OVERRIDE_TX_FLOW; + pause = MLO_PAUSE_TXRX_MASK; if (phydev->pause) { if (phydev->asym_pause) - reg |= PORT_OVERRIDE_TX_FLOW; - reg |= PORT_OVERRIDE_RX_FLOW; + pause |= MLO_PAUSE_TX; + pause |= MLO_PAUSE_RX; } - b53_write8(dev, B53_CTRL_PAGE, off, reg); + b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause); + b53_force_link(dev, port, phydev->link); if (is531x5(dev) && phy_interface_is_rgmii(phydev)) { if (port == 8) @@ -1052,16 +1101,9 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, } } else if (is5301x(dev)) { if (port != dev->cpu_port) { - u8 po_reg = B53_GMII_PORT_OVERRIDE_CTRL(dev->cpu_port); - u8 gmii_po; - - b53_read8(dev, B53_CTRL_PAGE, po_reg, &gmii_po); - gmii_po |= GMII_PO_LINK | - GMII_PO_RX_FLOW | - GMII_PO_TX_FLOW | - GMII_PO_EN | - GMII_PO_SPEED_2000M; - b53_write8(dev, B53_CTRL_PAGE, po_reg, gmii_po); + b53_force_port_config(dev, dev->cpu_port, 2000, + DUPLEX_FULL, MLO_PAUSE_TXRX_MASK); + b53_force_link(dev, dev->cpu_port, 1); } } @@ -1069,6 +1111,146 @@ static void b53_adjust_link(struct dsa_switch *ds, int port, p->eee_enabled = b53_eee_init(ds, port, phydev); } +void b53_port_event(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + bool link; + u16 sts; + + b53_read16(dev, B53_STAT_PAGE, B53_LINK_STAT, &sts); + link = !!(sts & BIT(port)); + dsa_port_phylink_mac_change(ds, port, link); +} +EXPORT_SYMBOL(b53_port_event); + +void b53_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + if (dev->ops->serdes_phylink_validate) + dev->ops->serdes_phylink_validate(dev, port, mask, state); + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of 5325/5365, MII, Reverse MII and 802.3z, we + * support Gigabit, including Half duplex. 
+ */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + !phy_interface_mode_is_8023z(state->interface) && + !(is5325(dev) || is5365(dev))) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + if (!phy_interface_mode_is_8023z(state->interface)) { + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + } + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + + phylink_helper_basex_speed(state); +} +EXPORT_SYMBOL(b53_phylink_validate); + +int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + int ret = -EOPNOTSUPP; + + if (phy_interface_mode_is_8023z(state->interface) && + dev->ops->serdes_link_state) + ret = dev->ops->serdes_link_state(dev, port, state); + + return ret; +} +EXPORT_SYMBOL(b53_phylink_mac_link_state); + +void b53_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_port_config(dev, port, state->speed, + state->duplex, state->pause); + return; + } + + if (phy_interface_mode_is_8023z(state->interface) && + dev->ops->serdes_config) + dev->ops->serdes_config(dev, port, mode, state); +} +EXPORT_SYMBOL(b53_phylink_mac_config); + +void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port) +{ + struct b53_device *dev = ds->priv; + + if (dev->ops->serdes_an_restart) + dev->ops->serdes_an_restart(dev, port); +} +EXPORT_SYMBOL(b53_phylink_mac_an_restart); + +void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_link(dev, port, false); + return; + } + + if (phy_interface_mode_is_8023z(interface) && + dev->ops->serdes_link_set) + dev->ops->serdes_link_set(dev, port, mode, interface, false); +} +EXPORT_SYMBOL(b53_phylink_mac_link_down); + +void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct b53_device *dev = ds->priv; + + if (mode == MLO_AN_PHY) + return; + + if (mode == MLO_AN_FIXED) { + b53_force_link(dev, port, true); + return; + } + + if (phy_interface_mode_is_8023z(interface) && + dev->ops->serdes_link_set) + dev->ops->serdes_link_set(dev, port, mode, interface, true); +} +EXPORT_SYMBOL(b53_phylink_mac_link_up); + int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) { return 0; @@ -1710,6 +1892,12 @@ static const struct dsa_switch_ops b53_switch_ops = { .phy_read = b53_phy_read16, .phy_write = b53_phy_write16, .adjust_link = b53_adjust_link, + .phylink_validate = b53_phylink_validate, + .phylink_mac_link_state = b53_phylink_mac_link_state, + .phylink_mac_config = b53_phylink_mac_config, + .phylink_mac_an_restart = b53_phylink_mac_an_restart, + .phylink_mac_link_down = b53_phylink_mac_link_down, + .phylink_mac_link_up = b53_phylink_mac_link_up, .port_enable = b53_enable_port, .port_disable = b53_disable_port, .get_mac_eee = b53_get_mac_eee, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 
df149756c282..ec796482792d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -29,6 +29,7 @@ struct b53_device; struct net_device; +struct phylink_link_state; struct b53_io_ops { int (*read8)(struct b53_device *dev, u8 page, u8 reg, u8 *value); @@ -43,8 +44,25 @@ struct b53_io_ops { int (*write64)(struct b53_device *dev, u8 page, u8 reg, u64 value); int (*phy_read16)(struct b53_device *dev, int addr, int reg, u16 *value); int (*phy_write16)(struct b53_device *dev, int addr, int reg, u16 value); + int (*irq_enable)(struct b53_device *dev, int port); + void (*irq_disable)(struct b53_device *dev, int port); + u8 (*serdes_map_lane)(struct b53_device *dev, int port); + int (*serdes_link_state)(struct b53_device *dev, int port, + struct phylink_link_state *state); + void (*serdes_config)(struct b53_device *dev, int port, + unsigned int mode, + const struct phylink_link_state *state); + void (*serdes_an_restart)(struct b53_device *dev, int port); + void (*serdes_link_set)(struct b53_device *dev, int port, + unsigned int mode, phy_interface_t interface, + bool link_up); + void (*serdes_phylink_validate)(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state); }; +#define B53_INVALID_LANE 0xff + enum { BCM5325_DEVICE_ID = 0x25, BCM5365_DEVICE_ID = 0x65, @@ -107,6 +125,7 @@ struct b53_device { /* connect specific data */ u8 current_page; struct device *dev; + u8 serdes_lane; /* Master MDIO bus we got probed from */ struct mii_bus *bus; @@ -298,6 +317,23 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge); void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state); void b53_br_fast_age(struct dsa_switch *ds, int port); +void b53_port_event(struct dsa_switch *ds, int port); +void b53_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state); +int b53_phylink_mac_link_state(struct dsa_switch *ds, int port, + struct phylink_link_state *state); +void b53_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state); +void b53_phylink_mac_an_restart(struct dsa_switch *ds, int port); +void b53_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); +void b53_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev); int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering); int b53_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); diff --git a/drivers/net/dsa/b53/b53_serdes.c b/drivers/net/dsa/b53/b53_serdes.c new file mode 100644 index 000000000000..b45c55e0b8b4 --- /dev/null +++ b/drivers/net/dsa/b53/b53_serdes.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +/* + * Northstar Plus switch SerDes/SGMII PHY main logic + * + * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <net/dsa.h> + +#include "b53_priv.h" +#include "b53_serdes.h" +#include "b53_regs.h" + +static void b53_serdes_write_blk(struct b53_device *dev, u8 offset, u16 block, + u16 value) +{ + b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block); + 
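+	/* the BLKADDR write above selects the active register block; the 16-bit access below is decoded relative to that block (paged register indirection) */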
b53_write16(dev, B53_SERDES_PAGE, offset, value); +} + +static u16 b53_serdes_read_blk(struct b53_device *dev, u8 offset, u16 block) +{ + u16 value; + + b53_write16(dev, B53_SERDES_PAGE, B53_SERDES_BLKADDR, block); + b53_read16(dev, B53_SERDES_PAGE, offset, &value); + + return value; +} + +static void b53_serdes_set_lane(struct b53_device *dev, u8 lane) +{ + if (dev->serdes_lane == lane) + return; + + WARN_ON(lane > 1); + + b53_serdes_write_blk(dev, B53_SERDES_LANE, + SERDES_XGXSBLK0_BLOCKADDRESS, lane); + dev->serdes_lane = lane; +} + +static void b53_serdes_write(struct b53_device *dev, u8 lane, + u8 offset, u16 block, u16 value) +{ + b53_serdes_set_lane(dev, lane); + b53_serdes_write_blk(dev, offset, block, value); +} + +static u16 b53_serdes_read(struct b53_device *dev, u8 lane, + u8 offset, u16 block) +{ + b53_serdes_set_lane(dev, lane); + return b53_serdes_read_blk(dev, offset, block); +} + +void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, + const struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), + SERDES_DIGITAL_BLK); + if (state->interface == PHY_INTERFACE_MODE_1000BASEX) + reg |= FIBER_MODE_1000X; + else + reg &= ~FIBER_MODE_1000X; + b53_serdes_write(dev, lane, B53_SERDES_DIGITAL_CONTROL(1), + SERDES_DIGITAL_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_config); + +void b53_serdes_an_restart(struct b53_device *dev, int port) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK); + reg |= BMCR_ANRESTART; + b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_an_restart); + +int b53_serdes_link_state(struct b53_device *dev, int port, + struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 dig, bmcr, bmsr; + + if (lane == B53_INVALID_LANE) + return 1; + + dig = b53_serdes_read(dev, lane, B53_SERDES_DIGITAL_STATUS, + SERDES_DIGITAL_BLK); + bmcr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK); + bmsr = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMSR), + SERDES_MII_BLK); + + switch ((dig >> SPEED_STATUS_SHIFT) & SPEED_STATUS_MASK) { + case SPEED_STATUS_10: + state->speed = SPEED_10; + break; + case SPEED_STATUS_100: + state->speed = SPEED_100; + break; + case SPEED_STATUS_1000: + state->speed = SPEED_1000; + break; + default: + case SPEED_STATUS_2500: + state->speed = SPEED_2500; + break; + } + + state->duplex = dig & DUPLEX_STATUS ? 
DUPLEX_FULL : DUPLEX_HALF; + state->an_enabled = !!(bmcr & BMCR_ANENABLE); + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); + state->link = !!(dig & LINK_STATUS); + if (dig & PAUSE_RESOLUTION_RX_SIDE) + state->pause |= MLO_PAUSE_RX; + if (dig & PAUSE_RESOLUTION_TX_SIDE) + state->pause |= MLO_PAUSE_TX; + + return 0; +} +EXPORT_SYMBOL(b53_serdes_link_state); + +void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, + phy_interface_t interface, bool link_up) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 reg; + + if (lane == B53_INVALID_LANE) + return; + + reg = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK); + if (link_up) + reg &= ~BMCR_PDOWN; + else + reg |= BMCR_PDOWN; + b53_serdes_write(dev, lane, B53_SERDES_MII_REG(MII_BMCR), + SERDES_MII_BLK, reg); +} +EXPORT_SYMBOL(b53_serdes_link_set); + +void b53_serdes_phylink_validate(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + u8 lane = b53_serdes_map_lane(dev, port); + + if (lane == B53_INVALID_LANE) + return; + + switch (lane) { + case 0: + phylink_set(supported, 2500baseX_Full); + /* fallthrough */ + case 1: + phylink_set(supported, 1000baseX_Full); + break; + default: + break; + } +} +EXPORT_SYMBOL(b53_serdes_phylink_validate); + +int b53_serdes_init(struct b53_device *dev, int port) +{ + u8 lane = b53_serdes_map_lane(dev, port); + u16 id0, msb, lsb; + + if (lane == B53_INVALID_LANE) + return -EINVAL; + + id0 = b53_serdes_read(dev, lane, B53_SERDES_ID0, SERDES_ID0); + msb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID1), + SERDES_MII_BLK); + lsb = b53_serdes_read(dev, lane, B53_SERDES_MII_REG(MII_PHYSID2), + SERDES_MII_BLK); + if (id0 == 0 || id0 == 0xffff) { + dev_err(dev->dev, "SerDes not initialized, check settings\n"); + return -ENODEV; + } + + dev_info(dev->dev, + "SerDes lane %d, model: %d, rev %c%d (OUI: 0x%08x)\n", + lane, id0 & SERDES_ID0_MODEL_MASK, + (id0 >> SERDES_ID0_REV_LETTER_SHIFT) + 0x41, + (id0 >> SERDES_ID0_REV_NUM_SHIFT) & SERDES_ID0_REV_NUM_MASK, + (u32)msb << 16 | lsb); + + return 0; +} +EXPORT_SYMBOL(b53_serdes_init); + +MODULE_AUTHOR("Florian Fainelli <f.fainelli@gmail.com>"); +MODULE_DESCRIPTION("B53 Switch SerDes driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/dsa/b53/b53_serdes.h b/drivers/net/dsa/b53/b53_serdes.h new file mode 100644 index 000000000000..eed7c9357091 --- /dev/null +++ b/drivers/net/dsa/b53/b53_serdes.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause + * + * Northstar Plus switch SerDes/SGMII PHY definitions + * + * Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com> + */ + +#include <linux/phy.h> +#include <linux/types.h> + +/* Non-standard page used to access SerDes PHY registers on NorthStar Plus */ +#define B53_SERDES_PAGE 0x16 +#define B53_SERDES_BLKADDR 0x3e +#define B53_SERDES_LANE 0x3c + +#define B53_SERDES_ID0 0x20 +#define SERDES_ID0_MODEL_MASK 0x3f +#define SERDES_ID0_REV_NUM_SHIFT 11 +#define SERDES_ID0_REV_NUM_MASK 0x7 +#define SERDES_ID0_REV_LETTER_SHIFT 14 + +#define B53_SERDES_MII_REG(x) (0x20 + (x) * 2) +#define B53_SERDES_DIGITAL_CONTROL(x) (0x18 + (x) * 2) +#define B53_SERDES_DIGITAL_STATUS 0x28 + +/* SERDES_DIGITAL_CONTROL1 */ +#define FIBER_MODE_1000X BIT(0) +#define TBI_INTERFACE BIT(1) +#define SIGNAL_DETECT_EN BIT(2) +#define INVERT_SIGNAL_DETECT BIT(3) +#define AUTODET_EN BIT(4) +#define SGMII_MASTER_MODE BIT(5) +#define DISABLE_DLL_PWRDOWN BIT(6) +#define CRC_CHECKER_DIS BIT(7) +#define 
COMMA_DET_EN BIT(8) +#define ZERO_COMMA_DET_EN BIT(9) +#define REMOTE_LOOPBACK BIT(10) +#define SEL_RX_PKTS_FOR_CNTR BIT(11) +#define MASTER_MDIO_PHY_SEL BIT(13) +#define DISABLE_SIGNAL_DETECT_FLT BIT(14) + +/* SERDES_DIGITAL_CONTROL2 */ +#define EN_PARALLEL_DET BIT(0) +#define DIS_FALSE_LINK BIT(1) +#define FLT_FORCE_LINK BIT(2) +#define EN_AUTONEG_ERR_TIMER BIT(3) +#define DIS_REMOTE_FAULT_SENSING BIT(4) +#define FORCE_XMIT_DATA BIT(5) +#define AUTONEG_FAST_TIMERS BIT(6) +#define DIS_CARRIER_EXTEND BIT(7) +#define DIS_TRRR_GENERATION BIT(8) +#define BYPASS_PCS_RX BIT(9) +#define BYPASS_PCS_TX BIT(10) +#define TEST_CNTR_EN BIT(11) +#define TX_PACKET_SEQ_TEST BIT(12) +#define TX_IDLE_JAM_SEQ_TEST BIT(13) +#define CLR_BER_CNTR BIT(14) + +/* SERDES_DIGITAL_CONTROL3 */ +#define TX_FIFO_RST BIT(0) +#define FIFO_ELAST_TX_RX_SHIFT 1 +#define FIFO_ELAST_TX_RX_5K 0 +#define FIFO_ELAST_TX_RX_10K 1 +#define FIFO_ELAST_TX_RX_13_5K 2 +#define FIFO_ELAST_TX_RX_18_5K 3 +#define BLOCK_TXEN_MODE BIT(9) +#define JAM_FALSE_CARRIER_MODE BIT(10) +#define EXT_PHY_CRS_MODE BIT(11) +#define INVERT_EXT_PHY_CRS BIT(12) +#define DISABLE_TX_CRS BIT(13) + +/* SERDES_DIGITAL_STATUS */ +#define SGMII_MODE BIT(0) +#define LINK_STATUS BIT(1) +#define DUPLEX_STATUS BIT(2) +#define SPEED_STATUS_SHIFT 3 +#define SPEED_STATUS_10 0 +#define SPEED_STATUS_100 1 +#define SPEED_STATUS_1000 2 +#define SPEED_STATUS_2500 3 +#define SPEED_STATUS_MASK SPEED_STATUS_2500 +#define PAUSE_RESOLUTION_TX_SIDE BIT(5) +#define PAUSE_RESOLUTION_RX_SIDE BIT(6) +#define LINK_STATUS_CHANGE BIT(7) +#define EARLY_END_EXT_DET BIT(8) +#define CARRIER_EXT_ERR_DET BIT(9) +#define RX_ERR_DET BIT(10) +#define TX_ERR_DET BIT(11) +#define CRC_ERR_DET BIT(12) +#define FALSE_CARRIER_ERR_DET BIT(13) +#define RXFIFO_ERR_DET BIT(14) +#define TXFIFO_ERR_DET BIT(15) + +/* Block offsets */ +#define SERDES_DIGITAL_BLK 0x8300 +#define SERDES_ID0 0x8310 +#define SERDES_MII_BLK 0xffe0 +#define SERDES_XGXSBLK0_BLOCKADDRESS 0xffd0 + +struct phylink_link_state; + +static inline u8 b53_serdes_map_lane(struct b53_device *dev, int port) +{ + if (!dev->ops->serdes_map_lane) + return B53_INVALID_LANE; + + return dev->ops->serdes_map_lane(dev, port); +} + +int b53_serdes_get_link(struct b53_device *dev, int port); +int b53_serdes_link_state(struct b53_device *dev, int port, + struct phylink_link_state *state); +void b53_serdes_config(struct b53_device *dev, int port, unsigned int mode, + const struct phylink_link_state *state); +void b53_serdes_an_restart(struct b53_device *dev, int port); +void b53_serdes_link_set(struct b53_device *dev, int port, unsigned int mode, + phy_interface_t interface, bool link_up); +void b53_serdes_phylink_validate(struct b53_device *dev, int port, + unsigned long *supported, + struct phylink_link_state *state); +#if IS_ENABLED(CONFIG_B53_SERDES) +int b53_serdes_init(struct b53_device *dev, int port); +#else +static inline int b53_serdes_init(struct b53_device *dev, int port) +{ + return -ENODEV; +} +#endif diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 91de2ba99ad1..90f514252987 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c @@ -19,11 +19,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> +#include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/platform_data/b53.h> #include <linux/of.h> #include "b53_priv.h" +#include "b53_serdes.h" /* command and status register of the SRAB */ #define B53_SRAB_CMDSTAT 0x2c @@ -47,6 +49,7 @@ 
/* command and status register of the SRAB */ #define B53_SRAB_CTRLS 0x40 +#define B53_SRAB_CTRLS_HOST_INTR BIT(1) #define B53_SRAB_CTRLS_RCAREQ BIT(3) #define B53_SRAB_CTRLS_RCAGNT BIT(4) #define B53_SRAB_CTRLS_SW_INIT_DONE BIT(6) @@ -60,8 +63,29 @@ #define B53_SRAB_P7_SLEEP_TIMER BIT(11) #define B53_SRAB_IMP0_SLEEP_TIMER BIT(12) +/* Port mux configuration registers */ +#define B53_MUX_CONFIG_P5 0x00 +#define MUX_CONFIG_SGMII 0 +#define MUX_CONFIG_MII_LITE 1 +#define MUX_CONFIG_RGMII 2 +#define MUX_CONFIG_GMII 3 +#define MUX_CONFIG_GPHY 4 +#define MUX_CONFIG_INTERNAL 5 +#define MUX_CONFIG_MASK 0x7 +#define B53_MUX_CONFIG_P4 0x04 + +struct b53_srab_port_priv { + int irq; + bool irq_enabled; + struct b53_device *dev; + unsigned int num; + phy_interface_t mode; +}; + struct b53_srab_priv { void __iomem *regs; + void __iomem *mux_config; + struct b53_srab_port_priv port_intrs[B53_N_PORTS]; }; static int b53_srab_request_grant(struct b53_device *dev) @@ -344,6 +368,81 @@ err: return ret; } +static irqreturn_t b53_srab_port_thread(int irq, void *dev_id) +{ + struct b53_srab_port_priv *port = dev_id; + struct b53_device *dev = port->dev; + + if (port->mode == PHY_INTERFACE_MODE_SGMII) + b53_port_event(dev->ds, port->num); + + return IRQ_HANDLED; +} + +static irqreturn_t b53_srab_port_isr(int irq, void *dev_id) +{ + struct b53_srab_port_priv *port = dev_id; + struct b53_device *dev = port->dev; + struct b53_srab_priv *priv = dev->priv; + + /* Acknowledge the interrupt */ + writel(BIT(port->num), priv->regs + B53_SRAB_INTR); + + return IRQ_WAKE_THREAD; +} + +#if IS_ENABLED(CONFIG_B53_SERDES) +static u8 b53_srab_serdes_map_lane(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + + if (p->mode != PHY_INTERFACE_MODE_SGMII) + return B53_INVALID_LANE; + + switch (port) { + case 5: + return 0; + case 4: + return 1; + default: + return B53_INVALID_LANE; + } +} +#endif + +static int b53_srab_irq_enable(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + int ret = 0; + + /* Interrupt is optional and was not specified, do not make + * this fatal + */ + if (p->irq == -ENXIO) + return ret; + + ret = request_threaded_irq(p->irq, b53_srab_port_isr, + b53_srab_port_thread, 0, + dev_name(dev->dev), p); + if (!ret) + p->irq_enabled = true; + + return ret; +} + +static void b53_srab_irq_disable(struct b53_device *dev, int port) +{ + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p = &priv->port_intrs[port]; + + if (p->irq_enabled) { + free_irq(p->irq, p); + p->irq_enabled = false; + } +} + static const struct b53_io_ops b53_srab_ops = { .read8 = b53_srab_read8, .read16 = b53_srab_read16, @@ -355,6 +454,16 @@ static const struct b53_io_ops b53_srab_ops = { .write32 = b53_srab_write32, .write48 = b53_srab_write48, .write64 = b53_srab_write64, + .irq_enable = b53_srab_irq_enable, + .irq_disable = b53_srab_irq_disable, +#if IS_ENABLED(CONFIG_B53_SERDES) + .serdes_map_lane = b53_srab_serdes_map_lane, + .serdes_link_state = b53_serdes_link_state, + .serdes_config = b53_serdes_config, + .serdes_an_restart = b53_serdes_an_restart, + .serdes_link_set = b53_serdes_link_set, + .serdes_phylink_validate = b53_serdes_phylink_validate, +#endif }; static const struct of_device_id b53_srab_of_match[] = { @@ -379,6 +488,107 @@ static const struct of_device_id b53_srab_of_match[] = { }; MODULE_DEVICE_TABLE(of, b53_srab_of_match); +static 
void b53_srab_intr_set(struct b53_srab_priv *priv, bool set) +{ + u32 reg; + + reg = readl(priv->regs + B53_SRAB_CTRLS); + if (set) + reg |= B53_SRAB_CTRLS_HOST_INTR; + else + reg &= ~B53_SRAB_CTRLS_HOST_INTR; + writel(reg, priv->regs + B53_SRAB_CTRLS); +} + +static void b53_srab_prepare_irq(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *port; + unsigned int i; + char *name; + + /* Clear all pending interrupts */ + writel(0xffffffff, priv->regs + B53_SRAB_INTR); + + if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) + return; + + for (i = 0; i < B53_N_PORTS; i++) { + port = &priv->port_intrs[i]; + + /* There is no port 6 */ + if (i == 6) + continue; + + name = kasprintf(GFP_KERNEL, "link_state_p%d", i); + if (!name) + return; + + port->num = i; + port->dev = dev; + port->irq = platform_get_irq_byname(pdev, name); + kfree(name); + } + + b53_srab_intr_set(priv, true); +} + +static void b53_srab_mux_init(struct platform_device *pdev) +{ + struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + struct b53_srab_port_priv *p; + struct resource *r; + unsigned int port; + u32 reg, off = 0; + int ret; + + if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) + return; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->mux_config = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(priv->mux_config)) + return; + + /* Obtain the port mux configuration so we know which lanes + * actually map to SerDes lanes + */ + for (port = 5; port > 3; port--, off += 4) { + p = &priv->port_intrs[port]; + + reg = readl(priv->mux_config + B53_MUX_CONFIG_P5 + off); + switch (reg & MUX_CONFIG_MASK) { + case MUX_CONFIG_SGMII: + p->mode = PHY_INTERFACE_MODE_SGMII; + ret = b53_serdes_init(dev, port); + if (ret) + continue; + break; + case MUX_CONFIG_MII_LITE: + p->mode = PHY_INTERFACE_MODE_MII; + break; + case MUX_CONFIG_GMII: + p->mode = PHY_INTERFACE_MODE_GMII; + break; + case MUX_CONFIG_RGMII: + p->mode = PHY_INTERFACE_MODE_RGMII; + break; + case MUX_CONFIG_INTERNAL: + p->mode = PHY_INTERFACE_MODE_INTERNAL; + break; + default: + p->mode = PHY_INTERFACE_MODE_NA; + break; + } + + if (p->mode != PHY_INTERFACE_MODE_NA) + dev_info(&pdev->dev, "Port %d mode: %s\n", + port, phy_modes(p->mode)); + } +} + static int b53_srab_probe(struct platform_device *pdev) { struct b53_platform_data *pdata = pdev->dev.platform_data; @@ -417,13 +627,18 @@ static int b53_srab_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); + b53_srab_prepare_irq(pdev); + b53_srab_mux_init(pdev); + return b53_switch_register(dev); } static int b53_srab_remove(struct platform_device *pdev) { struct b53_device *dev = platform_get_drvdata(pdev); + struct b53_srab_priv *priv = dev->priv; + b53_srab_intr_set(priv, false); if (dev) b53_switch_remove(dev); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e0066adcd2f3..1fc27e149e7f 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -465,8 +465,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv) { mdiobus_unregister(priv->slave_mii_bus); - if (priv->master_mii_dn) - of_node_put(priv->master_mii_dn); + of_node_put(priv->master_mii_dn); } static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c new file mode 100644 
index 000000000000..693a67f45bef --- /dev/null +++ b/drivers/net/dsa/lantiq_gswip.c @@ -0,0 +1,1167 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel GSWIP switch driver for VRX200 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/firmware.h> +#include <linux/if_bridge.h> +#include <linux/if_vlan.h> +#include <linux/iopoll.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_platform.h> +#include <linux/phy.h> +#include <linux/phylink.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <net/dsa.h> +#include <dt-bindings/mips/lantiq_rcu_gphy.h> + +#include "lantiq_pce.h" + +/* GSWIP MDIO Registers */ +#define GSWIP_MDIO_GLOB 0x00 +#define GSWIP_MDIO_GLOB_ENABLE BIT(15) +#define GSWIP_MDIO_CTRL 0x08 +#define GSWIP_MDIO_CTRL_BUSY BIT(12) +#define GSWIP_MDIO_CTRL_RD BIT(11) +#define GSWIP_MDIO_CTRL_WR BIT(10) +#define GSWIP_MDIO_CTRL_PHYAD_MASK 0x1f +#define GSWIP_MDIO_CTRL_PHYAD_SHIFT 5 +#define GSWIP_MDIO_CTRL_REGAD_MASK 0x1f +#define GSWIP_MDIO_READ 0x09 +#define GSWIP_MDIO_WRITE 0x0A +#define GSWIP_MDIO_MDC_CFG0 0x0B +#define GSWIP_MDIO_MDC_CFG1 0x0C +#define GSWIP_MDIO_PHYp(p) (0x15 - (p)) +#define GSWIP_MDIO_PHY_LINK_MASK 0x6000 +#define GSWIP_MDIO_PHY_LINK_AUTO 0x0000 +#define GSWIP_MDIO_PHY_LINK_DOWN 0x4000 +#define GSWIP_MDIO_PHY_LINK_UP 0x2000 +#define GSWIP_MDIO_PHY_SPEED_MASK 0x1800 +#define GSWIP_MDIO_PHY_SPEED_AUTO 0x1800 +#define GSWIP_MDIO_PHY_SPEED_M10 0x0000 +#define GSWIP_MDIO_PHY_SPEED_M100 0x0800 +#define GSWIP_MDIO_PHY_SPEED_G1 0x1000 +#define GSWIP_MDIO_PHY_FDUP_MASK 0x0600 +#define GSWIP_MDIO_PHY_FDUP_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FDUP_EN 0x0200 +#define GSWIP_MDIO_PHY_FDUP_DIS 0x0600 +#define GSWIP_MDIO_PHY_FCONTX_MASK 0x0180 +#define GSWIP_MDIO_PHY_FCONTX_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FCONTX_EN 0x0100 +#define GSWIP_MDIO_PHY_FCONTX_DIS 0x0180 +#define GSWIP_MDIO_PHY_FCONRX_MASK 0x0060 +#define GSWIP_MDIO_PHY_FCONRX_AUTO 0x0000 +#define GSWIP_MDIO_PHY_FCONRX_EN 0x0020 +#define GSWIP_MDIO_PHY_FCONRX_DIS 0x0060 +#define GSWIP_MDIO_PHY_ADDR_MASK 0x001f +#define GSWIP_MDIO_PHY_MASK (GSWIP_MDIO_PHY_ADDR_MASK | \ + GSWIP_MDIO_PHY_FCONRX_MASK | \ + GSWIP_MDIO_PHY_FCONTX_MASK | \ + GSWIP_MDIO_PHY_LINK_MASK | \ + GSWIP_MDIO_PHY_SPEED_MASK | \ + GSWIP_MDIO_PHY_FDUP_MASK) + +/* GSWIP MII Registers */ +#define GSWIP_MII_CFG0 0x00 +#define GSWIP_MII_CFG1 0x02 +#define GSWIP_MII_CFG5 0x04 +#define GSWIP_MII_CFG_EN BIT(14) +#define GSWIP_MII_CFG_LDCLKDIS BIT(12) +#define GSWIP_MII_CFG_MODE_MIIP 0x0 +#define GSWIP_MII_CFG_MODE_MIIM 0x1 +#define GSWIP_MII_CFG_MODE_RMIIP 0x2 +#define GSWIP_MII_CFG_MODE_RMIIM 0x3 +#define GSWIP_MII_CFG_MODE_RGMII 0x4 +#define GSWIP_MII_CFG_MODE_MASK 0xf +#define GSWIP_MII_CFG_RATE_M2P5 0x00 +#define GSWIP_MII_CFG_RATE_M25 0x10 +#define GSWIP_MII_CFG_RATE_M125 0x20 +#define GSWIP_MII_CFG_RATE_M50 0x30 +#define GSWIP_MII_CFG_RATE_AUTO 0x40 +#define GSWIP_MII_CFG_RATE_MASK 0x70 +#define GSWIP_MII_PCDU0 0x01 +#define GSWIP_MII_PCDU1 0x03 +#define GSWIP_MII_PCDU5 0x05 +#define GSWIP_MII_PCDU_TXDLY_MASK GENMASK(2, 0) +#define GSWIP_MII_PCDU_RXDLY_MASK GENMASK(9, 7) + +/* GSWIP Core Registers */ +#define GSWIP_SWRES 0x000 +#define GSWIP_SWRES_R1 BIT(1) /* GSWIP Software reset */ +#define GSWIP_SWRES_R0 BIT(0) 
/* GSWIP Hardware reset */ +#define GSWIP_VERSION 0x013 +#define GSWIP_VERSION_REV_SHIFT 0 +#define GSWIP_VERSION_REV_MASK GENMASK(7, 0) +#define GSWIP_VERSION_MOD_SHIFT 8 +#define GSWIP_VERSION_MOD_MASK GENMASK(15, 8) +#define GSWIP_VERSION_2_0 0x100 +#define GSWIP_VERSION_2_1 0x021 +#define GSWIP_VERSION_2_2 0x122 +#define GSWIP_VERSION_2_2_ETC 0x022 + +#define GSWIP_BM_RAM_VAL(x) (0x043 - (x)) +#define GSWIP_BM_RAM_ADDR 0x044 +#define GSWIP_BM_RAM_CTRL 0x045 +#define GSWIP_BM_RAM_CTRL_BAS BIT(15) +#define GSWIP_BM_RAM_CTRL_OPMOD BIT(5) +#define GSWIP_BM_RAM_CTRL_ADDR_MASK GENMASK(4, 0) +#define GSWIP_BM_QUEUE_GCTRL 0x04A +#define GSWIP_BM_QUEUE_GCTRL_GL_MOD BIT(10) +/* buffer management Port Configuration Register */ +#define GSWIP_BM_PCFGp(p) (0x080 + ((p) * 2)) +#define GSWIP_BM_PCFG_CNTEN BIT(0) /* RMON Counter Enable */ +#define GSWIP_BM_PCFG_IGCNT BIT(1) /* Ingress Special Tag RMON count */ +/* buffer management Port Control Register */ +#define GSWIP_BM_RMON_CTRLp(p) (0x81 + ((p) * 2)) +#define GSWIP_BM_CTRL_RMON_RAM1_RES BIT(0) /* Software Reset for RMON RAM 1 */ +#define GSWIP_BM_CTRL_RMON_RAM2_RES BIT(1) /* Software Reset for RMON RAM 2 */ + +/* PCE */ +#define GSWIP_PCE_TBL_KEY(x) (0x447 - (x)) +#define GSWIP_PCE_TBL_MASK 0x448 +#define GSWIP_PCE_TBL_VAL(x) (0x44D - (x)) +#define GSWIP_PCE_TBL_ADDR 0x44E +#define GSWIP_PCE_TBL_CTRL 0x44F +#define GSWIP_PCE_TBL_CTRL_BAS BIT(15) +#define GSWIP_PCE_TBL_CTRL_TYPE BIT(13) +#define GSWIP_PCE_TBL_CTRL_VLD BIT(12) +#define GSWIP_PCE_TBL_CTRL_KEYFORM BIT(11) +#define GSWIP_PCE_TBL_CTRL_GMAP_MASK GENMASK(10, 7) +#define GSWIP_PCE_TBL_CTRL_OPMOD_MASK GENMASK(6, 5) +#define GSWIP_PCE_TBL_CTRL_OPMOD_ADRD 0x00 +#define GSWIP_PCE_TBL_CTRL_OPMOD_ADWR 0x20 +#define GSWIP_PCE_TBL_CTRL_OPMOD_KSRD 0x40 +#define GSWIP_PCE_TBL_CTRL_OPMOD_KSWR 0x60 +#define GSWIP_PCE_TBL_CTRL_ADDR_MASK GENMASK(4, 0) +#define GSWIP_PCE_PMAP1 0x453 /* Monitoring port map */ +#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */ +#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */ +#define GSWIP_PCE_GCTRL_0 0x456 +#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3) +#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */ +#define GSWIP_PCE_GCTRL_1 0x457 +#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */ +#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* MAC address table lock forwarding mode */ +#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA)) +#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) +#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0 +#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1 +#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2 +#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3 +#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7 +#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0) + +#define GSWIP_MAC_FLEN 0x8C5 +#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC)) +#define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Length */ + +/* Ethernet Switch Fetch DMA Port Control Register */ +#define GSWIP_FDMA_PCTRLp(p) (0xA80 + ((p) * 0x6)) +#define GSWIP_FDMA_PCTRL_EN BIT(0) /* FDMA Port Enable */ +#define GSWIP_FDMA_PCTRL_STEN BIT(1) /* Special Tag Insertion Enable */ +#define GSWIP_FDMA_PCTRL_VLANMOD_MASK GENMASK(4, 3) /* VLAN Modification Control */ +#define GSWIP_FDMA_PCTRL_VLANMOD_SHIFT 3 /* VLAN Modification Control */ +#define GSWIP_FDMA_PCTRL_VLANMOD_DIS (0x0 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define GSWIP_FDMA_PCTRL_VLANMOD_PRIO (0x1 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define GSWIP_FDMA_PCTRL_VLANMOD_ID (0x2 << 
GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) +#define GSWIP_FDMA_PCTRL_VLANMOD_BOTH (0x3 << GSWIP_FDMA_PCTRL_VLANMOD_SHIFT) + +/* Ethernet Switch Store DMA Port Control Register */ +#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6)) +#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */ +#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */ +#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */ + +#define XRX200_GPHY_FW_ALIGN (16 * 1024) + +struct gswip_hw_info { + int max_ports; + int cpu_port; +}; + +struct xway_gphy_match_data { + char *fe_firmware_name; + char *ge_firmware_name; +}; + +struct gswip_gphy_fw { + struct clk *clk_gate; + struct reset_control *reset; + u32 fw_addr_offset; + char *fw_name; +}; + +struct gswip_priv { + __iomem void *gswip; + __iomem void *mdio; + __iomem void *mii; + const struct gswip_hw_info *hw_info; + const struct xway_gphy_match_data *gphy_fw_name_cfg; + struct dsa_switch *ds; + struct device *dev; + struct regmap *rcu_regmap; + int num_gphy_fw; + struct gswip_gphy_fw *gphy_fw; +}; + +struct gswip_rmon_cnt_desc { + unsigned int size; + unsigned int offset; + const char *name; +}; + +#define MIB_DESC(_size, _offset, _name) {.size = _size, .offset = _offset, .name = _name} + +static const struct gswip_rmon_cnt_desc gswip_rmon_cnt[] = { + /** Receive Packet Count (only packets that are accepted and not discarded). */ + MIB_DESC(1, 0x1F, "RxGoodPkts"), + MIB_DESC(1, 0x23, "RxUnicastPkts"), + MIB_DESC(1, 0x22, "RxMulticastPkts"), + MIB_DESC(1, 0x21, "RxFCSErrorPkts"), + MIB_DESC(1, 0x1D, "RxUnderSizeGoodPkts"), + MIB_DESC(1, 0x1E, "RxUnderSizeErrorPkts"), + MIB_DESC(1, 0x1B, "RxOversizeGoodPkts"), + MIB_DESC(1, 0x1C, "RxOversizeErrorPkts"), + MIB_DESC(1, 0x20, "RxGoodPausePkts"), + MIB_DESC(1, 0x1A, "RxAlignErrorPkts"), + MIB_DESC(1, 0x12, "Rx64BytePkts"), + MIB_DESC(1, 0x13, "Rx127BytePkts"), + MIB_DESC(1, 0x14, "Rx255BytePkts"), + MIB_DESC(1, 0x15, "Rx511BytePkts"), + MIB_DESC(1, 0x16, "Rx1023BytePkts"), + /** Receive Size 1024-1522 (or more, if configured) Packet Count. */ + MIB_DESC(1, 0x17, "RxMaxBytePkts"), + MIB_DESC(1, 0x18, "RxDroppedPkts"), + MIB_DESC(1, 0x19, "RxFilteredPkts"), + MIB_DESC(2, 0x24, "RxGoodBytes"), + MIB_DESC(2, 0x26, "RxBadBytes"), + MIB_DESC(1, 0x11, "TxAcmDroppedPkts"), + MIB_DESC(1, 0x0C, "TxGoodPkts"), + MIB_DESC(1, 0x06, "TxUnicastPkts"), + MIB_DESC(1, 0x07, "TxMulticastPkts"), + MIB_DESC(1, 0x00, "Tx64BytePkts"), + MIB_DESC(1, 0x01, "Tx127BytePkts"), + MIB_DESC(1, 0x02, "Tx255BytePkts"), + MIB_DESC(1, 0x03, "Tx511BytePkts"), + MIB_DESC(1, 0x04, "Tx1023BytePkts"), + /** Transmit Size 1024-1522 (or more, if configured) Packet Count. 
*/ + MIB_DESC(1, 0x05, "TxMaxBytePkts"), + MIB_DESC(1, 0x08, "TxSingleCollCount"), + MIB_DESC(1, 0x09, "TxMultCollCount"), + MIB_DESC(1, 0x0A, "TxLateCollCount"), + MIB_DESC(1, 0x0B, "TxExcessCollCount"), + MIB_DESC(1, 0x0D, "TxPauseCount"), + MIB_DESC(1, 0x10, "TxDroppedPkts"), + MIB_DESC(2, 0x0E, "TxGoodBytes"), +}; + +static u32 gswip_switch_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->gswip + (offset * 4)); +} + +static void gswip_switch_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->gswip + (offset * 4)); +} + +static void gswip_switch_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_switch_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_switch_w(priv, val, offset); +} + +static u32 gswip_switch_r_timeout(struct gswip_priv *priv, u32 offset, + u32 cleared) +{ + u32 val; + + return readx_poll_timeout(__raw_readl, priv->gswip + (offset * 4), val, + (val & cleared) == 0, 20, 50000); +} + +static u32 gswip_mdio_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->mdio + (offset * 4)); +} + +static void gswip_mdio_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->mdio + (offset * 4)); +} + +static void gswip_mdio_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_mdio_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_mdio_w(priv, val, offset); +} + +static u32 gswip_mii_r(struct gswip_priv *priv, u32 offset) +{ + return __raw_readl(priv->mii + (offset * 4)); +} + +static void gswip_mii_w(struct gswip_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->mii + (offset * 4)); +} + +static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = gswip_mii_r(priv, offset); + + val &= ~(clear); + val |= set; + gswip_mii_w(priv, val, offset); +} + +static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set, + int port) +{ + switch (port) { + case 0: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0); + break; + case 1: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1); + break; + case 5: + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5); + break; + } +} + +static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set, + int port) +{ + switch (port) { + case 0: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU0); + break; + case 1: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU1); + break; + case 5: + gswip_mii_mask(priv, clear, set, GSWIP_MII_PCDU5); + break; + } +} + +static int gswip_mdio_poll(struct gswip_priv *priv) +{ + int cnt = 100; + + while (likely(cnt--)) { + u32 ctrl = gswip_mdio_r(priv, GSWIP_MDIO_CTRL); + + if ((ctrl & GSWIP_MDIO_CTRL_BUSY) == 0) + return 0; + usleep_range(20, 40); + } + + return -ETIMEDOUT; +} + +static int gswip_mdio_wr(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + gswip_mdio_w(priv, val, GSWIP_MDIO_WRITE); + gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_WR | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK), + GSWIP_MDIO_CTRL); + + return 0; +} + +static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg) +{ + struct gswip_priv *priv = bus->priv; + int err; + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for 
MDIO bus busy timed out\n"); + return err; + } + + gswip_mdio_w(priv, GSWIP_MDIO_CTRL_BUSY | GSWIP_MDIO_CTRL_RD | + ((addr & GSWIP_MDIO_CTRL_PHYAD_MASK) << GSWIP_MDIO_CTRL_PHYAD_SHIFT) | + (reg & GSWIP_MDIO_CTRL_REGAD_MASK), + GSWIP_MDIO_CTRL); + + err = gswip_mdio_poll(priv); + if (err) { + dev_err(&bus->dev, "waiting for MDIO bus busy timed out\n"); + return err; + } + + return gswip_mdio_r(priv, GSWIP_MDIO_READ); +} + +static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np) +{ + struct dsa_switch *ds = priv->ds; + + ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev); + if (!ds->slave_mii_bus) + return -ENOMEM; + + ds->slave_mii_bus->priv = priv; + ds->slave_mii_bus->read = gswip_mdio_rd; + ds->slave_mii_bus->write = gswip_mdio_wr; + ds->slave_mii_bus->name = "lantiq,xrx200-mdio"; + snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "%s-mii", + dev_name(priv->dev)); + ds->slave_mii_bus->parent = priv->dev; + ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; + + return of_mdiobus_register(ds->slave_mii_bus, mdio_np); +} + +static int gswip_port_enable(struct dsa_switch *ds, int port, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + /* RMON Counter Enable for port */ + gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port)); + + /* enable port fetch/store dma & VLAN Modification */ + gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_EN | + GSWIP_FDMA_PCTRL_VLANMOD_BOTH, + GSWIP_FDMA_PCTRLp(port)); + gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, + GSWIP_SDMA_PCTRLp(port)); + gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS, + GSWIP_PCE_PCTRL_0p(port)); + + if (!dsa_is_cpu_port(ds, port)) { + u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO | + GSWIP_MDIO_PHY_SPEED_AUTO | + GSWIP_MDIO_PHY_FDUP_AUTO | + GSWIP_MDIO_PHY_FCONTX_AUTO | + GSWIP_MDIO_PHY_FCONRX_AUTO | + (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK); + + gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port)); + /* Activate MDIO auto polling */ + gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0); + } + + return 0; +} + +static void gswip_port_disable(struct dsa_switch *ds, int port, + struct phy_device *phy) +{ + struct gswip_priv *priv = ds->priv; + + if (!dsa_is_cpu_port(ds, port)) { + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN, + GSWIP_MDIO_PHY_LINK_MASK, + GSWIP_MDIO_PHYp(port)); + /* Deactivate MDIO auto polling */ + gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0); + } + + gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, + GSWIP_FDMA_PCTRLp(port)); + gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, + GSWIP_SDMA_PCTRLp(port)); +} + +static int gswip_pce_load_microcode(struct gswip_priv *priv) +{ + int i; + int err; + + gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK | + GSWIP_PCE_TBL_CTRL_OPMOD_MASK, + GSWIP_PCE_TBL_CTRL_OPMOD_ADWR, GSWIP_PCE_TBL_CTRL); + gswip_switch_w(priv, 0, GSWIP_PCE_TBL_MASK); + + for (i = 0; i < ARRAY_SIZE(gswip_pce_microcode); i++) { + gswip_switch_w(priv, i, GSWIP_PCE_TBL_ADDR); + gswip_switch_w(priv, gswip_pce_microcode[i].val_0, + GSWIP_PCE_TBL_VAL(0)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_1, + GSWIP_PCE_TBL_VAL(1)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_2, + GSWIP_PCE_TBL_VAL(2)); + gswip_switch_w(priv, gswip_pce_microcode[i].val_3, + GSWIP_PCE_TBL_VAL(3)); + + /* start the table access: */ + gswip_switch_mask(priv, 0, GSWIP_PCE_TBL_CTRL_BAS, + GSWIP_PCE_TBL_CTRL); + err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL, + GSWIP_PCE_TBL_CTRL_BAS); + if (err) + return err; + } + + /* tell the switch that the microcode 
is loaded */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MC_VALID, + GSWIP_PCE_GCTRL_0); + + return 0; +} + +static int gswip_setup(struct dsa_switch *ds) +{ + struct gswip_priv *priv = ds->priv; + unsigned int cpu_port = priv->hw_info->cpu_port; + int i; + int err; + + gswip_switch_w(priv, GSWIP_SWRES_R0, GSWIP_SWRES); + usleep_range(5000, 10000); + gswip_switch_w(priv, 0, GSWIP_SWRES); + + /* disable port fetch/store dma on all ports */ + for (i = 0; i < priv->hw_info->max_ports; i++) + gswip_port_disable(ds, i, NULL); + + /* enable Switch */ + gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB); + + err = gswip_pce_load_microcode(priv); + if (err) { + dev_err(priv->dev, "writing PCE microcode failed, %i", err); + return err; + } + + /* Default unknown Broadcast/Multicast/Unicast port maps */ + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP1); + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2); + gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3); + + /* disable PHY auto polling */ + gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0); + /* Configure the MDIO Clock 2.5 MHz */ + gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1); + + /* Disable the xMII link */ + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1); + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5); + + /* enable special tag insertion on cpu port */ + gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN, + GSWIP_FDMA_PCTRLp(cpu_port)); + + gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN, + GSWIP_MAC_CTRL_2p(cpu_port)); + gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN); + gswip_switch_mask(priv, 0, GSWIP_BM_QUEUE_GCTRL_GL_MOD, + GSWIP_BM_QUEUE_GCTRL); + + /* VLAN aware Switching */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0); + + /* Mac Address Table Lock */ + gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK | + GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD, + GSWIP_PCE_GCTRL_1); + + gswip_port_enable(ds, cpu_port, NULL); + return 0; +} + +static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_GSWIP; +} + +static void gswip_phylink_validate(struct dsa_switch *ds, int port, + unsigned long *supported, + struct phylink_link_state *state) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + + switch (port) { + case 0: + case 1: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII && + state->interface != PHY_INTERFACE_MODE_RMII) + goto unsupported; + break; + case 2: + case 3: + case 4: + if (state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + case 5: + if (!phy_interface_mode_is_rgmii(state->interface) && + state->interface != PHY_INTERFACE_MODE_INTERNAL) + goto unsupported; + break; + default: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported port: %i\n", port); + return; + } + + /* Allow all the expected bits */ + phylink_set(mask, Autoneg); + phylink_set_port_modes(mask); + phylink_set(mask, Pause); + phylink_set(mask, Asym_Pause); + + /* With the exclusion of MII and Reverse MII, we support Gigabit, + * including Half duplex + */ + if (state->interface != PHY_INTERFACE_MODE_MII && + state->interface != PHY_INTERFACE_MODE_REVMII) { + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseT_Half); + } + + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + 
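+	/* 10 and 100 Mbit/s (half and full duplex) are valid on every xMII mode this switch offers, so they are always advertised */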
phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + + bitmap_and(supported, supported, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + +unsupported: + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + dev_err(ds->dev, "Unsupported interface: %d\n", state->interface); + return; +} + +static void gswip_phylink_mac_config(struct dsa_switch *ds, int port, + unsigned int mode, + const struct phylink_link_state *state) +{ + struct gswip_priv *priv = ds->priv; + u32 miicfg = 0; + + miicfg |= GSWIP_MII_CFG_LDCLKDIS; + + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_INTERNAL: + miicfg |= GSWIP_MII_CFG_MODE_MIIM; + break; + case PHY_INTERFACE_MODE_REVMII: + miicfg |= GSWIP_MII_CFG_MODE_MIIP; + break; + case PHY_INTERFACE_MODE_RMII: + miicfg |= GSWIP_MII_CFG_MODE_RMIIM; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + miicfg |= GSWIP_MII_CFG_MODE_RGMII; + break; + default: + dev_err(ds->dev, + "Unsupported interface: %d\n", state->interface); + return; + } + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port); + + switch (state->interface) { + case PHY_INTERFACE_MODE_RGMII_ID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK | + GSWIP_MII_PCDU_RXDLY_MASK, 0, port); + break; + case PHY_INTERFACE_MODE_RGMII_RXID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_RXDLY_MASK, 0, port); + break; + case PHY_INTERFACE_MODE_RGMII_TXID: + gswip_mii_mask_pcdu(priv, GSWIP_MII_PCDU_TXDLY_MASK, 0, port); + break; + default: + break; + } +} + +static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface) +{ + struct gswip_priv *priv = ds->priv; + + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port); +} + +static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface, + struct phy_device *phydev) +{ + struct gswip_priv *priv = ds->priv; + + /* Enable the xMII interface only for the external PHY */ + if (interface != PHY_INTERFACE_MODE_INTERNAL) + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port); +} + +static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset, + uint8_t *data) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) + strncpy(data + i * ETH_GSTRING_LEN, gswip_rmon_cnt[i].name, + ETH_GSTRING_LEN); +} + +static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, + u32 index) +{ + u32 result; + int err; + + gswip_switch_w(priv, index, GSWIP_BM_RAM_ADDR); + gswip_switch_mask(priv, GSWIP_BM_RAM_CTRL_ADDR_MASK | + GSWIP_BM_RAM_CTRL_OPMOD, + table | GSWIP_BM_RAM_CTRL_BAS, + GSWIP_BM_RAM_CTRL); + + err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, + GSWIP_BM_RAM_CTRL_BAS); + if (err) { + dev_err(priv->dev, "timeout while reading table: %u, index: %u", + table, index); + return 0; + } + + result = gswip_switch_r(priv, GSWIP_BM_RAM_VAL(0)); + result |= gswip_switch_r(priv, GSWIP_BM_RAM_VAL(1)) << 16; + + return result; +} + +static void gswip_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *data) +{ + struct gswip_priv *priv = ds->priv; + const struct gswip_rmon_cnt_desc *rmon_cnt; + int i; + u64 high; + + for (i = 0; i < ARRAY_SIZE(gswip_rmon_cnt); i++) { + rmon_cnt = &gswip_rmon_cnt[i]; + + data[i] = 
gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset); + if (rmon_cnt->size == 2) { + high = gswip_bcm_ram_entry_read(priv, port, + rmon_cnt->offset + 1); + data[i] |= high << 32; + } + } +} + +static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset) +{ + if (sset != ETH_SS_STATS) + return 0; + + return ARRAY_SIZE(gswip_rmon_cnt); +} + +static const struct dsa_switch_ops gswip_switch_ops = { + .get_tag_protocol = gswip_get_tag_protocol, + .setup = gswip_setup, + .port_enable = gswip_port_enable, + .port_disable = gswip_port_disable, + .phylink_validate = gswip_phylink_validate, + .phylink_mac_config = gswip_phylink_mac_config, + .phylink_mac_link_down = gswip_phylink_mac_link_down, + .phylink_mac_link_up = gswip_phylink_mac_link_up, + .get_strings = gswip_get_strings, + .get_ethtool_stats = gswip_get_ethtool_stats, + .get_sset_count = gswip_get_sset_count, +}; + +static const struct xway_gphy_match_data xrx200a1x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a14.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a14.bin", +}; + +static const struct xway_gphy_match_data xrx200a2x_gphy_data = { + .fe_firmware_name = "lantiq/xrx200_phy22f_a22.bin", + .ge_firmware_name = "lantiq/xrx200_phy11g_a22.bin", +}; + +static const struct xway_gphy_match_data xrx300_gphy_data = { + .fe_firmware_name = "lantiq/xrx300_phy22f_a21.bin", + .ge_firmware_name = "lantiq/xrx300_phy11g_a21.bin", +}; + +static const struct of_device_id xway_gphy_match[] = { + { .compatible = "lantiq,xrx200-gphy-fw", .data = NULL }, + { .compatible = "lantiq,xrx200a1x-gphy-fw", .data = &xrx200a1x_gphy_data }, + { .compatible = "lantiq,xrx200a2x-gphy-fw", .data = &xrx200a2x_gphy_data }, + { .compatible = "lantiq,xrx300-gphy-fw", .data = &xrx300_gphy_data }, + { .compatible = "lantiq,xrx330-gphy-fw", .data = &xrx300_gphy_data }, + {}, +}; + +static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gphy_fw) +{ + struct device *dev = priv->dev; + const struct firmware *fw; + void *fw_addr; + dma_addr_t dma_addr; + dma_addr_t dev_addr; + size_t size; + int ret; + + ret = clk_prepare_enable(gphy_fw->clk_gate); + if (ret) + return ret; + + reset_control_assert(gphy_fw->reset); + + ret = request_firmware(&fw, gphy_fw->fw_name, dev); + if (ret) { + dev_err(dev, "failed to load firmware: %s, error: %i\n", + gphy_fw->fw_name, ret); + return ret; + } + + /* GPHY cores need the firmware code in a persistent and contiguous + * memory area with a 16 kB boundary aligned start address. 
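 * Over-allocating by the alignment guarantees such a start address exists
 * inside the buffer (a sketch of the reasoning matching the PTR_ALIGN()
 * and ALIGN() calls below): with size = fw->size + XRX200_GPHY_FW_ALIGN,
 * the first aligned address A = ALIGN(start, XRX200_GPHY_FW_ALIGN)
 * satisfies A - start < XRX200_GPHY_FW_ALIGN, hence
 * A + fw->size <= start + size.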
+ */ + size = fw->size + XRX200_GPHY_FW_ALIGN; + + fw_addr = dmam_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL); + if (fw_addr) { + fw_addr = PTR_ALIGN(fw_addr, XRX200_GPHY_FW_ALIGN); + dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); + memcpy(fw_addr, fw->data, fw->size); + } else { + dev_err(dev, "failed to alloc firmware memory\n"); + release_firmware(fw); + return -ENOMEM; + } + + release_firmware(fw); + + ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, dev_addr); + if (ret) + return ret; + + reset_control_deassert(gphy_fw->reset); + + return ret; +} + +static int gswip_gphy_fw_probe(struct gswip_priv *priv, + struct gswip_gphy_fw *gphy_fw, + struct device_node *gphy_fw_np, int i) +{ + struct device *dev = priv->dev; + u32 gphy_mode; + int ret; + char gphyname[10]; + + snprintf(gphyname, sizeof(gphyname), "gphy%d", i); + + gphy_fw->clk_gate = devm_clk_get(dev, gphyname); + if (IS_ERR(gphy_fw->clk_gate)) { + dev_err(dev, "Failed to lookup gate clock\n"); + return PTR_ERR(gphy_fw->clk_gate); + } + + ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset); + if (ret) + return ret; + + ret = of_property_read_u32(gphy_fw_np, "lantiq,gphy-mode", &gphy_mode); + /* Default to GE mode */ + if (ret) + gphy_mode = GPHY_MODE_GE; + + switch (gphy_mode) { + case GPHY_MODE_FE: + gphy_fw->fw_name = priv->gphy_fw_name_cfg->fe_firmware_name; + break; + case GPHY_MODE_GE: + gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name; + break; + default: + dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); + return -EINVAL; + } + + gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np); + if (IS_ERR(gphy_fw->reset)) { + if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER) + dev_err(dev, "Failed to lookup gphy reset\n"); + return PTR_ERR(gphy_fw->reset); + } + + return gswip_gphy_fw_load(priv, gphy_fw); +} + +static void gswip_gphy_fw_remove(struct gswip_priv *priv, + struct gswip_gphy_fw *gphy_fw) +{ + int ret; + + /* check if the device was fully probed */ + if (!gphy_fw->fw_name) + return; + + ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0); + if (ret) + dev_err(priv->dev, "can not reset GPHY FW pointer"); + + clk_disable_unprepare(gphy_fw->clk_gate); + + reset_control_put(gphy_fw->reset); +} + +static int gswip_gphy_fw_list(struct gswip_priv *priv, + struct device_node *gphy_fw_list_np, u32 version) +{ + struct device *dev = priv->dev; + struct device_node *gphy_fw_np; + const struct of_device_id *match; + int err; + int i = 0; + + /* The VRX200 rev 1.1 uses the GSWIP 2.0 and needs the older + * GPHY firmware. The VRX200 rev 1.2 uses the GSWIP 2.1 and also + * needs a different GPHY firmware. 
+ */ + if (of_device_is_compatible(gphy_fw_list_np, "lantiq,xrx200-gphy-fw")) { + switch (version) { + case GSWIP_VERSION_2_0: + priv->gphy_fw_name_cfg = &xrx200a1x_gphy_data; + break; + case GSWIP_VERSION_2_1: + priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data; + break; + default: + dev_err(dev, "unknown GSWIP version: 0x%x", version); + return -ENOENT; + } + } + + match = of_match_node(xway_gphy_match, gphy_fw_list_np); + if (match && match->data) + priv->gphy_fw_name_cfg = match->data; + + if (!priv->gphy_fw_name_cfg) { + dev_err(dev, "GPHY compatible type not supported"); + return -ENOENT; + } + + priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np); + if (!priv->num_gphy_fw) + return -ENOENT; + + priv->rcu_regmap = syscon_regmap_lookup_by_phandle(gphy_fw_list_np, + "lantiq,rcu"); + if (IS_ERR(priv->rcu_regmap)) + return PTR_ERR(priv->rcu_regmap); + + priv->gphy_fw = devm_kmalloc_array(dev, priv->num_gphy_fw, + sizeof(*priv->gphy_fw), + GFP_KERNEL | __GFP_ZERO); + if (!priv->gphy_fw) + return -ENOMEM; + + for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { + err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], + gphy_fw_np, i); + if (err) + goto remove_gphy; + i++; + } + + return 0; + +remove_gphy: + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + return err; +} + +static int gswip_probe(struct platform_device *pdev) +{ + struct gswip_priv *priv; + struct resource *gswip_res, *mdio_res, *mii_res; + struct device_node *mdio_np, *gphy_fw_np; + struct device *dev = &pdev->dev; + int err; + int i; + u32 version; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + gswip_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->gswip = devm_ioremap_resource(dev, gswip_res); + if (IS_ERR(priv->gswip)) + return PTR_ERR(priv->gswip); + + mdio_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->mdio = devm_ioremap_resource(dev, mdio_res); + if (IS_ERR(priv->mdio)) + return PTR_ERR(priv->mdio); + + mii_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + priv->mii = devm_ioremap_resource(dev, mii_res); + if (IS_ERR(priv->mii)) + return PTR_ERR(priv->mii); + + priv->hw_info = of_device_get_match_data(dev); + if (!priv->hw_info) + return -EINVAL; + + priv->ds = dsa_switch_alloc(dev, priv->hw_info->max_ports); + if (!priv->ds) + return -ENOMEM; + + priv->ds->priv = priv; + priv->ds->ops = &gswip_switch_ops; + priv->dev = dev; + version = gswip_switch_r(priv, GSWIP_VERSION); + + /* bring up the mdio bus */ + gphy_fw_np = of_find_compatible_node(pdev->dev.of_node, NULL, + "lantiq,gphy-fw"); + if (gphy_fw_np) { + err = gswip_gphy_fw_list(priv, gphy_fw_np, version); + if (err) { + dev_err(dev, "gphy fw probe failed\n"); + return err; + } + } + + /* bring up the mdio bus */ + mdio_np = of_find_compatible_node(pdev->dev.of_node, NULL, + "lantiq,xrx200-mdio"); + if (mdio_np) { + err = gswip_mdio(priv, mdio_np); + if (err) { + dev_err(dev, "mdio probe failed\n"); + goto gphy_fw; + } + } + + err = dsa_register_switch(priv->ds); + if (err) { + dev_err(dev, "dsa switch register failed: %i\n", err); + goto mdio_bus; + } + if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) { + dev_err(dev, "wrong CPU port defined, HW only supports port: %i", + priv->hw_info->cpu_port); + err = -EINVAL; + goto mdio_bus; + } + + platform_set_drvdata(pdev, priv); + + dev_info(dev, "probed GSWIP version %lx mod %lx\n", + (version & GSWIP_VERSION_REV_MASK) >> GSWIP_VERSION_REV_SHIFT, + (version & 
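/*
 * The (value & MASK) >> SHIFT expressions here are the usual register
 * field extraction; with constant masks this is equivalent to FIELD_GET()
 * from <linux/bitfield.h> (an alternative sketch, not what the driver
 * uses):
 *
 *	rev = FIELD_GET(GSWIP_VERSION_REV_MASK, version);
 *	mod = FIELD_GET(GSWIP_VERSION_MOD_MASK, version);
 */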
GSWIP_VERSION_MOD_MASK) >> GSWIP_VERSION_MOD_SHIFT); + return 0; + +mdio_bus: + if (mdio_np) + mdiobus_unregister(priv->ds->slave_mii_bus); +gphy_fw: + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + return err; +} + +static int gswip_remove(struct platform_device *pdev) +{ + struct gswip_priv *priv = platform_get_drvdata(pdev); + int i; + + if (!priv) + return 0; + + /* disable the switch */ + gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB); + + dsa_unregister_switch(priv->ds); + + if (priv->ds->slave_mii_bus) + mdiobus_unregister(priv->ds->slave_mii_bus); + + for (i = 0; i < priv->num_gphy_fw; i++) + gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]); + + return 0; +} + +static const struct gswip_hw_info gswip_xrx200 = { + .max_ports = 7, + .cpu_port = 6, +}; + +static const struct of_device_id gswip_of_match[] = { + { .compatible = "lantiq,xrx200-gswip", .data = &gswip_xrx200 }, + {}, +}; +MODULE_DEVICE_TABLE(of, gswip_of_match); + +static struct platform_driver gswip_driver = { + .probe = gswip_probe, + .remove = gswip_remove, + .driver = { + .name = "gswip", + .of_match_table = gswip_of_match, + }, +}; + +module_platform_driver(gswip_driver); + +MODULE_AUTHOR("Hauke Mehrtens <hauke@hauke-m.de>"); +MODULE_DESCRIPTION("Lantiq / Intel GSWIP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/dsa/lantiq_pce.h b/drivers/net/dsa/lantiq_pce.h new file mode 100644 index 000000000000..180663138e75 --- /dev/null +++ b/drivers/net/dsa/lantiq_pce.h @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCE microcode extracted from UGW 7.1.1 switch api + * + * Copyright (c) 2012, 2014, 2015 Lantiq Deutschland GmbH + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +enum { + OUT_MAC0 = 0, + OUT_MAC1, + OUT_MAC2, + OUT_MAC3, + OUT_MAC4, + OUT_MAC5, + OUT_ETHTYP, + OUT_VTAG0, + OUT_VTAG1, + OUT_ITAG0, + OUT_ITAG1, /*10 */ + OUT_ITAG2, + OUT_ITAG3, + OUT_IP0, + OUT_IP1, + OUT_IP2, + OUT_IP3, + OUT_SIP0, + OUT_SIP1, + OUT_SIP2, + OUT_SIP3, /*20*/ + OUT_SIP4, + OUT_SIP5, + OUT_SIP6, + OUT_SIP7, + OUT_DIP0, + OUT_DIP1, + OUT_DIP2, + OUT_DIP3, + OUT_DIP4, + OUT_DIP5, /*30*/ + OUT_DIP6, + OUT_DIP7, + OUT_SESID, + OUT_PROT, + OUT_APP0, + OUT_APP1, + OUT_IGMP0, + OUT_IGMP1, + OUT_IPOFF, /*39*/ + OUT_NONE = 63, +}; + +/* parser's microcode length type */ +#define INSTR 0 +#define IPV6 1 +#define LENACCU 2 + +/* parser's microcode flag type */ +enum { + FLAG_ITAG = 0, + FLAG_VLAN, + FLAG_SNAP, + FLAG_PPPOE, + FLAG_IPV6, + FLAG_IPV6FL, + FLAG_IPV4, + FLAG_IGMP, + FLAG_TU, + FLAG_HOP, + FLAG_NN1, /*10 */ + FLAG_NN2, + FLAG_END, + FLAG_NO, /*13*/ +}; + +struct gswip_pce_microcode { + u16 val_3; + u16 val_2; + u16 val_1; + u16 val_0; +}; + +#define MC_ENTRY(val, msk, ns, out, len, type, flags, ipv4_len) \ + { val, msk, ((ns) << 10 | (out) << 4 | (len) >> 1),\ + ((len) & 1) << 15 | (type) << 13 | (flags) << 9 | (ipv4_len) << 8 } +static const struct gswip_pce_microcode gswip_pce_microcode[] = { + /* value mask ns fields L type flags ipv4_len */ + MC_ENTRY(0x88c3, 0xFFFF, 1, OUT_ITAG0, 4, INSTR, FLAG_ITAG, 0), + MC_ENTRY(0x8100, 0xFFFF, 2, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x88A8, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x8100, 0xFFFF, 1, OUT_VTAG0, 2, INSTR, FLAG_VLAN, 0), + MC_ENTRY(0x8864, 0xFFFF, 17, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0800, 0xFFFF, 21, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x86DD, 0xFFFF, 22, 
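/*
 * How MC_ENTRY() packs the parser fields, read directly off the macro
 * above (a sketch of the bit layout, not documented hardware semantics):
 *
 *	val_1 = ns << 10 | out << 4 | len >> 1;
 *	val_0 = (len & 1) << 15 | type << 13 | flags << 9 | ipv4_len << 8;
 *
 * so the next-state 'ns' sits in val_1[15:10], the output field selector
 * 'out' in val_1[9:4], and 'len' is split: its upper bits land in
 * val_1[3:0] while its least significant bit becomes val_0[15].
 */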
OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x8863, 0xFFFF, 16, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xF800, 10, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0600, 0x0600, 40, OUT_ETHTYP, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 12, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0xAAAA, 0xFFFF, 14, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0300, 0xFF00, 41, OUT_NONE, 0, INSTR, FLAG_SNAP, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_DIP7, 3, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 18, OUT_DIP7, 3, INSTR, FLAG_PPPOE, 0), + MC_ENTRY(0x0021, 0xFFFF, 21, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0057, 0xFFFF, 22, OUT_NONE, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x4000, 0xF000, 24, OUT_IP0, 4, INSTR, FLAG_IPV4, 1), + MC_ENTRY(0x6000, 0xF000, 27, OUT_IP0, 3, INSTR, FLAG_IPV6, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 25, OUT_IP3, 2, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 26, OUT_SIP0, 4, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, LENACCU, FLAG_NO, 0), + MC_ENTRY(0x1100, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0600, 0xFF00, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_HOP, 0), + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN1, 0), + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_IP3, 17, INSTR, FLAG_NN2, 0), + MC_ENTRY(0x0000, 0x0000, 39, OUT_PROT, 1, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x00E0, 35, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_NONE, 0, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_HOP, 0), + MC_ENTRY(0x2B00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN1, 0), + MC_ENTRY(0x3C00, 0xFF00, 33, OUT_NONE, 0, IPV6, FLAG_NN2, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_PROT, 1, IPV6, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 40, OUT_SIP0, 16, INSTR, FLAG_NO, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_APP0, 4, INSTR, FLAG_IGMP, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 
0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), + MC_ENTRY(0x0000, 0x0000, 41, OUT_NONE, 0, INSTR, FLAG_END, 0), +}; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8da3d39e3218..78ce820b5257 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -434,7 +434,7 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip) err = request_threaded_irq(chip->irq, NULL, mv88e6xxx_g1_irq_thread_fn, - IRQF_ONESHOT, + IRQF_ONESHOT | IRQF_SHARED, dev_name(chip->dev), chip); if (err) mv88e6xxx_g1_irq_free_common(chip); @@ -575,6 +575,13 @@ restore_link: return err; } +static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port) +{ + struct mv88e6xxx_chip *chip = ds->priv; + + return port < chip->info->num_internal_phys; +} + /* We expect the switch to perform auto negotiation if there is a real * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. @@ -585,7 +592,8 @@ static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (!phy_is_pseudo_fixed_link(phydev)) + if (!phy_is_pseudo_fixed_link(phydev) && + mv88e6xxx_phy_is_internal(ds, port)) return; mutex_lock(&chip->reg_lock); @@ -709,13 +717,17 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int speed, duplex, link, pause, err; - if (mode == MLO_AN_PHY) + if ((mode == MLO_AN_PHY) && mv88e6xxx_phy_is_internal(ds, port)) return; if (mode == MLO_AN_FIXED) { link = LINK_FORCED_UP; speed = state->speed; duplex = state->duplex; + } else if (!mv88e6xxx_phy_is_internal(ds, port)) { + link = state->link; + speed = state->speed; + duplex = state->duplex; } else { speed = SPEED_UNFORCED; duplex = DUPLEX_UNFORCED; @@ -3160,6 +3172,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6352_phylink_validate, }; @@ -3366,6 +3380,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -3664,6 +3680,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6352_serdes_power, + .serdes_irq_setup = mv88e6352_serdes_irq_setup, + .serdes_irq_free = mv88e6352_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 46af8052e535..152a65d46e0b 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -110,6 +110,9 @@ int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy, err = mv88e6xxx_phy_page_get(chip, phy, page); if (!err) { err = mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page); + if (!err) + err = mv88e6xxx_phy_write(chip, phy, reg, val); + 
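/*
 * With the two lines added above, the paged access is complete; before
 * this fix the helper selected the page but never issued the register
 * write itself. The full sequence is (sketch):
 *
 *	select the page        (mv88e6xxx_phy_page_get plus the write of
 *	                        MV88E6XXX_PHY_PAGE)
 *	write reg on that page (the mv88e6xxx_phy_write(chip, phy, reg, val)
 *	                        call that was missing)
 *	restore the page       (mv88e6xxx_phy_page_put below)
 */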
mv88e6xxx_phy_page_put(chip, phy); } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index e82983975754..bb69650ff772 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -185,6 +185,111 @@ int mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, return ARRAY_SIZE(mv88e6352_serdes_hw_stats); } +static void mv88e6352_serdes_irq_link(struct mv88e6xxx_chip *chip, int port) +{ + struct dsa_switch *ds = chip->ds; + u16 status; + bool up; + + mv88e6352_serdes_read(chip, MII_BMSR, &status); + + /* Status must be read twice in order to give the current link + * status. Otherwise the change in link status since the last + * read of the register is returned. + */ + mv88e6352_serdes_read(chip, MII_BMSR, &status); + + up = status & BMSR_LSTATUS; + + dsa_port_phylink_mac_change(ds, port, up); +} + +static irqreturn_t mv88e6352_serdes_thread_fn(int irq, void *dev_id) +{ + struct mv88e6xxx_port *port = dev_id; + struct mv88e6xxx_chip *chip = port->chip; + irqreturn_t ret = IRQ_NONE; + u16 status; + int err; + + mutex_lock(&chip->reg_lock); + + err = mv88e6352_serdes_read(chip, MV88E6352_SERDES_INT_STATUS, &status); + if (err) + goto out; + + if (status & MV88E6352_SERDES_INT_LINK_CHANGE) { + ret = IRQ_HANDLED; + mv88e6352_serdes_irq_link(chip, port->port); + } +out: + mutex_unlock(&chip->reg_lock); + + return ret; +} + +static int mv88e6352_serdes_irq_enable(struct mv88e6xxx_chip *chip) +{ + return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, + MV88E6352_SERDES_INT_LINK_CHANGE); +} + +static int mv88e6352_serdes_irq_disable(struct mv88e6xxx_chip *chip) +{ + return mv88e6352_serdes_write(chip, MV88E6352_SERDES_INT_ENABLE, 0); +} + +int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +{ + int err; + + if (!mv88e6352_port_has_serdes(chip, port)) + return 0; + + chip->ports[port].serdes_irq = irq_find_mapping(chip->g2_irq.domain, + MV88E6352_SERDES_IRQ); + if (chip->ports[port].serdes_irq < 0) { + dev_err(chip->dev, "Unable to map SERDES irq: %d\n", + chip->ports[port].serdes_irq); + return chip->ports[port].serdes_irq; + } + + /* Requesting the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. + */ + mutex_unlock(&chip->reg_lock); + err = request_threaded_irq(chip->ports[port].serdes_irq, NULL, + mv88e6352_serdes_thread_fn, + IRQF_ONESHOT, "mv88e6xxx-serdes", + &chip->ports[port]); + mutex_lock(&chip->reg_lock); + + if (err) { + dev_err(chip->dev, "Unable to request SERDES interrupt: %d\n", + err); + return err; + } + + return mv88e6352_serdes_irq_enable(chip); +} + +void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +{ + if (!mv88e6352_port_has_serdes(chip, port)) + return; + + mv88e6352_serdes_irq_disable(chip); + + /* Freeing the IRQ will trigger irq callbacks. So we cannot + * hold the reg_lock. + */ + mutex_unlock(&chip->reg_lock); + free_irq(chip->ports[port].serdes_irq, &chip->ports[port]); + mutex_lock(&chip->reg_lock); + + chip->ports[port].serdes_irq = 0; +} + /* Return the SERDES lane address a port is using. Only Ports 9 and 10 * have SERDES lanes. Returns -ENODEV if a port does not have a lane. 
*/ diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index b1496de9c6fe..7870c5a9ef12 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -18,6 +18,19 @@ #define MV88E6352_ADDR_SERDES 0x0f #define MV88E6352_SERDES_PAGE_FIBER 0x01 +#define MV88E6352_SERDES_IRQ 0x0b +#define MV88E6352_SERDES_INT_ENABLE 0x12 +#define MV88E6352_SERDES_INT_SPEED_CHANGE BIT(14) +#define MV88E6352_SERDES_INT_DUPLEX_CHANGE BIT(13) +#define MV88E6352_SERDES_INT_PAGE_RX BIT(12) +#define MV88E6352_SERDES_INT_AN_COMPLETE BIT(11) +#define MV88E6352_SERDES_INT_LINK_CHANGE BIT(10) +#define MV88E6352_SERDES_INT_SYMBOL_ERROR BIT(9) +#define MV88E6352_SERDES_INT_FALSE_CARRIER BIT(8) +#define MV88E6352_SERDES_INT_FIFO_OVER_UNDER BIT(7) +#define MV88E6352_SERDES_INT_FIBRE_ENERGY BIT(4) +#define MV88E6352_SERDES_INT_STATUS 0x13 + #define MV88E6341_ADDR_SERDES 0x15 @@ -73,5 +86,8 @@ int mv88e6390_serdes_irq_enable(struct mv88e6xxx_chip *chip, int port, int lane); int mv88e6390_serdes_irq_disable(struct mv88e6xxx_chip *chip, int port, int lane); +int mv88e6352_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); +void mv88e6352_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); + #endif diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 2a0ddec1dd56..3dcc61821ed5 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -377,9 +377,7 @@ static int ax_mii_probe(struct net_device *dev) return ret; } - /* mask with MAC supported features */ - phy_dev->supported &= PHY_BASIC_FEATURES; - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, SPEED_100); netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq); diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c index 32e9627e3880..77191a281866 100644 --- a/drivers/net/ethernet/8390/etherh.c +++ b/drivers/net/ethernet/8390/etherh.c @@ -564,26 +564,29 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i sizeof(info->bus_info)); } -static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { - cmd->supported = etherh_priv(dev)->supported; - ethtool_cmd_speed_set(cmd, SPEED_10); - cmd->duplex = DUPLEX_HALF; - cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; - cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + etherh_priv(dev)->supported); + cmd->base.speed = SPEED_10; + cmd->base.duplex = DUPLEX_HALF; + cmd->base.port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC; + cmd->base.autoneg = (dev->flags & IFF_AUTOMEDIA ? 
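/*
 * Conversion pattern used in this hunk (taken from the lines above): the
 * legacy u32 'supported' mask becomes a link-mode bitmap via
 *
 *	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
 *						legacy_u32);
 *
 * while the scalar speed/duplex/port/autoneg fields move from struct
 * ethtool_cmd into cmd->base.
 */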
AUTONEG_ENABLE : + AUTONEG_DISABLE); return 0; } -static int etherh_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int etherh_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) { - switch (cmd->autoneg) { + switch (cmd->base.autoneg) { case AUTONEG_ENABLE: dev->flags |= IFF_AUTOMEDIA; break; case AUTONEG_DISABLE: - switch (cmd->port) { + switch (cmd->base.port) { case PORT_TP: dev->if_port = IF_PORT_10BASET; break; @@ -622,12 +625,12 @@ static void etherh_set_msglevel(struct net_device *dev, u32 v) } static const struct ethtool_ops etherh_ethtool_ops = { - .get_settings = etherh_get_settings, - .set_settings = etherh_set_settings, - .get_drvinfo = etherh_get_drvinfo, - .get_ts_info = ethtool_op_get_ts_info, - .get_msglevel = etherh_get_msglevel, - .set_msglevel = etherh_set_msglevel, + .get_drvinfo = etherh_get_drvinfo, + .get_ts_info = ethtool_op_get_ts_info, + .get_msglevel = etherh_get_msglevel, + .set_msglevel = etherh_set_msglevel, + .get_link_ksettings = etherh_get_link_ksettings, + .set_link_ksettings = etherh_set_link_ksettings, }; static const struct net_device_ops etherh_netdev_ops = { diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6fde68aa13a4..885e00d17807 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -108,6 +108,13 @@ config LANTIQ_ETOP ---help--- Support for the MII0 inside the Lantiq SoC +config LANTIQ_XRX200 + tristate "Lantiq / Intel xRX200 PMAC network driver" + depends on SOC_TYPE_XWAY + ---help--- + Support for the PMAC of the Gigabit switch (GSWIP) inside the + Lantiq / Intel VRX200 VDSL SoC + source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index b45d5f626b59..7b5bf9682066 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o +obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 4309be3724ad..7c9348a26cbb 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1279,9 +1279,9 @@ static int greth_mdio_probe(struct net_device *dev) } if (greth->gbit_mac) - phy->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phy, SPEED_1000); else - phy->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phy, SPEED_100); phy->advertising = phy->supported; diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 48220b6c600d..ea34bcb868b5 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3258,19 +3258,11 @@ static int et131x_mii_probe(struct net_device *netdev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_TP); + phy_set_max_speed(phydev, SPEED_100); if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) - phydev->supported |= SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full; + 
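/*
 * phy_set_max_speed(), used across these conversions, is roughly the old
 * open-coded pattern (a sketch, not the helper's exact body):
 *
 *	phydev->supported &= ~(link modes faster than max_speed);
 *	phydev->advertising = phydev->supported;
 *
 * which is why the explicit 'advertising = supported' assignments can be
 * dropped alongside the hand-built masks.
 */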
phy_set_max_speed(phydev, SPEED_1000); - phydev->advertising = phydev->supported; phydev->autoneg = AUTONEG_ENABLE; phy_attached_info(phydev); diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 3143de45baaa..e1acafa82214 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -172,8 +172,7 @@ static int emac_mdio_probe(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); db->link = 0; db->speed = 0; diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index baca8f704a45..02921d877c08 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -835,13 +835,10 @@ static int init_phy(struct net_device *dev) } /* Stop Advertising 1000BASE Capability if interface is not GMII - * Note: Checkpatch throws CHECKs for the camel case defines below, - * it's ok to ignore. */ if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) || (priv->phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* Broken HW is sometimes missing the pull-up resistor on the * MDIO line, which results in reads to non-existent devices returning diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 73ca8879ada7..7c1eb304c27e 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -564,17 +564,7 @@ static int au1000_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */ - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); aup->old_link = 0; aup->old_speed = 0; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3ceb4f95ca7c..289129011b9f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -879,8 +879,8 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x00, 0x9140); phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; phy_data->phydev->advertising = phy_data->phydev->supported; + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "Finisar PHY quirk in place\n"); @@ -951,8 +951,8 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x00, reg & ~0x00800); phy_data->phydev->supported = PHY_GBIT_FEATURES; - phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; phy_data->phydev->advertising = phy_data->phydev->supported; + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, "BelFuse PHY quirk in place\n"); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c index 4f50f11718f4..78dd09b5beeb 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c +++ 
b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c @@ -306,45 +306,25 @@ static int xgene_set_pauseparam(struct net_device *ndev, { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct phy_device *phydev = ndev->phydev; - u32 oldadv, newadv; if (phy_interface_mode_is_rgmii(pdata->phy_mode) || pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) { if (!phydev) return -EINVAL; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - pp->rx_pause != pp->tx_pause)) + if (!phy_validate_pause(phydev, pp)) return -EINVAL; pdata->pause_autoneg = pp->autoneg; pdata->tx_pause = pp->tx_pause; pdata->rx_pause = pp->rx_pause; - oldadv = phydev->advertising; - newadv = oldadv & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause); - if (pp->rx_pause) - newadv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (pp->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - - if (oldadv ^ newadv) { - phydev->advertising = newadv; - - if (phydev->autoneg) - return phy_start_aneg(phydev); - - if (!pp->autoneg) { - pdata->mac_ops->flowctl_tx(pdata, - pdata->tx_pause); - pdata->mac_ops->flowctl_rx(pdata, - pdata->rx_pause); - } + if (!pp->autoneg) { + pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause); + pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause); } - } else { if (pp->autoneg) return -EINVAL; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 078a04dc1182..e3560311711a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -895,12 +895,10 @@ int xgene_enet_phy_connect(struct net_device *ndev) } pdata->phy_speed = SPEED_UNKNOWN; - phy_dev->supported &= ~SUPPORTED_10baseT_Half & - ~SUPPORTED_100baseT_Half & - ~SUPPORTED_1000baseT_Half; - phy_dev->supported |= SUPPORTED_Pause | - SUPPORTED_Asym_Pause; - phy_dev->advertising = phy_dev->supported; + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_support_asym_pause(phy_dev); return 0; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index d52b088ff8f0..becb578211ed 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -57,4 +57,9 @@ #define AQ_NIC_RATE_1G BIT(4) #define AQ_NIC_RATE_100M BIT(5) +#define AQ_NIC_RATE_EEE_10G BIT(6) +#define AQ_NIC_RATE_EEE_5G BIT(7) +#define AQ_NIC_RATE_EEE_2GS BIT(8) +#define AQ_NIC_RATE_EEE_1G BIT(9) + #endif /* AQ_COMMON_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 08c9fa6ca71f..6a633c70f603 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -98,8 +98,8 @@ static void aq_ethtool_stats(struct net_device *ndev, struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + - ARRAY_SIZE(aq_ethtool_queue_stat_names) * - cfg->vecs) * sizeof(u64)); + ARRAY_SIZE(aq_ethtool_queue_stat_names) * + cfg->vecs) * sizeof(u64)); aq_nic_get_stats(aq_nic, data); } @@ -285,6 +285,111 @@ static int aq_ethtool_set_coalesce(struct net_device *ndev, return aq_nic_update_interrupt_moderation_settings(aq_nic); } +static void 
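/*
 * The xgene pauseparam rework above leans on phylib's standard 802.3
 * pause mapping; judging from the removed open-coded version,
 * phy_set_asym_pause(phydev, rx, tx) advertises (sketch of the
 * convention, not the helper's body):
 *
 *	Pause      when rx is requested
 *	Asym_Pause when rx != tx
 *
 * and phy_validate_pause() rejects rx != tx when the PHY cannot do
 * asymmetric pause.
 */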
aq_ethtool_get_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + + wol->supported = WAKE_MAGIC; + wol->wolopts = 0; + + if (cfg->wol) + wol->wolopts |= WAKE_MAGIC; +} + +static int aq_ethtool_set_wol(struct net_device *ndev, + struct ethtool_wolinfo *wol) +{ + struct pci_dev *pdev = to_pci_dev(ndev->dev.parent); + struct aq_nic_s *aq_nic = netdev_priv(ndev); + struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); + int err = 0; + + if (wol->wolopts & WAKE_MAGIC) + cfg->wol |= AQ_NIC_WOL_ENABLED; + else + cfg->wol &= ~AQ_NIC_WOL_ENABLED; + err = device_set_wakeup_enable(&pdev->dev, wol->wolopts); + + return err; +} + +static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= SUPPORTED_10000baseT_Full; + + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= SUPPORTED_2500baseX_Full; + + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= SUPPORTED_1000baseT_Full; + + return rate; +} + +static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + int err = 0; + + if (!aq_nic->aq_fw_ops->get_eee_rate) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + eee->supported = eee_mask_to_ethtool_mask(supported_rates); + + if (aq_nic->aq_nic_cfg.eee_speeds) + eee->advertised = eee->supported; + + eee->lp_advertised = eee_mask_to_ethtool_mask(rate); + + eee->eee_enabled = !!eee->advertised; + + eee->tx_lpi_enabled = eee->eee_enabled; + if (eee->advertised & eee->lp_advertised) + eee->eee_active = true; + + return 0; +} + +static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + u32 rate, supported_rates; + struct aq_nic_cfg_s *cfg; + int err = 0; + + cfg = aq_nic_get_cfg(aq_nic); + + if (unlikely(!aq_nic->aq_fw_ops->get_eee_rate || + !aq_nic->aq_fw_ops->set_eee_rate)) + return -EOPNOTSUPP; + + err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate, + &supported_rates); + if (err < 0) + return err; + + if (eee->eee_enabled) { + rate = supported_rates; + cfg->eee_speeds = rate; + } else { + rate = 0; + cfg->eee_speeds = 0; + } + + return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate); +} + static int aq_ethtool_nway_reset(struct net_device *ndev) { struct aq_nic_s *aq_nic = netdev_priv(ndev); @@ -403,9 +508,13 @@ const struct ethtool_ops aq_ethtool_ops = { .get_drvinfo = aq_ethtool_get_drvinfo, .get_strings = aq_ethtool_get_strings, .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size, + .get_wol = aq_ethtool_get_wol, + .set_wol = aq_ethtool_set_wol, .nway_reset = aq_ethtool_nway_reset, .get_ringparam = aq_get_ringparam, .set_ringparam = aq_set_ringparam, + .get_eee = aq_ethtool_get_eee, + .set_eee = aq_ethtool_set_eee, .get_pauseparam = aq_ethtool_get_pauseparam, .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 5c00671f248d..e8689241204e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -112,7 +112,7 @@ struct aq_hw_s { const struct aq_fw_ops *aq_fw_ops; void __iomem *mmio; struct aq_hw_link_status_s aq_link_status; - struct hw_aq_atl_utils_mbox 
mbox; + struct hw_atl_utils_mbox mbox; struct hw_atl_stats_s last_stats; struct aq_stats_s curr_stats; u64 speed; @@ -124,7 +124,7 @@ struct aq_hw_s { u32 mbox_addr; u32 rpc_addr; u32 rpc_tid; - struct hw_aq_atl_utils_fw_rpc rpc; + struct hw_atl_utils_fw_rpc rpc; }; struct aq_ring_s; @@ -204,7 +204,6 @@ struct aq_hw_ops { int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); - int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state); }; struct aq_fw_ops { @@ -228,6 +227,14 @@ struct aq_fw_ops { int (*update_stats)(struct aq_hw_s *self); int (*set_flow_control)(struct aq_hw_s *self); + + int (*set_power)(struct aq_hw_s *self, unsigned int power_state, + u8 *mac); + + int (*set_eee_rate)(struct aq_hw_s *self, u32 speed); + + int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates); }; #endif /* AQ_HW_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 26dc6782b475..5fed24446687 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -189,7 +189,7 @@ static void aq_nic_polling_timer_cb(struct timer_list *t) aq_vec_isr(i, (void *)aq_vec); mod_timer(&self->polling_timer, jiffies + - AQ_CFG_POLLING_TIMER_INTERVAL); + AQ_CFG_POLLING_TIMER_INTERVAL); } int aq_nic_ndev_register(struct aq_nic_s *self) @@ -301,13 +301,13 @@ int aq_nic_start(struct aq_nic_s *self) unsigned int i = 0U; err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, - self->mc_list.ar, - self->mc_list.count); + self->mc_list.ar, + self->mc_list.count); if (err < 0) goto err_exit; err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, - self->packet_filter); + self->packet_filter); if (err < 0) goto err_exit; @@ -327,7 +327,7 @@ int aq_nic_start(struct aq_nic_s *self) goto err_exit; timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0); mod_timer(&self->service_timer, jiffies + - AQ_CFG_SERVICE_TIMER_INTERVAL); + AQ_CFG_SERVICE_TIMER_INTERVAL); if (self->aq_nic_cfg.is_polling) { timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0); @@ -344,7 +344,7 @@ int aq_nic_start(struct aq_nic_s *self) } err = self->aq_hw_ops->hw_irq_enable(self->aq_hw, - AQ_CFG_IRQ_MASK); + AQ_CFG_IRQ_MASK); if (err < 0) goto err_exit; } @@ -889,11 +889,13 @@ void aq_nic_deinit(struct aq_nic_s *self) self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) aq_vec_deinit(aq_vec); - if (self->power_state == AQ_HW_POWER_STATE_D0) { - (void)self->aq_fw_ops->deinit(self->aq_hw); - } else { - (void)self->aq_hw_ops->hw_set_power(self->aq_hw, - self->power_state); + self->aq_fw_ops->deinit(self->aq_hw); + + if (self->power_state != AQ_HW_POWER_STATE_D0 || + self->aq_hw->aq_nic_cfg->wol) { + self->aq_fw_ops->set_power(self->aq_hw, + self->power_state, + self->ndev->dev_addr); } err_exit:; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index fecfc401f95d..c1582f4e8e1b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -36,6 +36,7 @@ struct aq_nic_cfg_s { u32 flow_control; u32 link_speed_msk; u32 vlan_id; + u32 wol; u16 is_mc_list_enabled; u16 mc_list_count; bool is_autoneg; @@ -44,6 +45,7 @@ struct aq_nic_cfg_s { bool is_lro; u8 tcs; struct aq_rss_parameters aq_rss; + u32 eee_speeds; }; #define AQ_NIC_FLAG_STARTED 0x00000004U @@ -54,6 +56,8 @@ struct aq_nic_cfg_s { #define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U #define AQ_NIC_FLAG_ERR_HW 0x80000000U 
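/*
 * Net effect of the aq_nic_deinit() change above (sketch assembled from
 * that hunk): firmware deinit now always runs, and the power transition
 * additionally fires when Wake-on-LAN is armed:
 *
 *	aq_fw_ops->deinit(hw);
 *	if (power_state != AQ_HW_POWER_STATE_D0 || cfg->wol)
 *		aq_fw_ops->set_power(hw, power_state, ndev->dev_addr);
 */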
+#define AQ_NIC_WOL_ENABLED BIT(0) + #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index b5f1f62e8e25..3db91446cc67 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -29,8 +29,8 @@ static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, goto err_exit; } self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), - self->size * self->dx_size, - &self->dx_ring_pa, GFP_KERNEL); + self->size * self->dx_size, + &self->dx_ring_pa, GFP_KERNEL); if (!self->dx_ring) { err = -ENOMEM; goto err_exit; @@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self, } /* for single fragment packets use build_skb() */ - if (buff->is_eop) { + if (buff->is_eop && + buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { skb = build_skb(page_address(buff->page), - buff->len + AQ_SKB_ALIGN); + AQ_CFG_RX_FRAME_MAX); if (unlikely(!skb)) { err = -ENOMEM; goto err_exit; @@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self, buff->len - ETH_HLEN, SKB_TRUESIZE(buff->len - ETH_HLEN)); - for (i = 1U, next_ = buff->next, - buff_ = &self->buff_ring[next_]; true; - next_ = buff_->next, - buff_ = &self->buff_ring[next_], ++i) { - skb_add_rx_frag(skb, i, buff_->page, 0, - buff_->len, - SKB_TRUESIZE(buff->len - - ETH_HLEN)); - buff_->is_cleaned = 1; - - if (buff_->is_eop) - break; + if (!buff->is_eop) { + for (i = 1U, next_ = buff->next, + buff_ = &self->buff_ring[next_]; + true; next_ = buff_->next, + buff_ = &self->buff_ring[next_], ++i) { + skb_add_rx_frag(skb, i, + buff_->page, 0, + buff_->len, + SKB_TRUESIZE(buff->len - + ETH_HLEN)); + buff_->is_cleaned = 1; + + if (buff_->is_eop) + break; + } } } diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 97addfa6f895..2469ed4d86b9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c @@ -49,37 +49,37 @@ const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_10G | - HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_5G | - HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = { DEFAULT_A0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_A0_RATE_2G5 | - HW_ATL_A0_RATE_1G | - HW_ATL_A0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_a0_hw_reset(struct aq_hw_s *self) @@ -284,7 +284,7 @@ 
static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self) /* RSS Ring selection */ hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ? - 0xB3333333U : 0x00000000U); + 0xB3333333U : 0x00000000U); /* Multicast filters */ for (i = HW_ATL_A0_MAC_MAX; i--;) { @@ -325,7 +325,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr) } h = (mac_addr[0] << 8) | (mac_addr[1]); l = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; + (mac_addr[4] << 8) | mac_addr[5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC); hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC); @@ -519,7 +519,7 @@ static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self, hw_atl_rdm_rx_desc_data_buff_size_set(self, AQ_CFG_RX_FRAME_MAX / 1024U, - aq_ring->idx); + aq_ring->idx); hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx); hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx); @@ -758,7 +758,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self, hw_atl_rpfl2_uc_flr_en_set(self, (self->aq_nic_cfg->is_mc_list_enabled && (i <= self->aq_nic_cfg->mc_list_count)) ? - 1U : 0U, i); + 1U : 0U, i); return aq_hw_err_from_flags(self); } @@ -877,7 +877,6 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_a0 = { .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set, .hw_init = hw_atl_a0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_a0_hw_reset, .hw_start = hw_atl_a0_hw_start, .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 3c94cff57876..a021dc431ef7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h @@ -62,12 +62,6 @@ #define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_A0_MPI_SPEED_SHIFT 16U -#define HW_ATL_A0_RATE_10G BIT(0) -#define HW_ATL_A0_RATE_5G BIT(1) -#define HW_ATL_A0_RATE_2G5 BIT(3) -#define HW_ATL_A0_RATE_1G BIT(4) -#define HW_ATL_A0_RATE_100M BIT(5) - #define HW_ATL_A0_TXBUF_MAX 160U #define HW_ATL_A0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 1d44a386e7d3..76d25d594a0f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -51,38 +51,38 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_FIBRE, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc107 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_10G | - HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc108 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_5G | - HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + 
.link_speed_msk = AQ_NIC_RATE_5G | + AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = { DEFAULT_B0_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, - .link_speed_msk = HW_ATL_B0_RATE_2G5 | - HW_ATL_B0_RATE_1G | - HW_ATL_B0_RATE_100M, + .link_speed_msk = AQ_NIC_RATE_2GS | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M, }; static int hw_atl_b0_hw_reset(struct aq_hw_s *self) @@ -935,7 +935,6 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, - .hw_set_power = hw_atl_utils_hw_set_power, .hw_reset = hw_atl_b0_hw_reset, .hw_start = hw_atl_b0_hw_start, .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 28568f5fa74b..b318eefd36ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h @@ -67,12 +67,6 @@ #define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU #define HW_ATL_B0_MPI_SPEED_SHIFT 16U -#define HW_ATL_B0_RATE_10G BIT(0) -#define HW_ATL_B0_RATE_5G BIT(1) -#define HW_ATL_B0_RATE_2G5 BIT(3) -#define HW_ATL_B0_RATE_1G BIT(4) -#define HW_ATL_B0_RATE_100M BIT(5) - #define HW_ATL_B0_TXBUF_MAX 160U #define HW_ATL_B0_RXBUF_MAX 320U diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 10ba035dadb1..be0a3a90dfad 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -1460,3 +1460,11 @@ void hw_atl_reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, aq_hw_write_reg(aq_hw, HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp), glb_cpu_scratch_scp); } + +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR, + HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK, + HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, + up_force_intr); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index dfb426f2dc2c..7056c7342afc 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -698,4 +698,7 @@ void hw_atl_msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe); /* set pci register reset disable */ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); +/* set uP Force Interrupt */ +void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index e0cf70120f1d..716674a9b729 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -2387,4 +2387,17 @@ #define HW_ATL_GLB_CPU_SCRATCH_SCP_ADR(scratch_scp) \ (0x00000300u + (scratch_scp) * 0x4) +/* register address for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_ADR 0x00000404 +/* bitmask for bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSK 0x00000002 +/* inverted bitmask for bitfield uP Force 
Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_MSKN 0xFFFFFFFD +/* lower bit position of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT 1 +/* width of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_WIDTH 1 +/* default value of bitfield uP Force Interrupt */ +#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index c965e65d07db..0dd59b09060b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -49,6 +49,7 @@ #define FORCE_FLASHLESS 0 static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); + static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -69,10 +70,10 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_1x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, - self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, - self->fw_ver_actual) == 0) { + self->fw_ver_actual) == 0) { *fw_ops = &aq_fw_2x_ops; } else { aq_pr_err("Bad FW version detected: %x\n", @@ -260,7 +261,7 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) hw_atl_utils_mpi_set_state(self, MPI_DEINIT); AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & - HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, + HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, 10, 1000U); } @@ -277,7 +278,7 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, AQ_HW_WAIT_FOR(hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM) == 1U, - 1U, 10000U); + 1U, 10000U); if (err < 0) { bool is_locked; @@ -325,17 +326,31 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p, err = -ETIME; goto err_exit; } + if (IS_CHIP_FEATURE(REVISION_B1)) { + u32 offset = 0; + + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x328, p[offset]); + aq_hw_write_reg(self, 0x32C, + (0x80000000 | (0xFFFF & (offset * 4)))); + hw_atl_mcp_up_force_intr_set(self, 1); + /* 1000 times by 10us = 10ms */ + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, + 0x32C) & 0xF0000000) != + 0x80000000, + 10, 1000); + } + } else { + u32 offset = 0; - aq_hw_write_reg(self, 0x00000208U, a); - - for (++cnt; --cnt;) { - u32 i = 0U; + aq_hw_write_reg(self, 0x208, a); - aq_hw_write_reg(self, 0x0000020CU, *(p++)); - aq_hw_write_reg(self, 0x00000200U, 0xC000U); + for (; offset < cnt; ++offset) { + aq_hw_write_reg(self, 0x20C, p[offset]); + aq_hw_write_reg(self, 0x200, 0xC000); - for (i = 1024U; - (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, 0x200U) & + 0x100) == 0, 10, 1000); } } @@ -379,7 +394,7 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self, /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, 0x360U)), 1000U, 10U); + aq_hw_read_reg(self, 0x360U)), 1000U, 10U); return err; } @@ -399,7 +414,7 @@ struct aq_hw_atl_utils_fw_rpc_tid_s { #define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL) -static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -411,7 +426,7 @@ static int 
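/*
 * The AQ_HW_WAIT_FOR(cond, delay_us, tries) calls in this file implement
 * a bounded poll; conceptually (a sketch inferred from its call sites,
 * e.g. "1000 times by 10us = 10ms" above):
 *
 *	for (i = tries; !(cond) && i; i--)
 *		udelay(delay_us);
 *	err = (cond) ? 0 : -ETIME;
 */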
hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size) err = hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, (u32 *)(void *)&self->rpc, (rpc_size + sizeof(u32) - - sizeof(u8)) / sizeof(u32)); + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -423,8 +438,8 @@ err_exit: return err; } -static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, - struct hw_aq_atl_utils_fw_rpc **rpc) +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc) { int err = 0; struct aq_hw_atl_utils_fw_rpc_tid_s sw; @@ -436,7 +451,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, self->rpc_tid = sw.tid; AQ_HW_WAIT_FOR(sw.tid == - (fw.val = + (fw.val = aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), fw.tid), 1000U, 100U); if (err < 0) @@ -459,7 +474,7 @@ static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, (u32 *)(void *) &self->rpc, (fw.len + sizeof(u32) - - sizeof(u8)) / + sizeof(u8)) / sizeof(u32)); if (err < 0) goto err_exit; @@ -489,16 +504,16 @@ err_exit: } int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox) + struct hw_atl_utils_mbox_header *pmbox) { return hw_atl_utils_fw_downld_dwords(self, - self->mbox_addr, - (u32 *)(void *)pmbox, - sizeof(*pmbox) / sizeof(u32)); + self->mbox_addr, + (u32 *)(void *)pmbox, + sizeof(*pmbox) / sizeof(u32)); } void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox) + struct hw_atl_utils_mbox *pmbox) { int err = 0; @@ -538,7 +553,7 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, { int err = 0; u32 transaction_id = 0; - struct hw_aq_atl_utils_mbox_header mbox; + struct hw_atl_utils_mbox_header mbox; u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR); if (state == MPI_RESET) { @@ -547,8 +562,8 @@ static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, transaction_id = mbox.transaction_id; AQ_HW_WAIT_FOR(transaction_id != - (hw_atl_utils_mpi_read_mbox(self, &mbox), - mbox.transaction_id), + (hw_atl_utils_mpi_read_mbox(self, &mbox), + mbox.transaction_id), 1000U, 100U); if (err < 0) goto err_exit; @@ -645,9 +660,9 @@ int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self, if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) { /* chip revision */ - l = 0xE3000000U - | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) - | (0x00 << 16); + l = 0xE3000000U | + (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) | + (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -730,17 +745,9 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self) return 0; } -int hw_atl_utils_hw_set_power(struct aq_hw_s *self, - unsigned int power_state) -{ - hw_atl_utils_mpi_set_speed(self, 0); - hw_atl_utils_mpi_set_state(self, MPI_POWER); - return 0; -} - int hw_atl_utils_update_stats(struct aq_hw_s *self) { - struct hw_aq_atl_utils_mbox mbox; + struct hw_atl_utils_mbox mbox; hw_atl_utils_mpi_read_stats(self, &mbox); @@ -825,6 +832,81 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version) return 0; } +static int aq_fw1x_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + if (err < 0) + goto err_exit; + + memset(prpc, 0, sizeof(*prpc)); + + if (wol_enabled) { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_wol); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD; + prpc->msg_wol.priority = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR; + 
prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + prpc->msg_wol.wol_packet_type = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT; + + ether_addr_copy((u8 *)&prpc->msg_wol.wol_pattern, mac); + } else { + rpc_size = sizeof(prpc->msg_id) + sizeof(prpc->msg_del_id); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL; + prpc->msg_wol.pattern_id = + HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN; + } + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + +err_exit: + return err; +} + +int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + struct hw_atl_utils_fw_rpc *prpc = NULL; + unsigned int rpc_size = 0U; + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw1x_set_wol(self, 1, mac); + + if (err < 0) + goto err_exit; + + rpc_size = sizeof(prpc->msg_id) + + sizeof(prpc->msg_enable_wakeup); + + err = hw_atl_utils_fw_rpc_wait(self, &prpc); + + if (err < 0) + goto err_exit; + + memset(prpc, 0, rpc_size); + + prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP; + prpc->msg_enable_wakeup.pattern_mask = 0x00000002; + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + } + hw_atl_utils_mpi_set_speed(self, 0); + hw_atl_utils_mpi_set_state(self, MPI_POWER); + +err_exit: + return err; +} + const struct aq_fw_ops aq_fw_1x_ops = { .init = hw_atl_utils_mpi_create, .deinit = hw_atl_fw1x_deinit, @@ -834,5 +916,8 @@ const struct aq_fw_ops aq_fw_1x_ops = { .set_state = hw_atl_utils_mpi_set_state, .update_link_status = hw_atl_utils_mpi_get_link_status, .update_stats = hw_atl_utils_update_stats, + .set_power = aq_fw1x_set_power, + .set_eee_rate = NULL, + .get_eee_rate = NULL, .set_flow_control = NULL, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b875590efcbd..3613fca64b58 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -75,7 +75,7 @@ union __packed ip_addr { } v4; }; -struct __packed hw_aq_atl_utils_fw_rpc { +struct __packed hw_atl_utils_fw_rpc { u32 msg_id; union { @@ -101,8 +101,6 @@ struct __packed hw_aq_atl_utils_fw_rpc { struct { u32 priority; u32 wol_packet_type; - u16 friendly_name_len; - u16 friendly_name[65]; u32 pattern_id; u32 next_wol_pattern_offset; @@ -134,25 +132,112 @@ struct __packed hw_aq_atl_utils_fw_rpc { u32 pattern_offset; u32 pattern_size; } wol_bit_map_pattern; + + struct { + u8 mac_addr[ETH_ALEN]; + } wol_magic_packet_patter; } wol_pattern; } msg_wol; struct { - u32 is_wake_on_link_down; - u32 is_wake_on_link_up; - } msg_wolink; + union { + u32 pattern_mask; + + struct { + u32 reason_arp_v4_pkt : 1; + u32 reason_ipv4_ping_pkt : 1; + u32 reason_ipv6_ns_pkt : 1; + u32 reason_ipv6_ping_pkt : 1; + u32 reason_link_up : 1; + u32 reason_link_down : 1; + u32 reason_maximum : 1; + }; + }; + + union { + u32 offload_mask; + }; + } msg_enable_wakeup; + + struct { + u32 id; + } msg_del_id; }; }; -struct __packed hw_aq_atl_utils_mbox_header { +struct __packed hw_atl_utils_mbox_header { u32 version; u32 transaction_id; u32 error; }; -struct __packed hw_aq_atl_utils_mbox { - struct hw_aq_atl_utils_mbox_header header; +struct __packed hw_aq_info { + u8 reserved[6]; + u16 phy_fault_code; + u16 phy_temperature; + u8 cable_len; + u8 reserved1; + u32 cable_diag_data[4]; + u8 reserved2[32]; + u32 caps_lo; + u32 caps_hi; +}; + +struct __packed hw_atl_utils_mbox { + struct hw_atl_utils_mbox_header header; struct 
hw_atl_stats_s stats; + struct hw_aq_info info; +}; + +/* fw2x */ +typedef u32 fw_offset_t; + +struct __packed offload_ip_info { + u8 v4_local_addr_count; + u8 v4_addr_count; + u8 v6_local_addr_count; + u8 v6_addr_count; + fw_offset_t v4_addr; + fw_offset_t v4_prefix; + fw_offset_t v6_addr; + fw_offset_t v6_prefix; +}; + +struct __packed offload_port_info { + u16 udp_port_count; + u16 tcp_port_count; + fw_offset_t udp_port; + fw_offset_t tcp_port; +}; + +struct __packed offload_ka_info { + u16 v4_ka_count; + u16 v6_ka_count; + u32 retry_count; + u32 retry_interval; + fw_offset_t v4_ka; + fw_offset_t v6_ka; +}; + +struct __packed offload_rr_info { + u32 rr_count; + u32 rr_buf_len; + fw_offset_t rr_id_x; + fw_offset_t rr_buf; +}; + +struct __packed offload_info { + u32 version; + u32 len; + u8 mac_addr[ETH_ALEN]; + + u8 reserved[2]; + + struct offload_ip_info ips; + struct offload_port_info ports; + struct offload_ka_info kas; + struct offload_rr_info rrs; + u8 buf[0]; }; #define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U @@ -181,6 +266,21 @@ enum hal_atl_utils_fw_state_e { #define HAL_ATLANTIC_RATE_100M BIT(5) #define HAL_ATLANTIC_RATE_INVALID BIT(6) +#define HAL_ATLANTIC_UTILS_FW_MSG_PING 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_ARP 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_INJECT 0x3U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD 0x4U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR 0x10000000U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN 0x1U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT 0x2U +#define HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL 0x5U +#define HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP 0x6U +#define HAL_ATLANTIC_UTILS_FW_MSG_MSM_PFC 0x7U +#define HAL_ATLANTIC_UTILS_FW_MSG_PROVISIONING 0x8U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_ADD 0x9U +#define HAL_ATLANTIC_UTILS_FW_MSG_OFFLOAD_DEL 0xAU +#define HAL_ATLANTIC_UTILS_FW_MSG_CABLE_DIAG 0xDU + enum hw_atl_fw2x_rate { FW2X_RATE_100M = 0x20, FW2X_RATE_1G = 0x100, @@ -286,10 +386,10 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self); void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox_header *pmbox); + struct hw_atl_utils_mbox_header *pmbox); void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, - struct hw_aq_atl_utils_mbox *pmbox); + struct hw_atl_utils_mbox *pmbox); void hw_atl_utils_mpi_set(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state, @@ -316,9 +416,17 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); int hw_atl_utils_update_stats(struct aq_hw_s *self); struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); + int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a, u32 *p, u32 cnt); +int hw_atl_utils_fw_set_wol(struct aq_hw_s *self, bool wol_enabled, u8 *mac); + +int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size); + +int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, + struct hw_atl_utils_fw_rpc **rpc); + extern const struct aq_fw_ops aq_fw_1x_ops; extern const struct aq_fw_ops aq_fw_2x_ops; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c index e37943760a58..c0568465e10b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c @@ -16,11 +16,13 @@ #include "../aq_pci_func.h" #include "../aq_ring.h" #include "../aq_vec.h" +#include "../aq_nic.h" #include "hw_atl_utils.h" 
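Several of the fw2x routines in this file (aq_fw2x_set_sleep_proxy(), aq_fw2x_set_wol_params(), aq_fw2x_update_stats()) share one handshake: the host flips an option bit in the CONTROL2 register at 0x36C, then polls the STATE2 register at 0x374 until the firmware mirrors the bit back. Below is a minimal standalone sketch of that request/acknowledge pattern; hw_read_reg()/hw_write_reg() and the retry budget are illustrative stand-ins for the driver's aq_hw_read_reg()/aq_hw_write_reg() accessors and its AQ_HW_WAIT_FOR() polling macro, not real kernel API.

#include <stdint.h>
#include <stdbool.h>

/* Toy register file standing in for the device BAR (illustrative stub). */
static uint32_t regs[0x400];

static uint32_t hw_read_reg(uint32_t addr)
{
	return regs[addr / 4];
}

static void hw_write_reg(uint32_t addr, uint32_t val)
{
	regs[addr / 4] = val;
}

#define FW2X_MPI_CONTROL2_ADDR	0x36CU	/* host -> firmware option bits */
#define FW2X_MPI_STATE2_ADDR	0x374U	/* firmware -> host acknowledgements */

/* Flip one option bit in CONTROL2, then poll STATE2 until the firmware
 * mirrors the new value back. */
static int fw2x_toggle_and_wait(uint32_t bit, bool on, unsigned int tries)
{
	uint32_t ctrl = hw_read_reg(FW2X_MPI_CONTROL2_ADDR);

	if (on)
		ctrl |= bit;
	else
		ctrl &= ~bit;
	hw_write_reg(FW2X_MPI_CONTROL2_ADDR, ctrl);

	while (tries--) {
		uint32_t state = hw_read_reg(FW2X_MPI_STATE2_ADDR);

		if (!!(state & bit) == on)
			return 0;	/* firmware acknowledged the request */
		/* the driver delays between reads; elided here */
	}
	return -1;	/* timed out; the driver reports -ETIME */
}

This is, for example, the shape of aq_fw2x_set_wol_params() below: it clears HW_ATL_FW2X_CTRL_WOL, posts the WoL message through the RPC mailbox, sets the bit again, and then waits for the same bit to appear in STATE2.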
#include "hw_atl_llh.h" #define HW_ATL_FW2X_MPI_EFUSE_ADDR 0x364 #define HW_ATL_FW2X_MPI_MBOX_ADDR 0x360 +#define HW_ATL_FW2X_MPI_RPC_ADDR 0x334 #define HW_ATL_FW2X_MPI_CONTROL_ADDR 0x368 #define HW_ATL_FW2X_MPI_CONTROL2_ADDR 0x36C @@ -28,6 +30,42 @@ #define HW_ATL_FW2X_MPI_STATE_ADDR 0x370 #define HW_ATL_FW2X_MPI_STATE2_ADDR 0x374 +#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY) +#define HW_ATL_FW2X_CAP_WOL BIT(CAPS_HI_WOL) + +#define HW_ATL_FW2X_CTRL_SLEEP_PROXY BIT(CTRL_SLEEP_PROXY) +#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL) +#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP) +#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE) +#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE) +#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT) + +#define HW_ATL_FW2X_CAP_EEE_1G_MASK BIT(CAPS_HI_1000BASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_2G5_MASK BIT(CAPS_HI_2P5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_5G_MASK BIT(CAPS_HI_5GBASET_FD_EEE) +#define HW_ATL_FW2X_CAP_EEE_10G_MASK BIT(CAPS_HI_10GBASET_FD_EEE) + +#define HAL_ATLANTIC_WOL_FILTERS_COUNT 8 +#define HAL_ATLANTIC_UTILS_FW2X_MSG_WOL 0x0E + +struct __packed fw2x_msg_wol_pattern { + u8 mask[16]; + u32 crc; +}; + +struct __packed fw2x_msg_wol { + u32 msg_id; + u8 hw_addr[ETH_ALEN]; + u8 magic_packet_enabled; + u8 filter_count; + struct fw2x_msg_wol_pattern filter[HAL_ATLANTIC_WOL_FILTERS_COUNT]; + u8 link_up_enabled; + u8 link_down_enabled; + u16 reserved; + u32 link_up_timeout; + u32 link_down_timeout; +}; + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed); static int aq_fw2x_set_state(struct aq_hw_s *self, enum hal_atl_utils_fw_state_e state); @@ -38,8 +76,12 @@ static int aq_fw2x_init(struct aq_hw_s *self) /* check 10 times by 1ms */ AQ_HW_WAIT_FOR(0U != (self->mbox_addr = - aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_MBOX_ADDR)), 1000U, 10U); + AQ_HW_WAIT_FOR(0U != (self->rpc_addr = + aq_hw_read_reg(self, HW_ATL_FW2X_MPI_RPC_ADDR)), + 1000U, 100U); + return err; } @@ -78,6 +120,38 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed) return rate; } +static u32 fw2x_to_eee_mask(u32 speed) +{ + u32 rate = 0; + + if (speed & HW_ATL_FW2X_CAP_EEE_10G_MASK) + rate |= AQ_NIC_RATE_EEE_10G; + if (speed & HW_ATL_FW2X_CAP_EEE_5G_MASK) + rate |= AQ_NIC_RATE_EEE_5G; + if (speed & HW_ATL_FW2X_CAP_EEE_2G5_MASK) + rate |= AQ_NIC_RATE_EEE_2GS; + if (speed & HW_ATL_FW2X_CAP_EEE_1G_MASK) + rate |= AQ_NIC_RATE_EEE_1G; + + return rate; +} + +static u32 eee_mask_to_fw2x(u32 speed) +{ + u32 rate = 0; + + if (speed & AQ_NIC_RATE_EEE_10G) + rate |= HW_ATL_FW2X_CAP_EEE_10G_MASK; + if (speed & AQ_NIC_RATE_EEE_5G) + rate |= HW_ATL_FW2X_CAP_EEE_5G_MASK; + if (speed & AQ_NIC_RATE_EEE_2GS) + rate |= HW_ATL_FW2X_CAP_EEE_2G5_MASK; + if (speed & AQ_NIC_RATE_EEE_1G) + rate |= HW_ATL_FW2X_CAP_EEE_1G_MASK; + + return rate; +} + static int aq_fw2x_set_link_speed(struct aq_hw_s *self, u32 speed) { u32 val = link_speed_mask_2fw2x_ratemask(speed); @@ -100,14 +174,27 @@ static void aq_fw2x_set_mpi_flow_control(struct aq_hw_s *self, u32 *mpi_state) *mpi_state &= ~BIT(CAPS_HI_ASYMMETRIC_PAUSE); } +static void aq_fw2x_upd_eee_rate_bits(struct aq_hw_s *self, u32 *mpi_opts, + u32 eee_speeds) +{ + *mpi_opts &= ~(HW_ATL_FW2X_CAP_EEE_1G_MASK | + HW_ATL_FW2X_CAP_EEE_2G5_MASK | + HW_ATL_FW2X_CAP_EEE_5G_MASK | + HW_ATL_FW2X_CAP_EEE_10G_MASK); + + *mpi_opts |= eee_mask_to_fw2x(eee_speeds); +} + static int aq_fw2x_set_state(struct aq_hw_s *self, enum 
hal_atl_utils_fw_state_e state) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; switch (state) { case MPI_INIT: mpi_state &= ~BIT(CAPS_HI_LINK_DROP); + aq_fw2x_upd_eee_rate_bits(self, &mpi_state, cfg->eee_speeds); aq_fw2x_set_mpi_flow_control(self, &mpi_state); break; case MPI_DEINIT: @@ -126,7 +213,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self) { u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE_ADDR); u32 speed = mpi_state & (FW2X_RATE_100M | FW2X_RATE_1G | - FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); + FW2X_RATE_2G5 | FW2X_RATE_5G | FW2X_RATE_10G); struct aq_hw_link_status_s *link_status = &self->aq_link_status; if (speed) { @@ -175,9 +262,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) get_random_bytes(&rnd, sizeof(unsigned int)); - l = 0xE3000000U - | (0xFFFFU & rnd) - | (0x00 << 16); + l = 0xE3000000U | (0xFFFFU & rnd) | (0x00 << 16); h = 0x8001300EU; mac[5] = (u8)(0xFFU & l); @@ -194,7 +279,7 @@ static int aq_fw2x_get_mac_permanent(struct aq_hw_s *self, u8 *mac) return err; } -static int aq_fw2x_update_stats(struct aq_hw_s *self) +int aq_fw2x_update_stats(struct aq_hw_s *self) { int err = 0; u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -207,7 +292,7 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) /* Wait FW to report back */ AQ_HW_WAIT_FOR(orig_stats_val != (aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & - BIT(CAPS_HI_STATISTICS)), + BIT(CAPS_HI_STATISTICS)), 1U, 10000U); if (err) return err; @@ -215,6 +300,135 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self) return hw_atl_utils_update_stats(self); } +static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct offload_info *cfg = NULL; + unsigned int rpc_size = 0U; + u32 mpi_opts; + int err = 0; + + rpc_size = sizeof(rpc->msg_id) + sizeof(*cfg); + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + memset(rpc, 0, rpc_size); + cfg = (struct offload_info *)(&rpc->msg_id + 1); + + memcpy(cfg->mac_addr, mac, ETH_ALEN); + cfg->len = sizeof(*cfg); + + /* Clear bit 0x36C.23 and 0x36C.22 */ + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~HW_ATL_FW2X_CTRL_SLEEP_PROXY; + mpi_opts &= ~HW_ATL_FW2X_CTRL_LINK_DROP; + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, rpc_size); + if (err < 0) + goto err_exit; + + /* Set bit 0x36C.23 */ + mpi_opts |= HW_ATL_FW2X_CTRL_SLEEP_PROXY; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_SLEEP_PROXY), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac) +{ + struct hw_atl_utils_fw_rpc *rpc = NULL; + struct fw2x_msg_wol *msg = NULL; + u32 mpi_opts; + int err = 0; + + err = hw_atl_utils_fw_rpc_wait(self, &rpc); + if (err < 0) + goto err_exit; + + msg = (struct fw2x_msg_wol *)rpc; + + msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL; + msg->magic_packet_enabled = true; + memcpy(msg->hw_addr, mac, ETH_ALEN); + + mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + mpi_opts &= ~(HW_ATL_FW2X_CTRL_SLEEP_PROXY | HW_ATL_FW2X_CTRL_WOL); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + err = hw_atl_utils_fw_rpc_call(self, sizeof(*msg)); + if (err < 0) + goto err_exit; + + /* 
Set bit 0x36C.24 */ + mpi_opts |= HW_ATL_FW2X_CTRL_WOL; + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR) & + HW_ATL_FW2X_CTRL_WOL), 1U, 10000U); + +err_exit: + return err; +} + +static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state, + u8 *mac) +{ + int err = 0; + + if (self->aq_nic_cfg->wol & AQ_NIC_WOL_ENABLED) { + err = aq_fw2x_set_sleep_proxy(self, mac); + if (err < 0) + goto err_exit; + err = aq_fw2x_set_wol_params(self, mac); + } + +err_exit: + return err; +} + +static int aq_fw2x_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); + + aq_fw2x_upd_eee_rate_bits(self, &mpi_opts, speed); + + aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts); + + return 0; +} + +static int aq_fw2x_get_eee_rate(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates) +{ + u32 mpi_state; + u32 caps_hi; + int err = 0; + u32 addr = self->mbox_addr + offsetof(struct hw_atl_utils_mbox, info) + + offsetof(struct hw_aq_info, caps_hi); + + err = hw_atl_utils_fw_downld_dwords(self, addr, &caps_hi, + sizeof(caps_hi) / sizeof(u32)); + + if (err) + return err; + + *supported_rates = fw2x_to_eee_mask(caps_hi); + + mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR); + *rate = fw2x_to_eee_mask(mpi_state); + + return err; +} + static int aq_fw2x_renegotiate(struct aq_hw_s *self) { u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR); @@ -247,5 +461,8 @@ const struct aq_fw_ops aq_fw_2x_ops = { .set_state = aq_fw2x_set_state, .update_link_status = aq_fw2x_update_link_status, .update_stats = aq_fw2x_update_stats, - .set_flow_control = aq_fw2x_set_flow_control, + .set_power = aq_fw2x_set_power, + .set_eee_rate = aq_fw2x_set_eee_rate, + .get_eee_rate = aq_fw2x_get_eee_rate, + .set_flow_control = aq_fw2x_set_flow_control, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 94efc6477bdc..b48260114da3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h @@ -12,7 +12,7 @@ #define NIC_MAJOR_DRIVER_VERSION 2 #define NIC_MINOR_DRIVER_VERSION 0 -#define NIC_BUILD_DRIVER_VERSION 3 +#define NIC_BUILD_DRIVER_VERSION 4 #define NIC_REVISION_DRIVER_VERSION 0 #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index c8d1f8fa4713..6f56276015a4 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -935,18 +935,11 @@ static void nb8800_pause_adv(struct net_device *dev) { struct nb8800_priv *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; - u32 adv = 0; if (!phydev) return; - if (priv->pause_rx) - adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (priv->pause_tx) - adv ^= ADVERTISED_Asym_Pause; - - phydev->supported |= adv; - phydev->advertising |= adv; + phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx); } static int nb8800_open(struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 897302adc38e..02e7dfc1a2ef 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -890,19 +890,10 @@ static int bcm_enet_open(struct net_device *dev) } /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half | - 
SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_Pause | - SUPPORTED_MII); - phydev->advertising = phydev->supported; - - if (priv->pause_auto && priv->pause_rx && priv->pause_tx) - phydev->advertising |= SUPPORTED_Pause; - else - phydev->advertising &= ~SUPPORTED_Pause; + phy_support_sym_pause(phydev); + phy_set_max_speed(phydev, SPEED_100); + phy_set_sym_pause(phydev, priv->pause_rx, priv->pause_rx, + priv->pause_auto); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 4c94d9218bba..cabc8e49ad24 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -616,7 +616,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1, BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, }; int size; /* ring size: different for Tx and Rx */ - int err; int i; BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); @@ -666,7 +665,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) if (!ring->cpu_base) { dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n", ring->mmio_base); - err = -ENOMEM; goto err_dma_free; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 092c817f8f11..f4ba9b3f8819 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -181,7 +181,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, struct bnxt_tc_flow *flow) { struct flow_dissector *dissector = tc_flow_cmd->dissector; - u16 addr_type = 0; /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 || @@ -191,13 +190,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return -EOPNOTSUPP; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) { struct flow_dissector_key_basic *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC); @@ -293,13 +285,6 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, flow->l4_mask.icmp.code = mask->code; } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { - struct flow_dissector_key_control *key = - GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); - - addr_type = key->addr_type; - } - if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_dissector_key_ipv4_addrs *key = GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index e31f5d803c13..b574fe8e974e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -209,9 +209,7 @@ struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) { struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); - struct bnxt_vf_rep_stats *rx_stats; - rx_stats = &vf_rep->rx_stats; vf_rep->rx_stats.bytes += skb->len; vf_rep->rx_stats.packets++; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 4241ae928d4a..b756fc79424e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ 
b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -214,7 +214,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) case PHY_INTERFACE_MODE_MII: phy_name = "external MII"; - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); bcmgenet_sys_writel(priv, PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); break; @@ -226,11 +226,10 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. */ - if ((dev->phydev->supported & PHY_BASIC_FEATURES) == - PHY_BASIC_FEATURES) - port_ctrl = PORT_MODE_EXT_RVMII_25; - else + if (dev->phydev->supported & PHY_1000BT_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_50; + else + port_ctrl = PORT_MODE_EXT_RVMII_25; bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL); break; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index ef4a0c326736..c44cff54262c 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -156,7 +156,7 @@ enum sbmac_state { (d)->sbdma_dscrtable : (d)->f+1) -#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES) +#define NUMCACHEBLKS(x) DIV_ROUND_UP(x, SMP_CACHE_BYTES) #define SBMAC_MAX_TXDESCR 256 #define SBMAC_MAX_RXDESCR 256 @@ -2357,21 +2357,11 @@ static int sbmac_mii_probe(struct net_device *dev) } /* Remove any features not supported by the controller */ - phy_dev->supported &= SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(phy_dev, SPEED_1000); + phy_support_asym_pause(phy_dev); phy_attached_info(phy_dev); - phy_dev->advertising = phy_dev->supported; - sc->phy_dev = phy_dev; return 0; diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e6f28c7942ab..89295306f161 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -1598,7 +1598,7 @@ static int tg3_mdio_init(struct tg3 *tp) phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE; if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN)) phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE; - /* fallthru */ + /* fall through */ case PHY_ID_RTL8211C: phydev->interface = PHY_INTERFACE_MODE_RGMII; break; @@ -2122,16 +2122,14 @@ static int tg3_phy_init(struct tg3 *tp) case PHY_INTERFACE_MODE_GMII: case PHY_INTERFACE_MODE_RGMII: if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) { - phydev->supported &= (PHY_GBIT_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_1000); + phy_support_asym_pause(phydev); break; } - /* fallthru */ + /* fall through */ case PHY_INTERFACE_MODE_MII: - phydev->supported &= (PHY_BASIC_FEATURES | - SUPPORTED_Pause | - SUPPORTED_Asym_Pause); + phy_set_max_speed(phydev, SPEED_100); + phy_support_asym_pause(phydev); break; default: phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)); @@ -2140,8 +2138,6 @@ static int tg3_phy_init(struct tg3 *tp) tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED; - phydev->advertising = phydev->supported; - phy_attached_info(phydev); return 0; @@ -5215,7 +5211,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) ap->state = ANEG_STATE_AN_ENABLE; - /* fallthru */ + /* fall through */ case ANEG_STATE_AN_ENABLE: ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX); if 
(ap->flags & MR_AN_ENABLE) { @@ -5245,7 +5241,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ret = ANEG_TIMER_ENAB; ap->state = ANEG_STATE_RESTART; - /* fallthru */ + /* fall through */ case ANEG_STATE_RESTART: delta = ap->cur_time - ap->link_time; if (delta > ANEG_STATE_SETTLE_TIME) @@ -5288,7 +5284,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp, ap->state = ANEG_STATE_ACK_DETECT; - /* fallthru */ + /* fall through */ case ANEG_STATE_ACK_DETECT: if (ap->ack_match != 0) { if ((ap->rxconfig & ~ANEG_CFG_ACK) == @@ -12496,31 +12492,24 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_warn_mgmt_link_flap(tp); if (tg3_flag(tp, USE_PHYLIB)) { - u32 newadv; struct phy_device *phydev; phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; tp->link_config.flowctrl = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_RX; if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Pause; - } else - newadv = ADVERTISED_Pause | - ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { tp->link_config.flowctrl |= FLOW_CTRL_TX; - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) tg3_flag_set(tp, PAUSE_AUTONEG); @@ -12528,33 +12517,19 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam tg3_flag_clear(tp, PAUSE_AUTONEG); if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) { - u32 oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - phydev->advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - /* - * Always renegotiate the link to - * inform our link partner of our - * flow control settings, even if the - * flow control is forced. Let - * tg3_adjust_link() do the final - * flow control setup. - */ - return phy_start_aneg(phydev); - } + if (phydev->autoneg) { + /* phy_set_asym_pause() will + * renegotiate the link to inform our + * link partner of our flow control + * settings, even if the flow control + * is forced. Let tg3_adjust_link() + * do the final flow control setup. 
+ */ + return 0; } if (!epause->autoneg) tg3_setup_flow_control(tp, 0, 0); - } else { - tp->link_config.advertising &= - ~(ADVERTISED_Pause | - ADVERTISED_Asym_Pause); - tp->link_config.advertising |= newadv; } } else { int irq_sync = 0; @@ -14013,7 +13988,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SIOCGMIIPHY: data->phy_id = tp->phy_addr; - /* fallthru */ + /* fall through */ case SIOCGMIIREG: { u32 mii_regval; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index f1a86b422617..7fddf76e6ead 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -544,14 +544,13 @@ static int macb_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF) - phydev->supported &= ~SUPPORTED_1000baseT_Half; - - phydev->advertising = phydev->supported; + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); bp->link = 0; bp->speed = 0; diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c index 962bb62933db..fda49404968c 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -616,7 +616,7 @@ static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) { struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; - u32 rings_per_vf, ring_flag; + u32 rings_per_vf; u64 reg_val; if (octeon_map_pci_barx(oct, 0, 0)) @@ -634,8 +634,6 @@ int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; - ring_flag = 0; - cn23xx->conf = oct_get_config_info(oct, LIO_23XX); if (!cn23xx->conf) { dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n", diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8093c5eafea2..eb96b0613cf6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -32,38 +32,6 @@ #define OCTNIC_MAX_SG MAX_SKB_FRAGS /** - * \brief Callback for getting interface configuration - * @param status status of request - * @param buf pointer to resp structure - */ -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - struct liquidio_if_cfg_resp *resp; - - resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (resp->status) - dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. 
Status: %llx\n", - CVM_CAST64(resp->status)); - WRITE_ONCE(ctx->cond, 1); - - snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s", - resp->cfg_info.liquidio_firmware_version); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Delete gather lists * @param lio per-network private data */ @@ -198,14 +166,15 @@ int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) nctrl.ncmd.s.cmd = cmd; nctrl.ncmd.s.param1 = param1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -285,15 +254,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) struct octeon_device *oct = lio->oct_dev; u8 *mac; - if (nctrl->completion && nctrl->response_code) { - /* Signal whoever is interested that the response code from the - * firmware has arrived. - */ - WRITE_ONCE(*nctrl->response_code, nctrl->status); - complete(nctrl->completion); - } - - if (nctrl->status) + if (nctrl->sc_status) return; switch (nctrl->ncmd.s.cmd) { @@ -464,56 +425,73 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) */ } +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq) +{ + struct net_device *netdev = oct->props[0].netdev; + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; + + queue_delayed_work(wq->wq, &wq->wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); +} + static void octnet_poll_check_rxq_oom_status(struct work_struct *work) { struct cavium_wk *wk = (struct cavium_wk *)work; struct lio *lio = (struct lio *)wk->ctxptr; struct octeon_device *oct = lio->oct_dev; - struct octeon_droq *droq; - int q, q_no = 0; + int q_no = wk->ctxul; + struct octeon_droq *droq = oct->droq[q_no]; - if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - droq = oct->droq[q_no]; - if (!droq) - continue; - octeon_droq_check_oom(droq); - } - } - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) + return; + + if (octeon_retry_droq_refill(droq)) + octeon_schedule_rxq_oom_work(oct, droq); } int setup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q, q_no; - lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status", - WQ_MEM_RECLAIM, 0); - if (!lio->rxq_status_wq.wq) { - dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); - return -ENOMEM; + for (q = 0; q < oct->num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + wq = &lio->rxq_status_wq[q_no]; + wq->wq = alloc_workqueue("rxq-oom-status", + WQ_MEM_RECLAIM, 0); + if (!wq->wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&wq->wk.work, + octnet_poll_check_rxq_oom_status); + wq->wk.ctxptr = lio; + wq->wk.ctxul = q_no; } - INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work, - octnet_poll_check_rxq_oom_status); - lio->rxq_status_wq.wk.ctxptr 
= lio; - queue_delayed_work(lio->rxq_status_wq.wq, - &lio->rxq_status_wq.wk.work, - msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); + return 0; } void cleanup_rx_oom_poll_fn(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - - if (lio->rxq_status_wq.wq) { - cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work); - flush_workqueue(lio->rxq_status_wq.wq); - destroy_workqueue(lio->rxq_status_wq.wq); + struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q_no; + + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + wq = &lio->rxq_status_wq[q_no]; + if (wq->wq) { + cancel_delayed_work_sync(&wq->wk.work); + flush_workqueue(wq->wq); + destroy_workqueue(wq->wq); + wq->wq = NULL; + } } } @@ -1218,30 +1196,6 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) return 0; } -static void liquidio_change_mtu_completion(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_if_cfg_context *ctx; - - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - - if (status) { - dev_err(&oct->pci_dev->dev, "MTU change failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_FAIL); - } else { - WRITE_ONCE(ctx->cond, LIO_CHANGE_MTU_SUCCESS); - } - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /** * \brief Net device change_mtu * @param netdev network device @@ -1250,22 +1204,17 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - struct liquidio_if_cfg_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - int ctx_size; int ret = 0; - ctx_size = sizeof(struct liquidio_if_cfg_context); sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, ctx_size); + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; @@ -1278,28 +1227,28 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = liquidio_change_mtu_completion; - sc->callback_arg = sc; - sc->wait_time = 100; - ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); + octeon_free_soft_command(oct, sc); return -EINVAL; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR || - ctx->cond == LIO_CHANGE_MTU_FAIL) { - octeon_free_soft_command(oct, sc); + ret = wait_for_sc_completion_timeout(oct, sc, 0); + if (ret) + return ret; + + if (sc->sc_status) { + WRITE_ONCE(sc->caller_is_done, true); return -EINVAL; } netdev->mtu = new_mtu; lio->mtu = new_mtu; - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; } @@ -1333,8 +1282,6 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)sc->virtrptr; - struct oct_nic_stats_ctrl *ctrl = - (struct oct_nic_stats_ctrl *)sc->ctxptr; struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; @@ -1422,93 +1369,148 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, resp->status = 1; } else { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); resp->status = -1; } - complete(&ctrl->complete); } -int octnet_get_link_stats(struct net_device *netdev) +static int lio_fetch_vf_stats(struct lio *lio) { - struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; - struct oct_nic_stats_ctrl *ctrl; - struct oct_nic_stats_resp *resp; + struct oct_nic_vf_stats_resp *resp; + int retval; /* Alloc soft command */ sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_nic_stats_resp), - sizeof(struct octnic_ctrl_pkt)); + sizeof(struct oct_nic_vf_stats_resp), + 0); - if (!sc) - return -ENOMEM; + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + retval = -ENOMEM; + goto lio_fetch_vf_stats_exit; + } - resp = (struct oct_nic_stats_resp *)sc->virtrptr; - memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp)); - ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr; - memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl)); - ctrl->netdev = netdev; - init_completion(&ctrl->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, - OPCODE_NIC_PORT_STATS, 0, 0, 0); - - sc->callback = octnet_nic_stats_callback; - sc->callback_arg = sc; - sc->wait_time = 500; /*in milli seconds*/ + OPCODE_NIC_VF_PORT_STATS, 0, 0, 0); retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { octeon_free_soft_command(oct_dev, sc); - return -EINVAL; + goto lio_fetch_vf_stats_exit; } - wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000)); + retval = + wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "sc OPCODE_NIC_VF_PORT_STATS command failed\n"); + goto lio_fetch_vf_stats_exit; + } - if (resp->status != 1) { - octeon_free_soft_command(oct_dev, sc); + if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt, + (sizeof(u64)) >> 3); - return -EINVAL; + if (resp->spoofmac_cnt != 0) { + dev_warn(&oct_dev->pci_dev->dev, + "%llu Spoofed packets detected\n", + resp->spoofmac_cnt); + } } + WRITE_ONCE(sc->caller_is_done, 1); - octeon_free_soft_command(oct_dev, sc); - - return 0; +lio_fetch_vf_stats_exit: + 
return retval; } -static void liquidio_nic_seapi_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) +void lio_fetch_stats(struct work_struct *work) { - struct liquidio_nic_seapi_ctl_context *ctx; - struct octeon_soft_command *sc = buf; + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = wk->ctxptr; + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_resp *resp; + unsigned long time_in_jiffies; + int retval; + + if (OCTEON_CN23XX_PF(oct_dev)) { + /* report spoofchk every 2 seconds */ + if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) && + (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) && + oct_dev->sriov_info.num_vfs_alloced) { + lio_fetch_vf_stats(lio); + } - ctx = sc->ctxptr; + oct_dev->vfstats_poll++; + } + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + 0); + + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + goto lio_fetch_stats_exit; + } + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; - oct = lio_get_device(ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "%s: instruction failed. Status: %llx\n", - __func__, - CVM_CAST64(status)); + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + goto lio_fetch_stats_exit; + } + + retval = wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); + goto lio_fetch_stats_exit; } - ctx->status = status; - complete(&ctx->complete); + + octnet_nic_stats_callback(oct_dev, sc->sc_status, sc); + WRITE_ONCE(sc->caller_is_done, true); + +lio_fetch_stats_exit: + time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS); + if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) + schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies); + + return; } int liquidio_set_speed(struct lio *lio, int speed) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; u32 var; @@ -1521,21 +1523,18 @@ int liquidio_set_speed(struct lio *lio, int speed) return -EOPNOTSUPP; } - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_SET; @@ -1548,30 +1547,24 @@ int liquidio_set_speed(struct lio *lio, int speed) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { 
dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); retval = -EBUSY; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - octeon_free_soft_command(oct, sc); - return -EINTR; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", __func__, retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } @@ -1583,38 +1576,32 @@ int liquidio_set_speed(struct lio *lio, int speed) } oct->speed_setting = var; + WRITE_ONCE(sc->caller_is_done, true); } - octeon_free_soft_command(oct, sc); - return retval; } int liquidio_get_speed(struct lio *lio) { - struct liquidio_nic_seapi_ctl_context *ctx; struct octeon_device *oct = lio->oct_dev; struct oct_nic_seapi_resp *resp; struct octeon_soft_command *sc; union octnet_cmd *ncmd; - u32 ctx_size; int retval; - ctx_size = sizeof(struct liquidio_nic_seapi_ctl_context); sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, sizeof(struct oct_nic_seapi_resp), - ctx_size); + 0); if (!sc) return -ENOMEM; ncmd = sc->virtdptr; - ctx = sc->ctxptr; resp = sc->virtrptr; memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); - ctx->octeon_id = lio_get_device_id(oct); - ctx->status = 0; - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ncmd->u64 = 0; ncmd->s.cmd = SEAPI_CMD_SPEED_GET; @@ -1626,37 +1613,20 @@ int liquidio_get_speed(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_UBOOT_CTL, 0, 0, 0); - sc->callback = liquidio_nic_seapi_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; - retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - - retval = -EBUSY; + octeon_free_soft_command(oct, sc); + retval = -EIO; } else { - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(10000)) == 0) { - dev_err(&oct->pci_dev->dev, "%s: sc timeout\n", - __func__); - - oct->speed_setting = 25; - oct->no_speed_setting = 1; - - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; - return -EINTR; - } retval = resp->status; if (retval) { dev_err(&oct->pci_dev->dev, "%s failed retval=%d\n", __func__, retval); - oct->no_speed_setting = 1; - oct->speed_setting = 25; - octeon_free_soft_command(oct, sc); retval = -EIO; } else { u32 var; @@ -1664,16 +1634,171 @@ int liquidio_get_speed(struct lio *lio) var = be32_to_cpu((__force __be32)resp->speed); oct->speed_setting = var; if (var == 0xffff) { - oct->no_speed_setting = 1; /* unable to access boot variables * get the default value based on the NIC type */ - oct->speed_setting = 25; + if (oct->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + oct->no_speed_setting = 1; + oct->speed_setting = 25; + } else { + oct->speed_setting = 10; + } } + } + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +int liquidio_set_fec(struct lio *lio, int on_off) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = 
lio->oct_dev; + + if (oct->props[lio->ifidx].fec == on_off) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n", + __func__); + return -1; + } + + if (oct->speed_boot != 25) { + dev_err(&oct->pci_dev->dev, + "Set FEC only when link speed is 25G during insmod\n"); + return -1; + } + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_SET; + ncmd->s.param1 = on_off; + /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */ + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return -EIO; + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (var != on_off) { + dev_err(&oct->pci_dev->dev, + "Setting failed fec= %x, expect %x\n", + var, on_off); + oct->props[lio->ifidx].fec = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + } + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? "on" : "off"); + } + + return retval; +} + +int liquidio_get_fec(struct lio *lio) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, + "%s: Failed to send soft command\n", __func__); + octeon_free_soft_command(oct, sc); + return -EIO; } - octeon_free_soft_command(oct, sc); + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? 
"on" : "off"); + } return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 8e05afd5e39c..4c3925af53bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -33,25 +33,12 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); -struct oct_intrmod_context { - int octeon_id; - wait_queue_head_t wc; - int cond; - int status; -}; - struct oct_intrmod_resp { u64 rh; struct oct_intrmod_cfg intrmod; u64 status; }; -struct oct_mdio_cmd_context { - int octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct oct_mdio_cmd_resp { u64 rh; struct oct_mdio_cmd resp; @@ -257,6 +244,7 @@ static int lio_get_link_ksettings(struct net_device *netdev, linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || linfo->link.s.if_mode == INTERFACE_MODE_XFI) { dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + ecmd->base.transceiver = XCVR_EXTERNAL; } else { dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", linfo->link.s.if_mode); @@ -290,10 +278,12 @@ static int lio_get_link_ksettings(struct net_device *netdev, 10000baseCR_Full); } - if (oct->no_speed_setting == 0) + if (oct->no_speed_setting == 0) { liquidio_get_speed(lio); - else + liquidio_get_fec(lio); + } else { oct->speed_setting = 25; + } if (oct->speed_setting == 10) { ethtool_link_ksettings_add_link_mode @@ -317,6 +307,24 @@ static int lio_get_link_ksettings(struct net_device *netdev, (ecmd, advertising, 25000baseCR_Full); } + + if (oct->no_speed_setting) + break; + + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_NONE); + /*FEC_OFF*/ + if (oct->props[lio->ifidx].fec == 1) { + /* ETHTOOL_FEC_RS */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_RS); + } else { + /* ETHTOOL_FEC_OFF */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_NONE); + } } else { /* VF */ if (linfo->link.s.speed == 10000) { ethtool_link_ksettings_add_link_mode @@ -472,12 +480,11 @@ lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) nctrl.ncmd.s.param1 = num_queues; nctrl.ncmd.s.param2 = num_queues; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", ret); return -1; @@ -708,13 +715,13 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val) nctrl.ncmd.s.param1 = addr; nctrl.ncmd.s.param2 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } @@ -734,41 +741,19 @@ static int octnet_id_active(struct net_device *netdev, int val) nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; nctrl.ncmd.s.param1 = val; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - 
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); return -EINVAL; } return 0; } -/* Callback for when mdio command response arrives - */ -static void octnet_mdio_resp_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct oct_mdio_cmd_context *mdio_cmd_ctx; - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; - - oct = lio_get_device(mdio_cmd_ctx->octeon_id); - if (status) { - dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(mdio_cmd_ctx->cond, -1); - } else { - WRITE_ONCE(mdio_cmd_ctx->cond, 1); - } - wake_up_interruptible(&mdio_cmd_ctx->wc); -} - /* This routine provides PHY access routines for * mdio clause45 . */ @@ -778,25 +763,20 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) struct octeon_device *oct_dev = lio->oct_dev; struct octeon_soft_command *sc; struct oct_mdio_cmd_resp *mdio_cmd_rsp; - struct oct_mdio_cmd_context *mdio_cmd_ctx; struct oct_mdio_cmd *mdio_cmd; int retval = 0; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_mdio_cmd), - sizeof(struct oct_mdio_cmd_resp), - sizeof(struct oct_mdio_cmd_context)); + sizeof(struct oct_mdio_cmd_resp), 0); if (!sc) return -ENOMEM; - mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr; mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; - WRITE_ONCE(mdio_cmd_ctx->cond, 0); - mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev); mdio_cmd->op = op; mdio_cmd->mdio_addr = loc; if (op) @@ -808,42 +788,40 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, 0, 0, 0); - sc->wait_time = 1000; - sc->callback = octnet_mdio_resp_callback; - sc->callback_arg = sc; - - init_waitqueue_head(&mdio_cmd_ctx->wc); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); - if (retval == IQ_SEND_FAILED) { dev_err(&oct_dev->pci_dev->dev, "octnet_mdio45_access instruction failed status: %x\n", retval); - retval = -EBUSY; + octeon_free_soft_command(oct_dev, sc); + return -EBUSY; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived */ - sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + retval = mdio_cmd_rsp->status; if (retval) { - dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n"); - retval = -EBUSY; - } else { - octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), - sizeof(struct oct_mdio_cmd) / 8); - - if (READ_ONCE(mdio_cmd_ctx->cond) == 1) { - if (!op) - *value = mdio_cmd_rsp->resp.value1; - } else { - retval = -EINVAL; - } + dev_err(&oct_dev->pci_dev->dev, + "octnet mdio45 access failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EBUSY; } - } - octeon_free_soft_command(oct_dev, sc); + octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), + sizeof(struct oct_mdio_cmd) / 8); + + if (!op) + *value = mdio_cmd_rsp->resp.value1; + + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -1007,8 +985,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev, static int lio_23xx_reconfigure_queue_count(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; - 
struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1018,11 +995,10 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) int j; resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, data_size, - resp_size, ctx_size); + resp_size, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", __func__); @@ -1030,7 +1006,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) } resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); @@ -1038,9 +1013,6 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); ifidx_or_pfnum = oct->pf_num; - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; @@ -1052,27 +1024,29 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_QCOUNT_UPDATE, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, - "iq/oq config failed status: %x\n", + "Sending iq/oq config failed status: %x\n", retval); - goto qcount_update_fail; + octeon_free_soft_command(oct, sc); + return -EIO; } - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct->pci_dev->dev, "Wait interrupted\n"); - return -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&oct->pci_dev->dev, "iq/oq config failed\n"); - goto qcount_update_fail; + dev_err(&oct->pci_dev->dev, + "iq/oq config failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -1; } octeon_swap_8B_data((u64 *)(&resp->cfg_info), @@ -1097,16 +1071,12 @@ static int lio_23xx_reconfigure_queue_count(struct lio *lio) lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - octeon_free_soft_command(oct, sc); dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", lio->linfo.num_rxpciq); - return 0; - -qcount_update_fail: - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); - return -1; + return 0; } static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) @@ -1166,6 +1136,8 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) * steps like updating sriov_info for the octeon device need to be done. 
*/ if (queue_count_update) { + cleanup_rx_oom_poll_fn(netdev); + lio_delete_glists(lio); /* Delete mbox for PF which is SRIOV disabled because sriov_info @@ -1265,6 +1237,11 @@ static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) return -1; } + if (setup_rx_oom_poll_fn(netdev)) { + dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n"); + return 1; + } + /* Send firmware the information about new number of queues * if the interface is a VF or a PF that is SRIOV enabled. */ @@ -1412,7 +1389,6 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) nctrl.ncmd.u64 = 0; nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; @@ -1433,8 +1409,9 @@ lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) } ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { - dev_err(&oct->pci_dev->dev, "Failed to set pause parameter\n"); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to set pause parameter, ret=%d\n", ret); return -EINVAL; } @@ -1764,7 +1741,8 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, */ data[i++] = lstats.rx_dropped; /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ - data[i++] = lstats.tx_dropped; + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.fw_err_drop; data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; @@ -2013,34 +1991,11 @@ static int lio_vf_get_sset_count(struct net_device *netdev, int sset) } } -/* Callback function for intrmod */ -static void octnet_intrmod_callback(struct octeon_device *oct_dev, - u32 status, - void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct oct_intrmod_context *ctx; - - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - ctx->status = status; - - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - /* get interrupt moderation parameters */ static int octnet_get_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_resp *resp; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2049,8 +2004,7 @@ static int octnet_get_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, 0, - sizeof(struct oct_intrmod_resp), - sizeof(struct oct_intrmod_context)); + sizeof(struct oct_intrmod_resp), 0); if (!sc) return -ENOMEM; @@ -2058,20 +2012,13 @@ static int octnet_get_intrmod_cfg(struct lio *lio, resp = (struct oct_intrmod_resp *)sc->virtrptr; memset(resp, 0, sizeof(struct oct_intrmod_resp)); - ctx = (struct oct_intrmod_context *)sc->ctxptr; - memset(ctx, 0, sizeof(struct oct_intrmod_context)); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - sc->iq_no = lio->linfo.txpciq[0].s.q_no; octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2082,32 +2029,23 
@@ static int octnet_get_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&oct_dev->pci_dev->dev, "Wait interrupted\n"); - goto intrmod_info_wait_intr; - } + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return -ENODEV; - retval = ctx->status || resp->status; - if (retval) { + if (resp->status) { dev_err(&oct_dev->pci_dev->dev, "Get interrupt moderation parameters failed\n"); - goto intrmod_info_wait_fail; + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } octeon_swap_8B_data((u64 *)&resp->intrmod, (sizeof(struct oct_intrmod_cfg)) / 8); memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); - octeon_free_soft_command(oct_dev, sc); + WRITE_ONCE(sc->caller_is_done, true); return 0; - -intrmod_info_wait_fail: - - octeon_free_soft_command(oct_dev, sc); - -intrmod_info_wait_intr: - - return -ENODEV; } /* Configure interrupt moderation parameters */ @@ -2115,7 +2053,6 @@ static int octnet_set_intrmod_cfg(struct lio *lio, struct oct_intrmod_cfg *intr_cfg) { struct octeon_soft_command *sc; - struct oct_intrmod_context *ctx; struct oct_intrmod_cfg *cfg; int retval; struct octeon_device *oct_dev = lio->oct_dev; @@ -2124,18 +2061,11 @@ static int octnet_set_intrmod_cfg(struct lio *lio, sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct_dev, sizeof(struct oct_intrmod_cfg), - 0, - sizeof(struct oct_intrmod_context)); + 16, 0); if (!sc) return -ENOMEM; - ctx = (struct oct_intrmod_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct_dev); - init_waitqueue_head(&ctx->wc); - cfg = (struct oct_intrmod_cfg *)sc->virtdptr; memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); @@ -2146,9 +2076,8 @@ static int octnet_set_intrmod_cfg(struct lio *lio, octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); - sc->callback = octnet_intrmod_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -2159,26 +2088,24 @@ static int octnet_set_intrmod_cfg(struct lio *lio, /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) != -EINTR) { - retval = ctx->status; - if (retval) - dev_err(&oct_dev->pci_dev->dev, - "intrmod config failed. Status: %llx\n", - CVM_CAST64(retval)); - else - dev_info(&oct_dev->pci_dev->dev, - "Rx-Adaptive Interrupt moderation %s\n", - (intr_cfg->rx_enable) ? - "enabled" : "disabled"); - - octeon_free_soft_command(oct_dev, sc); - - return ((retval) ? -ENODEV : 0); + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + + retval = sc->sc_status; + if (retval == 0) { + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation %s\n", + (intr_cfg->rx_enable) ? + "enabled" : "disabled"); + WRITE_ONCE(sc->caller_is_done, true); + return 0; } - dev_err(&oct_dev->pci_dev->dev, "iq/oq config failed\n"); - - return -EINTR; + dev_err(&oct_dev->pci_dev->dev, + "intrmod config failed. Status: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; } static int lio_get_intr_coalesce(struct net_device *netdev,
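A recurring detail in these conversions: commands that previously requested no response buffer now pass a response size of 16, as octnet_set_intrmod_cfg() does above (0 -> 16) and as lio_sync_octeon_time(), send_rx_ctrl_cmd() and the VF-representor paths do further down. Presumably the completion machinery still needs room for firmware to write back the framing words even when a command returns no payload; the resp structs in this file all place their payload between an rh header and a status word, and with a zero-length payload that frame is exactly 16 bytes. An illustrative layout (a sketch, not a struct the driver defines):

struct min_sc_resp {
	u64 rh;		/* response header written back by firmware */
	u64 status;	/* status word checked when the completion fires */
};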
@@ -3123,9 +3050,60 @@ static int lio_set_priv_flags(struct net_device *netdev, u32 flags) return 0; } +static int lio_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + fec->active_fec = ETHTOOL_FEC_NONE; + fec->fec = ETHTOOL_FEC_NONE; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return 0; + + liquidio_get_fec(lio); + fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); + if (oct->props[lio->ifidx].fec == 1) + fec->active_fec = ETHTOOL_FEC_RS; + else + fec->active_fec = ETHTOOL_FEC_OFF; + } + + return 0; +} + +static int lio_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return -EOPNOTSUPP; + + if (fec->fec & ETHTOOL_FEC_OFF) + liquidio_set_fec(lio, 0); + else if (fec->fec & ETHTOOL_FEC_RS) + liquidio_set_fec(lio, 1); + else + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + static const struct ethtool_ops lio_ethtool_ops = { .get_link_ksettings = lio_get_link_ksettings, .set_link_ksettings = lio_set_link_ksettings, + .get_fecparam = lio_get_fecparam, + .set_fecparam = lio_set_fecparam, .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 6fb13fa73b27..f42c1b0f4ac8 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -99,14 +99,6 @@ struct lio_trusted_vf_ctx { int status; }; -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_link_status_resp { u64 rh; struct oct_link_info link_info; @@ -642,26 +634,6 @@ static inline void update_link_status(struct net_device *netdev, } /** - * lio_sync_octeon_time_cb - callback that is invoked when soft command - * sent by lio_sync_octeon_time() has completed successfully or failed - * - * @oct - octeon device structure - * @status - indicates success or failure - * @buf - pointer to the command that was sent to firmware - **/ -static void lio_sync_octeon_time_cb(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - - if (status) - dev_err(&oct->pci_dev->dev, - "Failed to sync time to octeon; error=%d\n", status); - - octeon_free_soft_command(oct, sc); -} - -/** * lio_sync_octeon_time - send latest localtime to octeon firmware so that * firmware will correct its time, in case there is a time skew * * @work: work scheduled to send time update to octeon firmware **/ @@ -677,7 +649,7 @@ static void lio_sync_octeon_time(struct work_struct *work) struct lio_time *lt; int ret; - sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0); + sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: soft command allocation failed\n"); @@ -696,15 +668,16 @@ static void lio_sync_octeon_time(struct work_struct *work) octeon_prepare_soft_command(oct, sc,
OPCODE_NIC, OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); - sc->callback = lio_sync_octeon_time_cb; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; ret = octeon_send_soft_command(oct, sc); if (ret == IQ_SEND_FAILED) { dev_err(&oct->pci_dev->dev, "Failed to sync time to octeon: failed to send soft command\n"); octeon_free_soft_command(oct, sc); + } else { + WRITE_ONCE(sc->caller_is_done, true); } queue_delayed_work(lio->sync_octeon_time_wq.wq, @@ -1037,12 +1010,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -1052,6 +1025,31 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + /* fallthrough */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ @@ -1178,34 +1176,6 @@ static void octeon_destroy_resources(struct octeon_device *oct) } /** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, - void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. 
Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); -} - -/** * \brief Send Rx control command * @param lio per-network private data * @param start_stop whether to start or stop @@ -1213,9 +1183,7 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_soft_command *sc; - struct liquidio_rx_ctl_context *ctx; union octnet_cmd *ncmd; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int retval; @@ -1224,14 +1192,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); + if (!sc) { + netif_info(lio, rx_err, lio->netdev, + "Failed to allocate octeon_soft_command struct\n"); + return; + } ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -1244,23 +1207,25 @@ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); + return; } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -1274,8 +1239,10 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; - struct lio *lio; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; + struct lio *lio; if (!netdev) { dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", @@ -1304,6 +1271,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1840,9 +1809,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (oct->props[lio->ifidx].napi_enabled == 0) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -1876,6 +1849,12 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); + /* start periodical statistics fetch */ + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); @@ -1890,6 +1869,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; ifstate_reset(lio, LIO_IFSTATE_RUNNING); @@ -1916,6 +1897,8 @@ static int liquidio_stop(struct net_device *netdev) cleanup_tx_poll_fn(netdev); } + cancel_delayed_work_sync(&lio->stats_wk.work); + if (lio->ptp_clock) { ptp_clock_unregister(lio->ptp_clock); lio->ptp_clock = NULL; @@ -1934,6 +1917,8 @@ static int liquidio_stop(struct net_device *netdev) if (OCTEON_CN23XX_PF(oct)) oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); @@ -2014,10 +1999,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -2046,8 +2030,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
*/ @@ -2058,6 +2040,14 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status) { + dev_err(&oct->pci_dev->dev, + "%s: MAC Address change failed. sc return=%x\n", + __func__, nctrl.sc_status); + return -EIO; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); @@ -2111,7 +2101,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; lstats->collisions = oct->link_stats.fromhost.total_collisions; @@ -2598,14 +2587,15 @@ static int liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; @@ -2626,14 +2616,15 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2659,15 +2650,16 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2695,15 +2687,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "VxLAN port add/delete failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -2826,6 +2819,7 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (!is_valid_ether_addr(mac)) return -EINVAL; @@ -2839,12 +2833,13 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; /* vfidx is 0 based, but vf_num (param1) is 1 based */ nctrl.ncmd.s.param1 = vfidx + 1; - nctrl.ncmd.s.param2 = (is_admin_assigned ? 
1 : 0); nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = LIO_CMD_WAIT_TM; + if (is_admin_assigned) { + nctrl.ncmd.s.param2 = true; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + } nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. */ @@ -2852,9 +2847,11 @@ static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) @@ -2873,6 +2870,62 @@ static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) return retval; } +static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, + bool enable) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int retval; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { + netif_info(lio, drv, lio->netdev, + "firmware does not support spoofchk\n"); + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (enable) { + if (oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } else { + /* Clear */ + if (!oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, + * but vf_num (param1) is 1 based + */ + nctrl.ncmd.s.param2 = enable; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = NULL; + + retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); + + if (retval) { + netif_info(lio, drv, lio->netdev, + "Failed to set VF %d spoofchk %s\n", vfidx, + enable ? "on" : "off"); + return -1; + } + + oct->sriov_info.vf_spoofchk[vfidx] = enable; + netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, + enable ? "on" : "off"); + + return 0; +} + static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, u16 vlan, u8 qos, __be16 vlan_proto) {
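liquidio_set_vf_spoofchk() above is gated on a firmware capability bit (LIQUIDIO_SPOOFCHK_CAP, defined in liquidio_common.h at the end of this patch) and becomes reachable once .ndo_set_vf_spoofchk is wired into lionetdevops below; from userspace that is "ip link set dev <pf-netdev> vf 0 spoofchk on". A minimal sketch of the rtnetlink-side dispatch into such a hook, illustrative only (the real dispatch lives in net/core/rtnetlink.c's do_setvfinfo()):

#include <linux/netdevice.h>

static int set_vf0_spoofchk(struct net_device *netdev, bool enable)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	if (!ops->ndo_set_vf_spoofchk)
		return -EOPNOTSUPP;

	/* rtnetlink hands the driver a 0-based VF index */
	return ops->ndo_set_vf_spoofchk(netdev, 0, enable);
}

Note the off-by-one convention the driver helper bridges: the 0-based vfidx from rtnetlink becomes the firmware's 1-based vf_num, hence the vfidx + 1 above.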
"on" : "off"); + + return 0; +} + static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, u16 vlan, u8 qos, __be16 vlan_proto) { @@ -2880,6 +2933,7 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; u16 vlantci; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -2911,13 +2965,17 @@ static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret) { + if (ret > 0) + ret = -EIO; + return ret; + } oct->sriov_info.vf_vlantci[vfidx] = vlantci; - return 0; + return ret; } static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, @@ -2930,6 +2988,8 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; + memset(ivi, 0, sizeof(struct ifla_vf_info)); + ivi->vf = vfidx; macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; ether_addr_copy(&ivi->mac[0], macaddr); @@ -2941,33 +3001,22 @@ static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, else ivi->trusted = false; ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; - return 0; -} - -static void trusted_vf_callback(struct octeon_device *oct_dev, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_trusted_vf_ctx *ctx; + ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; + ivi->max_tx_rate = lio->linfo.link.s.speed; + ivi->min_tx_rate = 0; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - ctx->status = status; - - complete(&ctx->complete); + return 0; } static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) { struct octeon_device *oct = lio->oct_dev; - struct lio_trusted_vf_ctx *ctx; struct octeon_soft_command *sc; - int ctx_size, retval; - - ctx_size = sizeof(struct lio_trusted_vf_ctx); - sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size); + int retval; - ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr; - init_completion(&ctx->complete); + sc = octeon_alloc_soft_command(oct, 0, 16, 0); + if (!sc) + return -ENOMEM; sc->iq_no = lio->linfo.txpciq[0].s.q_no; @@ -2976,23 +3025,21 @@ static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, trusted); - sc->callback = trusted_vf_callback; - sc->callback_arg = sc; - sc->wait_time = 1000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct, sc); retval = -1; } else { /* Wait for response or timeout */ - if (wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies(2000))) - retval = ctx->status; - else - retval = -1; - } + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); - octeon_free_soft_command(oct, sc); + WRITE_ONCE(sc->caller_is_done, true); + } return retval; } @@ -3055,6 +3102,7 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; + int ret = 0; if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) return -EINVAL; @@ -3070,13 +3118,15 @@ static int 
liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, nctrl.ncmd.s.more = 0; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.cb_fn = NULL; - nctrl.wait_time = LIO_CMD_WAIT_TM; - octnet_send_nic_ctrl_pkt(oct, &nctrl); + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); - oct->sriov_info.vf_linkstate[vfidx] = linkstate; + if (!ret) + oct->sriov_info.vf_linkstate[vfidx] = linkstate; + else if (ret > 0) + ret = -EIO; - return 0; + return ret; } static int @@ -3204,6 +3254,7 @@ static const struct net_device_ops lionetdevops = { .ndo_set_vf_mac = liquidio_set_vf_mac, .ndo_set_vf_vlan = liquidio_set_vf_vlan, .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, .ndo_set_vf_trust = liquidio_set_vf_trust, .ndo_set_vf_link_state = liquidio_set_vf_link_state, .ndo_get_vf_stats = liquidio_get_vf_stats, @@ -3307,7 +3358,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) unsigned long micro; u32 cur_ver; struct octeon_soft_command *sc; - struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; struct octdev_props *props; int retval, num_iqueues, num_oqueues; @@ -3315,7 +3365,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) union oct_nic_if_cfg if_cfg; unsigned int base_queue; unsigned int gmx_port_id; - u32 resp_size, ctx_size, data_size; + u32 resp_size, data_size; u32 ifidx_or_pfnum; struct lio_version *vdata; struct devlink *devlink; @@ -3340,13 +3390,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -3376,9 +3424,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); if_cfg.u64 = 0; if_cfg.s.num_iqueues = num_iqueues; @@ -3392,9 +3437,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = LIO_IFCFG_WAIT_TIME; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { @@ -3402,22 +3446,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return -EIO; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); /* Verify f/w version (in case of 'auto' loading from flash) */ fw_ver = octeon_dev->fw_info.liquidio_firmware_version; @@ -3427,7 +3475,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n", LIQUIDIO_BASE_VERSION, fw_ver); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } else if (atomic_read(octeon_dev->adapter_fw_state) == FW_IS_PRELOADED) { dev_info(&octeon_dev->pci_dev->dev, @@ -3454,7 +3503,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } if (OCTEON_CN6XXX(octeon_dev)) { @@ -3473,7 +3523,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -3488,14 +3539,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number rx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } retval = netif_set_real_num_tx_queues(netdev, num_iqueues); if (retval) { dev_err(&octeon_dev->pci_dev->dev, "setting real number tx failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; } lio = GET_LIO(netdev); @@ -3522,6 +3575,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + WRITE_ONCE(sc->caller_is_done, true); + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); if (OCTEON_CN23XX_PF(octeon_dev) || @@ -3588,7 +3643,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Error setting VF%d MAC address\n", j); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } } @@ -3610,7 +3665,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -3621,7 +3676,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -3643,20 +3698,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNET_CMD_VERBOSE_ENABLE, 0); if (setup_link_status_change_wq(netdev)) 
- goto setup_nic_dev_fail; + goto setup_nic_dev_free; if ((octeon_dev->fw_info.app_cap_flags & LIQUIDIO_TIME_SYNC_CAP) && setup_sync_octeon_time_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -3679,8 +3734,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - if (octeon_dev->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || octeon_dev->subsystem_id == @@ -3709,13 +3762,20 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } octeon_dev->speed_boot = octeon_dev->speed_setting; + /* don't read FEC setting if unsupported by f/w (see above) */ + if (octeon_dev->speed_boot == 25 && + !octeon_dev->no_speed_setting) { + liquidio_get_fec(lio); + octeon_dev->props[lio->ifidx].fec_boot = + octeon_dev->props[lio->ifidx].fec; + } } devlink = devlink_alloc(&liquidio_devlink_ops, sizeof(struct lio_devlink_priv)); if (!devlink) { dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } lio_devlink = devlink_priv(devlink); @@ -3725,7 +3785,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) devlink_free(devlink); dev_err(&octeon_dev->pci_dev->dev, "devlink registration failed\n"); - goto setup_nic_wait_intr; + goto setup_nic_dev_free; } octeon_dev->devlink = devlink; @@ -3733,17 +3793,16 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index b77835724dc8..0ec4bfe6a147 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -40,14 +40,6 @@ MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) -struct liquidio_rx_ctl_context { - int octeon_id; - - wait_queue_head_t wc; - - int cond; -}; - struct oct_timestamp_resp { u64 rh; u64 timestamp; @@ -452,6 +444,8 @@ static void octeon_pci_flr(struct octeon_device *oct) */ static void octeon_destroy_resources(struct octeon_device *oct) { + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct msix_entry *msix_entries; int i; @@ -471,12 +465,12 @@ static void octeon_destroy_resources(struct octeon_device *oct) case OCT_DEV_HOST_OK: /* fallthrough */ case OCT_DEV_IO_QUEUES_DONE: - if (wait_for_pending_requests(oct)) - dev_err(&oct->pci_dev->dev, "There were pending requests\n"); - if (lio_wait_for_instr_fetch(oct)) dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + /* Disable the input and output queues now. 
No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. @@ -485,7 +479,33 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); - /* fall through */ + + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + + /* fall through */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); @@ -569,33 +589,8 @@ static void octeon_destroy_resources(struct octeon_device *oct) /* Nothing to be done here either */ break; } -} - -/** - * \brief Callback for rx ctrl - * @param status status of request - * @param buf pointer to resp structure - */ -static void rx_ctl_callback(struct octeon_device *oct, - u32 status, void *buf) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; - struct liquidio_rx_ctl_context *ctx; - - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - oct = lio_get_device(ctx->octeon_id); - if (status) - dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n", - CVM_CAST64(status)); - WRITE_ONCE(ctx->cond, 1); - - /* This barrier is required to be sure that the response has been - * written fully before waking up the handler - */ - wmb(); - - wake_up_interruptible(&ctx->wc); + tasklet_kill(&oct_priv->droq_tasklet); } /** @@ -606,8 +601,6 @@ static void rx_ctl_callback(struct octeon_device *oct, static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; - int ctx_size = sizeof(struct liquidio_rx_ctl_context); - struct liquidio_rx_ctl_context *ctx; struct octeon_soft_command *sc; union octnet_cmd *ncmd; int retval; @@ -617,14 +610,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, - 16, ctx_size); + 16, 0); + if (!sc) { + netif_info(lio, rx_err, lio->netdev, + "Failed to allocate octeon_soft_command struct\n"); + return; + } ncmd = (union octnet_cmd *)sc->virtdptr; - ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr; - - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(oct); - init_waitqueue_head(&ctx->wc); ncmd->u64 = 0; ncmd->s.cmd = OCTNET_CMD_RX_CTL; @@ -637,23 +625,24 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = rx_ctl_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(oct, sc); if (retval == IQ_SEND_FAILED) { netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); } else { /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out.
*/ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) return; + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); } - - octeon_free_soft_command(oct, sc); } /** @@ -667,6 +656,8 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct net_device *netdev = oct->props[ifidx].netdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; struct lio *lio; @@ -696,6 +687,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + tasklet_enable(&oct_priv->droq_tasklet); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -913,9 +906,13 @@ static int liquidio_open(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; if (!oct->props[lio->ifidx].napi_enabled) { + tasklet_disable(&oct_priv->droq_tasklet); + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_enable(napi); @@ -932,6 +929,11 @@ static int liquidio_open(struct net_device *netdev) netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); start_txqs(netdev); + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + /* tell Octeon to start forwarding packets to host */ send_rx_ctrl_cmd(lio, 1); @@ -948,6 +950,8 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; struct napi_struct *napi, *n; /* tell Octeon to stop forwarding packets to host */ @@ -977,8 +981,12 @@ static int liquidio_stop(struct net_device *netdev) oct->props[lio->ifidx].napi_enabled = 0; oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); } + cancel_delayed_work_sync(&lio->stats_wk.work); + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); return 0; @@ -1093,10 +1101,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev) /* Apparently, any activity in this call from the kernel has to * be atomic. So we won't wait for response. */ - nctrl.wait_time = 0; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", ret); } @@ -1133,8 +1140,6 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) nctrl.ncmd.s.more = 1; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.netpndev = (u64)netdev; - nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - nctrl.wait_time = 100; nctrl.udd[0] = 0; /* The MAC Address is presented in network byte order. 
*/ @@ -1145,6 +1150,13 @@ static int liquidio_set_mac(struct net_device *netdev, void *p) dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); return -ENOMEM; } + + if (nctrl.sc_status == + FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); + return -EPERM; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); @@ -1198,7 +1210,6 @@ liquidio_get_stats64(struct net_device *netdev, lstats->rx_packets = pkts; lstats->rx_dropped = drop; - octnet_get_link_stats(netdev); lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; /* detailed rx_errors: */ @@ -1638,8 +1649,6 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct octnic_ctrl_pkt nctrl; - struct completion compl; - u16 response_code; int ret = 0; memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); @@ -1648,26 +1657,15 @@ liquidio_vlan_rx_add_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; - init_completion(&compl); - nctrl.completion = &compl; - nctrl.response_code = &response_code; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); - return -EIO; - } - - if (!wait_for_completion_timeout(&compl, - msecs_to_jiffies(nctrl.wait_time))) - return -EPERM; - - if (READ_ONCE(response_code)) return -EPERM; + } return 0; } @@ -1687,14 +1685,15 @@ liquidio_vlan_rx_kill_vid(struct net_device *netdev, nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1720,14 +1719,15 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1755,15 +1755,16 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command, nctrl.ncmd.s.more = vxlan_cmd_bit; nctrl.ncmd.s.param1 = vxlan_port; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - nctrl.wait_time = 100; nctrl.netpndev = (u64)netdev; nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); - if (ret < 0) { + if (ret) { dev_err(&oct->pci_dev->dev, "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", ret); + if (ret > 0) + ret = -EIO; } return ret; } @@ -1924,8 +1925,7 @@ nic_info_err: static int setup_nic_devices(struct octeon_device *octeon_dev) { int retval, num_iqueues, num_oqueues; - struct liquidio_if_cfg_context *ctx; - u32 resp_size, ctx_size, data_size; + 
u32 resp_size, data_size; struct liquidio_if_cfg_resp *resp; struct octeon_soft_command *sc; union oct_nic_if_cfg if_cfg; @@ -1956,13 +1956,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct liquidio_if_cfg_resp); - ctx_size = sizeof(struct liquidio_if_cfg_context); data_size = sizeof(struct lio_version); sc = (struct octeon_soft_command *) octeon_alloc_soft_command(octeon_dev, data_size, - resp_size, ctx_size); + resp_size, 0); resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; - ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((u64 *)vdata) = 0; @@ -1970,10 +1968,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); - WRITE_ONCE(ctx->cond, 0); - ctx->octeon_id = lio_get_device_id(octeon_dev); - init_waitqueue_head(&ctx->wc); - if_cfg.u64 = 0; if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; @@ -1986,32 +1980,37 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0); - sc->callback = lio_if_cfg_callback; - sc->callback_arg = sc; - sc->wait_time = 5000; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; retval = octeon_send_soft_command(octeon_dev, sc); if (retval == IQ_SEND_FAILED) { dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ - goto setup_nic_dev_fail; + octeon_free_soft_command(octeon_dev, sc); + return -EIO; } /* Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. */ - if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) { - dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n"); - goto setup_nic_wait_intr; - } + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; retval = resp->status; if (retval) { - dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); - goto setup_nic_dev_fail; + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed, retval = %d\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); @@ -2022,7 +2021,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", resp->cfg_info.iqmask, resp->cfg_info.oqmask); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } dev_dbg(&octeon_dev->pci_dev->dev, "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", @@ -2033,7 +2033,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (!netdev) { dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); - goto setup_nic_dev_fail; + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; } SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); @@ -2070,6 +2071,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; lio->linfo.macaddr_is_admin_asgnd = resp->cfg_info.linfo.macaddr_is_admin_asgnd; + lio->linfo.macaddr_spoofchk = + resp->cfg_info.linfo.macaddr_spoofchk;
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); @@ -2109,6 +2112,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) netdev->min_mtu = LIO_MIN_MTU_SIZE; netdev->max_mtu = LIO_MAX_MTU_SIZE; + WRITE_ONCE(sc->caller_is_done, true); + /* Point to the properties for octeon device to which this * interface belongs. */ @@ -2132,7 +2137,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); @@ -2155,7 +2160,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { dev_err(&octeon_dev->pci_dev->dev, "Gather list allocation failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } /* Register ethtool support */ @@ -2170,15 +2175,15 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) OCTNIC_LROIPV4 | OCTNIC_LROIPV6); if (setup_link_status_change_wq(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; if (setup_rx_oom_poll_fn(netdev)) - goto setup_nic_dev_fail; + goto setup_nic_dev_free; /* Register the network device with the OS */ if (register_netdev(netdev)) { dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); - goto setup_nic_dev_fail; + goto setup_nic_dev_free; } dev_dbg(&octeon_dev->pci_dev->dev, @@ -2201,24 +2206,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) dev_dbg(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup successful\n", i); - octeon_free_soft_command(octeon_dev, sc); - octeon_dev->no_speed_setting = 1; } return 0; -setup_nic_dev_fail: - - octeon_free_soft_command(octeon_dev, sc); - -setup_nic_wait_intr: +setup_nic_dev_free: while (i--) { dev_err(&octeon_dev->pci_dev->dev, "NIC ifidx:%d Setup failed\n", i); liquidio_destroy_nic_device(octeon_dev, i); } + +setup_nic_dev_done: + return -ENODEV; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index ddd7431579f4..96cf4a40af66 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -27,7 +27,6 @@ #include "octeon_network.h" #include <net/switchdev.h> #include "lio_vf_rep.h" -#include "octeon_network.h" static int lio_vf_rep_open(struct net_device *ndev); static int lio_vf_rep_stop(struct net_device *ndev); @@ -49,44 +48,25 @@ static const struct net_device_ops lio_vf_rep_ndev_ops = { .ndo_change_mtu = lio_vf_rep_change_mtu, }; -static void -lio_vf_rep_send_sc_complete(struct octeon_device *oct, - u32 status, void *ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; - struct lio_vf_rep_sc_ctx *ctx = - (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - struct lio_vf_rep_resp *resp = - (struct lio_vf_rep_resp *)sc->virtrptr; - - if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status)) - WRITE_ONCE(resp->status, 0); - - complete(&ctx->complete); -} - static int lio_vf_rep_send_soft_command(struct octeon_device *oct, void *req, int req_size, void *resp, int resp_size) { int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; - int ctx_size = sizeof(struct lio_vf_rep_sc_ctx); struct octeon_soft_command *sc = NULL; struct lio_vf_rep_resp *rep_resp; - struct lio_vf_rep_sc_ctx *ctx; void *sc_req; int err; sc = (struct octeon_soft_command *) octeon_alloc_soft_command(oct, req_size, - tot_resp_size, 
ctx_size); + tot_resp_size, 0); if (!sc) return -ENOMEM; - ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr; - memset(ctx, 0, ctx_size); - init_completion(&ctx->complete); + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; sc_req = (struct lio_vf_rep_req *)sc->virtdptr; memcpy(sc_req, req, req_size); @@ -98,23 +78,24 @@ lio_vf_rep_send_soft_command(struct octeon_device *oct, sc->iq_no = 0; octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_CMD, 0, 0, 0); - sc->callback = lio_vf_rep_send_sc_complete; - sc->callback_arg = sc; - sc->wait_time = LIO_VF_REP_REQ_TMO_MS; err = octeon_send_soft_command(oct, sc); if (err == IQ_SEND_FAILED) goto free_buff; - wait_for_completion_timeout(&ctx->complete, - msecs_to_jiffies - (2 * LIO_VF_REP_REQ_TMO_MS)); + err = wait_for_sc_completion_timeout(oct, sc, 0); + if (err) + return err; + err = READ_ONCE(rep_resp->status) ? -EBUSY : 0; if (err) dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); - - if (resp) + else if (resp) memcpy(resp, (rep_resp + 1), resp_size); + + WRITE_ONCE(sc->caller_is_done, true); + return err; + free_buff: octeon_free_soft_command(oct, sc); @@ -404,7 +385,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) } sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, 0, 0, 0); + octeon_alloc_soft_command(oct, 0, 16, 0); if (!sc) { dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n"); goto xmit_failed; @@ -413,6 +394,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) /* Multiple buffers are not used for vf_rep packets. */ if (skb_shinfo(skb)->nr_frags != 0) { dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -420,6 +402,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) { dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n"); + octeon_free_soft_command(oct, sc); goto xmit_failed; } @@ -440,6 +423,7 @@ lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) if (status == IQ_SEND_FAILED) { dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, sc->datasize, DMA_TO_DEVICE); + octeon_free_soft_command(oct, sc); goto xmit_failed; } diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 7407fcd338e9..a5e0e9f17959 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -118,6 +118,10 @@ enum octeon_tag_type { /* App specific capabilities from firmware to pf driver */ #define LIQUIDIO_TIME_SYNC_CAP 0x1 #define LIQUIDIO_SWITCHDEV_CAP 0x2 +#define LIQUIDIO_SPOOFCHK_CAP 0x4 + +/* error status return from firmware */ +#define OCTEON_REQUEST_NO_PERMISSION 0xc static inline u32 incr_index(u32 index, u32 count, u32 max) { @@ -241,6 +245,10 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f +#define OCTNET_CMD_GROUP1 1 +#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1 +#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK + #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 @@ -250,9 +258,18 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define OCTNET_CMD_FAIL 0x1 + +#define 
SEAPI_CMD_FEC_SET 0x0 +#define SEAPI_CMD_FEC_SET_DISABLE 0x0 +#define SEAPI_CMD_FEC_SET_RS 0x1 +#define SEAPI_CMD_FEC_GET 0x1 + #define SEAPI_CMD_SPEED_SET 0x2 #define SEAPI_CMD_SPEED_GET 0x3 +#define OPCODE_NIC_VF_PORT_STATS 0x22 + #define LIO_CMD_WAIT_TM 100 /* RX(packets coming from wire) Checksum verification flags */ @@ -301,7 +318,8 @@ union octnet_cmd { u64 more:6; /* How many udd words follow the command */ - u64 reserved:29; + u64 cmdgroup:8; + u64 reserved:21; u64 param1:16; @@ -313,7 +331,8 @@ union octnet_cmd { u64 param1:16; - u64 reserved:29; + u64 reserved:21; + u64 cmdgroup:8; u64 more:6; @@ -757,13 +776,17 @@ struct oct_link_info { #ifdef __BIG_ENDIAN_BITFIELD u64 gmxport:16; u64 macaddr_is_admin_asgnd:1; - u64 rsvd:31; + u64 rsvd:13; + u64 macaddr_spoofchk:1; + u64 rsvd1:17; u64 num_txpciq:8; u64 num_rxpciq:8; #else u64 num_rxpciq:8; u64 num_txpciq:8; - u64 rsvd:31; + u64 rsvd1:17; + u64 macaddr_spoofchk:1; + u64 rsvd:13; u64 macaddr_is_admin_asgnd:1; u64 gmxport:16; #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index ceac74388e09..24c212001212 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -438,9 +438,10 @@ struct octeon_config { #define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking + * 1 process-done list, 1 zombie list (timed-out sc list) * NoResponse Lists are now maintained with each IQ. (Dec' 2007). */ -#define MAX_RESPONSE_LISTS 4 +#define MAX_RESPONSE_LISTS 6 /* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the * dispatch table. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f878a552fef3..0f0275cd153e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1440,18 +1440,15 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) /* the whole thing needs to be atomic, ideally */ if (droq) { pkts_pend = (u32)atomic_read(&droq->pkts_pending); - spin_lock_bh(&droq->lock); writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg); droq->pkt_count = pkts_pend; - /* this write needs to be flushed before we release the lock */ - mmiowb(); - spin_unlock_bh(&droq->lock); oct = droq->oct_dev; } if (iq) { spin_lock_bh(&iq->lock); - writel(iq->pkt_in_done, iq->inst_cnt_reg); - iq->pkt_in_done = 0; + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; /* this write needs to be flushed before we release the lock */ mmiowb(); spin_unlock_bh(&iq->lock); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index d99ca6ba23a4..3d01d3602d8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -316,6 +316,8 @@ struct octdev_props { * device pointer (used for OS specific calls).
*/ int rx_on; + int fec; + int fec_boot; int napi_enabled; int gmxport; struct net_device *netdev; @@ -397,6 +399,8 @@ struct octeon_sriov_info { int vf_linkstate[MAX_POSSIBLE_VFS]; + bool vf_spoofchk[MAX_POSSIBLE_VFS]; + u64 vf_drv_loaded_mask; }; @@ -607,6 +611,9 @@ struct octeon_device { u8 speed_boot; u8 speed_setting; u8 no_speed_setting; + + u32 vfstats_poll; +#define LIO_VFSTATS_POLL 10 }; #define OCT_DRV_ONLINE 1 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index a71dbb7ab6af..a0c099f71524 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -301,8 +301,6 @@ int octeon_init_droq(struct octeon_device *oct, dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", droq->max_empty_descs); - spin_lock_init(&droq->lock); - INIT_LIST_HEAD(&droq->dispatch_list); /* For 56xx Pass1, this function won't be called, so no checks. */ @@ -333,8 +331,6 @@ init_droq_fail: * Returns: * Success: Pointer to recv_info_t * Failure: NULL. - * Locks: - * The droq->lock is held when this routine is called. */ static inline struct octeon_recv_info *octeon_create_recv_info( struct octeon_device *octeon_dev, @@ -433,8 +429,6 @@ octeon_droq_refill_pullup_descs(struct octeon_droq *droq, * up buffers (that were not dispatched) to form a contiguous ring. * Returns: * No of descriptors refilled. - * Locks: - * This routine is called with droq->lock held. */ static u32 octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) @@ -449,8 +443,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) while (droq->refill_count && (desc_refilled < droq->max_count)) { /* If a valid buffer exists (happens if there is no dispatch), - * reuse - * the buffer, else allocate. + * reuse the buffer, else allocate. */ if (!droq->recv_buf_list[droq->refill_idx].buffer) { pg_info = @@ -503,34 +496,37 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) /** Check if we can allocate packets to get out of oom. * @param droq - Droq being checked. - * @return does not return anything + * @return 1 if it fails to refill the minimum */ -void octeon_droq_check_oom(struct octeon_droq *droq) +int octeon_retry_droq_refill(struct octeon_droq *droq) { - int desc_refilled; struct octeon_device *oct = droq->oct_dev; + int desc_refilled, reschedule = 1; + u32 pkts_credit; + + pkts_credit = readl(droq->pkts_credit_reg); + desc_refilled = octeon_droq_refill(oct, droq); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); - if (readl(droq->pkts_credit_reg) <= CN23XX_SLI_DEF_BP) { - spin_lock_bh(&droq->lock); - desc_refilled = octeon_droq_refill(oct, droq); - if (desc_refilled) { - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate.
- */ - wmb(); - writel(desc_refilled, droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); - } - spin_unlock_bh(&droq->lock); + if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) + reschedule = 0; } + + return reschedule; } static inline u32 octeon_droq_get_bufcount(u32 buf_size, u32 total_len) { - return ((total_len + buf_size - 1) / buf_size); + return DIV_ROUND_UP(total_len, buf_size); } static int @@ -603,9 +599,9 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 pkts_to_process) { + u32 pkt, total_len = 0, pkt_count, retval; struct octeon_droq_info *info; union octeon_rh *rh; - u32 pkt, total_len = 0, pkt_count; pkt_count = pkts_to_process; @@ -709,30 +705,43 @@ octeon_droq_fast_process_packets(struct octeon_device *oct, if (droq->refill_count >= droq->refill_threshold) { int desc_refilled = octeon_droq_refill(oct, droq); - /* Flush the droq descriptor data to memory to be sure - * that when we update the credits the data in memory - * is accurate. - */ - wmb(); - writel((desc_refilled), droq->pkts_credit_reg); - /* make sure mmio write completes */ - mmiowb(); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to + * be sure that when we update the credits the + * data in memory is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); + } } - } /* for (each packet)... */ /* Increment refill_count by the number of buffers processed. */ droq->stats.pkts_received += pkt; droq->stats.bytes_received += total_len; + retval = pkt; if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); droq->stats.dropped_toomany += (pkts_to_process - pkt); - return pkts_to_process; + retval = pkts_to_process; + } + + atomic_sub(retval, &droq->pkts_pending); + + if (droq->refill_count >= droq->refill_threshold && + readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) { + octeon_droq_check_hw_for_pkts(droq); + + /* Make sure there are no pkts_pending */ + if (!atomic_read(&droq->pkts_pending)) + octeon_schedule_rxq_oom_work(oct, droq); } - return pkt; + return retval; } int @@ -740,29 +749,19 @@ octeon_droq_process_packets(struct octeon_device *oct, struct octeon_droq *droq, u32 budget) { - u32 pkt_count = 0, pkts_processed = 0; + u32 pkt_count = 0; struct list_head *tmp, *tmp2; - /* Grab the droq lock */ - spin_lock(&droq->lock); - octeon_droq_check_hw_for_pkts(droq); pkt_count = atomic_read(&droq->pkts_pending); - if (!pkt_count) { - spin_unlock(&droq->lock); + if (!pkt_count) return 0; - } if (pkt_count > budget) pkt_count = budget; - pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); - - atomic_sub(pkts_processed, &droq->pkts_pending); - - /* Release the spin lock */ - spin_unlock(&droq->lock); + octeon_droq_fast_process_packets(oct, droq, pkt_count); list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -798,8 +797,6 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, if (budget > droq->max_count) budget = droq->max_count; - spin_lock(&droq->lock); - while (total_pkts_processed < budget) { octeon_droq_check_hw_for_pkts(droq); @@ -813,13 +810,9 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct, octeon_droq_fast_process_packets(oct, droq, pkts_available); - atomic_sub(pkts_processed, &droq->pkts_pending); - total_pkts_processed += pkts_processed; } - spin_unlock(&droq->lock); - 
list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { struct __dispatch *rdisp = (struct __dispatch *)tmp; @@ -879,9 +872,8 @@ octeon_enable_irq(struct octeon_device *oct, u32 q_no) int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops) { - struct octeon_droq *droq; - unsigned long flags; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -901,21 +893,15 @@ int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, } droq = oct->droq[q_no]; - - spin_lock_irqsave(&droq->lock, flags); - memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) { - unsigned long flags; - struct octeon_droq *droq; struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; oct_cfg = octeon_get_conf(oct); @@ -936,14 +922,10 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) return 0; } - spin_lock_irqsave(&droq->lock, flags); - droq->ops.fptr = NULL; droq->ops.farg = NULL; droq->ops.drop_on_max = 0; - spin_unlock_irqrestore(&droq->lock, flags); - return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index f28f262d4ab6..c9b19e624dce 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -245,9 +245,6 @@ struct octeon_droq_ops { * Octeon DROQ. */ struct octeon_droq { - /** A spinlock to protect access to this ring. */ - spinlock_t lock; - u32 q_no; u32 pkt_count; @@ -414,6 +411,6 @@ int octeon_droq_process_poll_pkts(struct octeon_device *oct, int octeon_enable_irq(struct octeon_device *oct, u32 q_no); -void octeon_droq_check_oom(struct octeon_droq *droq); +int octeon_retry_droq_refill(struct octeon_droq *droq); #endif /*__OCTEON_DROQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 2327062e8af6..bebf3bd349c6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -94,6 +94,8 @@ struct octeon_instr_queue { u32 pkt_in_done; + u32 pkts_processed; + /** A spinlock to protect access to the input ring.*/ spinlock_t iq_flush_running_lock; @@ -290,13 +292,19 @@ struct octeon_soft_command { u32 ctxsize; /** Time out and callback */ - size_t wait_time; - size_t timeout; + size_t expiry_time; u32 iq_no; void (*callback)(struct octeon_device *, u32, void *); void *callback_arg; + + int caller_is_done; + u32 sc_status; + struct completion complete; }; +/* max timeout (in milli sec) for soft request */ +#define LIO_SC_MAX_TMO_MS 60000 + /** Maximum number of buffers to allocate into soft command buffer pool */ #define MAX_SOFT_COMMAND_BUFFERS 256 @@ -317,6 +325,8 @@ struct octeon_sc_buffer_pool { (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count) int octeon_setup_sc_buffer_pool(struct octeon_device *oct); +int octeon_free_sc_done_list(struct octeon_device *oct); +int octeon_free_sc_zombie_list(struct octeon_device *oct); int octeon_free_sc_buffer_pool(struct octeon_device *oct); struct octeon_soft_command * octeon_alloc_soft_command(struct octeon_device *oct, @@ -368,6 +378,9 @@ int octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, u32 datasize, u32 reqtype); +void octeon_dump_soft_command(struct octeon_device *oct, + struct octeon_soft_command 
*sc); + void octeon_prepare_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc, u8 opcode, u8 subcode, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index c846eec11a45..073d0647b439 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -70,6 +70,10 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, unsigned int bytes_compl); void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac); + +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq); + /** Swap 8B blocks */ static inline void octeon_swap_8B_data(u64 *data, u32 blocks) { @@ -146,46 +150,70 @@ err_release_region: return 1; } +/* input parameters: + * sc: pointer to a soft request + * timeout: time (in milli sec) that an application wants to wait for + * the response to the request. + * 0: the request will wait until its response gets back + * from the firmware within LIO_SC_MAX_TMO_MS milli sec. + * If the response does not return within + * LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list() + * will move the request to the zombie response list. + * + * return value: + * 0: got the response from firmware for the sc request. + * errno -EINTR: the user aborted the command. + * errno -ETIME: the user-specified timeout value has expired. + * errno -EBUSY: the response to the request did not return in a + * reasonable time (LIO_SC_MAX_TMO_MS); + * the sc will be moved to the zombie response list by + * lio_process_ordered_list(). + * + * For a request with a non-zero return value, sc->caller_is_done + * will be marked 1. + * When a request returns zero, the requestor should mark + * sc->caller_is_done with 1 after examining the response of sc. + * lio_process_ordered_list() will free the soft command on behalf + * of the soft command requestor. + * This fixes the possible race condition where both the timeout + * handling and lio_process_ordered_list()/the callback function + * try to free an sc structure. + */ static inline int -sleep_cond(wait_queue_head_t *wait_queue, int *condition) +wait_for_sc_completion_timeout(struct octeon_device *oct_dev, + struct octeon_soft_command *sc, + unsigned long timeout) { int errno = 0; - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - while (!(READ_ONCE(*condition))) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) { - errno = -EINTR; - goto out; - } - schedule(); + long timeout_jiff; + + if (timeout) + timeout_jiff = msecs_to_jiffies(timeout); + else + timeout_jiff = MAX_SCHEDULE_TIMEOUT; + + timeout_jiff = + wait_for_completion_interruptible_timeout(&sc->complete, + timeout_jiff); + if (timeout_jiff == 0) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -ETIME; + } else if (timeout_jiff == -ERESTARTSYS) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EINTR; + } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EBUSY; } -out: - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); - return errno; -} -/* Gives up the CPU for a timeout period.
- * Check that the condition is not true before we go to sleep for a - * timeout period. - */ -static inline void -sleep_timeout_cond(wait_queue_head_t *wait_queue, - int *condition, - int timeout) -{ - wait_queue_entry_t we; - - init_waitqueue_entry(&we, current); - add_wait_queue(wait_queue, &we); - set_current_state(TASK_INTERRUPTIBLE); - if (!(*condition)) - schedule_timeout(timeout); - set_current_state(TASK_RUNNING); - remove_wait_queue(wait_queue, &we); + return errno; } #ifndef ROUNDUP4 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d7a3916fe877..50201fc86dcf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -35,12 +35,6 @@ #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 #define LIO_IFSTATE_RESETTING 0x10 -struct liquidio_if_cfg_context { - u32 octeon_id; - wait_queue_head_t wc; - int cond; -}; - struct liquidio_if_cfg_resp { u64 rh; struct liquidio_if_cfg_info cfg_info; @@ -48,6 +42,7 @@ struct liquidio_if_cfg_resp { }; #define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ +#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200 /* Structure of a node in list of gather components maintained by * NIC driver for each network device. @@ -76,6 +71,12 @@ struct oct_nic_stats_resp { u64 status; }; +struct oct_nic_vf_stats_resp { + u64 rh; + u64 spoofmac_cnt; + u64 status; +}; + struct oct_nic_stats_ctrl { struct completion complete; struct net_device *netdev; @@ -83,16 +84,13 @@ struct oct_nic_stats_ctrl { struct oct_nic_seapi_resp { u64 rh; - u32 speed; + union { + u32 fec_setting; + u32 speed; + }; u64 status; }; -struct liquidio_nic_seapi_ctl_context { - int octeon_id; - u32 status; - struct completion complete; -}; - /** LiquidIO per-interface network private data */ struct lio { /** State of the interface. Rx/Tx happens only in the RUNNING state. 
*/ @@ -178,7 +176,7 @@ struct lio { struct cavium_wq txq_status_wq; /* work queue for rxq oom status */ - struct cavium_wq rxq_status_wq; + struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; /* work queue for link status */ struct cavium_wq link_status_wq; @@ -187,6 +185,7 @@ struct lio { struct cavium_wq sync_octeon_time_wq; int netdev_uc_count; + struct cavium_wk stats_wk; }; #define LIO_SIZE (sizeof(struct lio)) @@ -225,7 +224,7 @@ irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); -int octnet_get_link_stats(struct net_device *netdev); +void lio_fetch_stats(struct work_struct *work); int lio_wait_for_clean_oq(struct octeon_device *oct); /** @@ -234,16 +233,14 @@ int lio_wait_for_clean_oq(struct octeon_device *oct); */ void liquidio_set_ethtool_ops(struct net_device *netdev); -void lio_if_cfg_callback(struct octeon_device *oct, - u32 status __attribute__((unused)), - void *buf); - void lio_delete_glists(struct lio *lio); int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); int liquidio_get_speed(struct lio *lio); int liquidio_set_speed(struct lio *lio, int speed); +int liquidio_get_fec(struct lio *lio); +int liquidio_set_fec(struct lio *lio, int on_off); /** * \brief Net device change_mtu diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index 150609bd8849..1a706f81bbb0 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, else sc->cmd.cmd2.rptr = sc->dmarptr; - sc->wait_time = 1000; - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return sc; } @@ -92,29 +91,6 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct, ndata->reqtype); } -static void octnet_link_ctrl_callback(struct octeon_device *oct, - u32 status, - void *sc_ptr) -{ - struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr; - struct octnic_ctrl_pkt *nctrl; - - nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr; - - /* Call the callback function if status is zero (meaning OK) or status - * contains a firmware status code bigger than zero (meaning the - * firmware is reporting an error). - * If no response was expected, status is OK if the command was posted - * successfully. - */ - if ((!status || status > FIRMWARE_STATUS_CODE(0)) && nctrl->cb_fn) { - nctrl->status = status; - nctrl->cb_fn(nctrl); - } - - octeon_free_soft_command(oct, sc); -} - static inline struct octeon_soft_command *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, struct octnic_ctrl_pkt *nctrl) @@ -127,17 +103,14 @@ static inline struct octeon_soft_command uddsize = (u32)(nctrl->ncmd.s.more * 8); datasize = OCTNET_CMD_SIZE + uddsize; - rdatasize = (nctrl->wait_time) ? 
16 : 0; + rdatasize = 16; sc = (struct octeon_soft_command *) - octeon_alloc_soft_command(oct, datasize, rdatasize, - sizeof(struct octnic_ctrl_pkt)); + octeon_alloc_soft_command(oct, datasize, rdatasize, 0); if (!sc) return NULL; - memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt)); - data = (u8 *)sc->virtdptr; memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); @@ -154,9 +127,8 @@ static inline struct octeon_soft_command octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, 0, 0, 0); - sc->callback = octnet_link_ctrl_callback; - sc->callback_arg = sc; - sc->wait_time = nctrl->wait_time; + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; return sc; } @@ -199,5 +171,28 @@ octnet_send_nic_ctrl_pkt(struct octeon_device *oct, } spin_unlock_bh(&oct->cmd_resp_wqlock); + + if (nctrl->ncmd.s.cmdgroup == 0) { + switch (nctrl->ncmd.s.cmd) { + /* caller holds lock, cannot sleep */ + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: + WRITE_ONCE(sc->caller_is_done, true); + return retval; + } + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + nctrl->sc_status = sc->sc_status; + retval = nctrl->sc_status; + if (nctrl->cb_fn) + nctrl->cb_fn(nctrl); + + WRITE_ONCE(sc->caller_is_done, true); + return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index de4130d26a98..87dd6f89ce51 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -52,20 +52,13 @@ struct octnic_ctrl_pkt { /** Input queue to use to send this command. */ u64 iq_no; - /** Time to wait for Octeon software to respond to this control command. - * If wait_time is 0, OSI assumes no response is expected. - */ - size_t wait_time; - /** The network device that issued the control command. */ u64 netpndev; /** Callback function called when the command has been fetched */ octnic_ctrl_pkt_cb_fn_t cb_fn; - u32 status; - u16 *response_code; - struct completion *completion; + u32 sc_status; } #define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 8f746e1348d4..c6f4cbda040f 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -123,6 +123,7 @@ int octeon_init_instr_queue(struct octeon_device *oct, iq->do_auto_flush = 1; iq->db_timeout = (u32)conf->db_timeout; atomic_set(&iq->instr_pending, 0); + iq->pkts_processed = 0; /* Initialize the spinlock for this instruction queue */ spin_lock_init(&iq->lock); @@ -379,7 +380,6 @@ lio_process_iq_request_list(struct octeon_device *oct, u32 inst_count = 0; unsigned int pkts_compl = 0, bytes_compl = 0; struct octeon_soft_command *sc; - struct octeon_instr_irh *irh; unsigned long flags; while (old != iq->octeon_read_index) { @@ -401,40 +401,21 @@ lio_process_iq_request_list(struct octeon_device *oct, case REQTYPE_RESP_NET: case REQTYPE_SOFT_COMMAND: sc = buf; - - if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd3.irh; - else - irh = (struct octeon_instr_irh *) - &sc->cmd.cmd2.irh; - if (irh->rflag) { - /* We're expecting a response from Octeon. - * It's up to lio_process_ordered_list() to - * process sc.
Add sc to the ordered soft - * command response list because we expect - * a response from Octeon. - */ - spin_lock_irqsave - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - atomic_inc(&oct->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - list_add_tail(&sc->node, &oct->response_list - [OCTEON_ORDERED_SC_LIST].head); - spin_unlock_irqrestore - (&oct->response_list - [OCTEON_ORDERED_SC_LIST].lock, - flags); - } else { - if (sc->callback) { - /* This callback must not sleep */ - sc->callback(oct, OCTEON_REQUEST_DONE, - sc->callback_arg); - } - } + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. + */ + spin_lock_irqsave(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, flags); + atomic_inc(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count); + list_add_tail(&sc->node, &oct->response_list + [OCTEON_ORDERED_SC_LIST].head); + spin_unlock_irqrestore(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); break; default: dev_err(&oct->pci_dev->dev, @@ -459,7 +440,7 @@ lio_process_iq_request_list(struct octeon_device *oct, if (atomic_read(&oct->response_list [OCTEON_ORDERED_SC_LIST].pending_req_count)) - queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1)); + queue_work(cwq->wq, &cwq->wk.work.work); return inst_count; } @@ -495,6 +476,7 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { + iq->pkts_processed += inst_processed; atomic_sub(inst_processed, &iq->instr_pending); iq->stats.instr_processed += inst_processed; } @@ -753,8 +735,7 @@ int octeon_send_soft_command(struct octeon_device *oct, len = (u32)ih2->dlengsz; } - if (sc->wait_time) - sc->timeout = jiffies + sc->wait_time; + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, len, REQTYPE_SOFT_COMMAND)); @@ -789,11 +770,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct) return 0; } +int octeon_free_sc_done_list(struct octeon_device *oct) +{ + struct octeon_response_list *done_sc_list, *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST]; + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + + if (!atomic_read(&done_sc_list->pending_req_count)) + return 0; + + sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &done_sc_list->head) { + sc = list_entry(tmp, struct octeon_soft_command, node); + + if (READ_ONCE(sc->caller_is_done)) { + list_del(&sc->node); + atomic_dec(&done_sc_list->pending_req_count); + + if (*sc->status_word == COMPLETION_WORD_INIT) { + /* timeout; move sc to zombie list */ + list_add_tail(&sc->node, &zombie_sc_list->head); + atomic_inc(&zombie_sc_list->pending_req_count); + } else { + octeon_free_soft_command(oct, sc); + } + } + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + +int octeon_free_sc_zombie_list(struct octeon_device *oct) +{ + struct octeon_response_list *zombie_sc_list; + struct octeon_soft_command *sc; + struct list_head *tmp, *tmp2; + spinlock_t *sc_lists_lock; /* lock for response_list */ + + zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST]; + sc_lists_lock = 
&oct->response_list[OCTEON_ORDERED_SC_LIST].lock; + + spin_lock_bh(sc_lists_lock); + + list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) { + list_del(tmp); + atomic_dec(&zombie_sc_list->pending_req_count); + sc = list_entry(tmp, struct octeon_soft_command, node); + octeon_free_soft_command(oct, sc); + } + + spin_unlock_bh(sc_lists_lock); + + return 0; +} + int octeon_free_sc_buffer_pool(struct octeon_device *oct) { struct list_head *tmp, *tmp2; struct octeon_soft_command *sc; + octeon_free_sc_zombie_list(oct); + spin_lock_bh(&oct->sc_buf_pool.lock); list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { @@ -822,6 +868,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, struct octeon_soft_command *sc = NULL; struct list_head *tmp; + if (!rdatasize) + rdatasize = 16; + WARN_ON((offset + datasize + rdatasize + ctxsize) > SOFT_COMMAND_BUFFER_SIZE); diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c index fe5b53700576..ac7747ccf56a 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, u32 status; u64 status64; + octeon_free_sc_done_list(octeon_dev); + ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; do { @@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev, } } } - } else if (force_quit || (sc->timeout && - time_after(jiffies, (unsigned long)sc->timeout))) { - dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n", - __func__, (long)jiffies, (long)sc->timeout); + } else if (unlikely(force_quit) || (sc->expiry_time && + time_after(jiffies, (unsigned long)sc->expiry_time))) { + struct octeon_instr_irh *irh = + (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + + dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__); + dev_err(&octeon_dev->pci_dev->dev, + "cmd %x/%x/%llx/%llx failed, ", + irh->opcode, irh->subcode, + sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]); + dev_err(&octeon_dev->pci_dev->dev, + "timeout (%ld, %ld)\n", + (long)jiffies, (long)sc->expiry_time); status = OCTEON_REQUEST_TIMEOUT; } if (status != OCTEON_REQUEST_PENDING) { + sc->sc_status = status; + /* we have received a response or we have timed out */ /* remove node from linked list */ list_del(&sc->node); atomic_dec(&octeon_dev->response_list - [OCTEON_ORDERED_SC_LIST]. - pending_req_count); - spin_unlock_bh - (&ordered_sc_list->lock); + [OCTEON_ORDERED_SC_LIST]. + pending_req_count); + + if (!sc->callback) { + atomic_inc(&octeon_dev->response_list + [OCTEON_DONE_SC_LIST]. 
+ pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_DONE_SC_LIST].head); + + if (unlikely(READ_ONCE(sc->caller_is_done))) { + /* caller does not wait for response + * from firmware + */ + if (status != OCTEON_REQUEST_DONE) { + struct octeon_instr_irh *irh; + + irh = + (struct octeon_instr_irh *) + &sc->cmd.cmd3.irh; + dev_dbg + (&octeon_dev->pci_dev->dev, + "%s: sc failed: opcode=%x, ", + __func__, irh->opcode); + dev_dbg + (&octeon_dev->pci_dev->dev, + "subcode=%x, ossp[0]=%llx, ", + irh->subcode, + sc->cmd.cmd3.ossp[0]); + dev_dbg + (&octeon_dev->pci_dev->dev, + "ossp[1]=%llx, status=%d\n", + sc->cmd.cmd3.ossp[1], + status); + } + } else { + complete(&sc->complete); + } + + spin_unlock_bh(&ordered_sc_list->lock); + } else { + /* sc with callback function */ + if (status == OCTEON_REQUEST_TIMEOUT) { + atomic_inc(&octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, + &octeon_dev->response_list + [OCTEON_ZOMBIE_SC_LIST]. + head); + } + + spin_unlock_bh(&ordered_sc_list->lock); - if (sc->callback) sc->callback(octeon_dev, status, sc->callback_arg); + /* sc is freed by caller */ + } request_complete++; diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h index 9169c2815dba..ed4020d26fae 100644 --- a/drivers/net/ethernet/cavium/liquidio/response_manager.h +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h @@ -53,7 +53,9 @@ enum { OCTEON_ORDERED_LIST = 0, OCTEON_UNORDERED_NONBLOCKING_LIST = 1, OCTEON_UNORDERED_BLOCKING_LIST = 2, - OCTEON_ORDERED_SC_LIST = 3 + OCTEON_ORDERED_SC_LIST = 3, + OCTEON_DONE_SC_LIST = 4, + OCTEON_ZOMBIE_SC_LIST = 5 }; /** Response Order values for a Octeon Request. 
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 36d25883d123..b2d617abcf49 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -315,6 +315,48 @@ struct cudbg_pbt_tables { u32 pbt_data[CUDBG_PBT_DATA_ENTRIES]; }; +enum cudbg_qdesc_qtype { + CUDBG_QTYPE_UNKNOWN = 0, + CUDBG_QTYPE_NIC_TXQ, + CUDBG_QTYPE_NIC_RXQ, + CUDBG_QTYPE_NIC_FLQ, + CUDBG_QTYPE_CTRLQ, + CUDBG_QTYPE_FWEVTQ, + CUDBG_QTYPE_INTRQ, + CUDBG_QTYPE_PTP_TXQ, + CUDBG_QTYPE_OFLD_TXQ, + CUDBG_QTYPE_RDMA_RXQ, + CUDBG_QTYPE_RDMA_FLQ, + CUDBG_QTYPE_RDMA_CIQ, + CUDBG_QTYPE_ISCSI_RXQ, + CUDBG_QTYPE_ISCSI_FLQ, + CUDBG_QTYPE_ISCSIT_RXQ, + CUDBG_QTYPE_ISCSIT_FLQ, + CUDBG_QTYPE_CRYPTO_TXQ, + CUDBG_QTYPE_CRYPTO_RXQ, + CUDBG_QTYPE_CRYPTO_FLQ, + CUDBG_QTYPE_TLS_RXQ, + CUDBG_QTYPE_TLS_FLQ, + CUDBG_QTYPE_MAX, +}; + +#define CUDBG_QDESC_REV 1 + +struct cudbg_qdesc_entry { + u32 data_size; + u32 qtype; + u32 qid; + u32 desc_size; + u32 num_desc; + u8 data[0]; /* Must be last */ +}; + +struct cudbg_qdesc_info { + u32 qdesc_entry_size; + u32 num_queues; + u8 data[0]; /* Must be last */ +}; + #define IREG_NUM_ELEM 4 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 215fe6260fd7..dec63c15c0ba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -81,7 +81,8 @@ enum cudbg_dbg_entity_type { CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, CUDBG_HMA = 68, - CUDBG_MAX_ENTITY = 70, + CUDBG_QDESC = 70, + CUDBG_MAX_ENTITY = 71, }; struct cudbg_init { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index d97e0d7e541a..7c49681407ad 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -19,6 +19,7 @@ #include "t4_regs.h" #include "cxgb4.h" +#include "cxgb4_cudbg.h" #include "cudbg_if.h" #include "cudbg_lib_common.h" #include "cudbg_entity.h" @@ -2890,3 +2891,240 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, } return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff); } + +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size) +{ + u32 tot_entries = 0, tot_size = 0; + + /* NIC TXQ, RXQ, FLQ, and CTRLQ */ + tot_entries += MAX_ETH_QSETS * 3; + tot_entries += MAX_CTRL_QUEUES; + + tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE; + tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES * + MAX_CTRL_TXQ_DESC_SIZE; + + /* FW_EVTQ and INTRQ */ + tot_entries += INGQ_EXTRAS; + tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE; + + /* PTP_TXQ */ + tot_entries += 1; + tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE; + + /* ULD TXQ, RXQ, and FLQ */ + tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS; + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2; + + tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES * + MAX_TXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES * + MAX_RXQ_DESC_SIZE; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS * + MAX_FL_DESC_SIZE; + + /* ULD CIQ */ + tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS; + tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE * + 
MAX_RXQ_DESC_SIZE; + + tot_size += sizeof(struct cudbg_ver_hdr) + + sizeof(struct cudbg_qdesc_info) + + sizeof(struct cudbg_qdesc_entry) * tot_entries; + + if (num) + *num = tot_entries; + + if (size) + *size = tot_size; +} + +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + u32 num_queues = 0, tot_entries = 0, size = 0; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_qdesc_entry *qdesc_entry; + struct cudbg_qdesc_info *qdesc_info; + struct cudbg_ver_hdr *ver_hdr; + struct sge *s = &padap->sge; + u32 i, j, cur_off, tot_len; + u8 *data; + int rc; + + cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size); + size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE); + tot_len = size; + data = kvzalloc(size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + ver_hdr = (struct cudbg_ver_hdr *)data; + ver_hdr->signature = CUDBG_ENTITY_SIGNATURE; + ver_hdr->revision = CUDBG_QDESC_REV; + ver_hdr->size = sizeof(struct cudbg_qdesc_info); + size -= sizeof(*ver_hdr); + + qdesc_info = (struct cudbg_qdesc_info *)(data + + sizeof(*ver_hdr)); + size -= sizeof(*qdesc_info); + qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data; + +#define QDESC_GET(q, desc, type, label) do { \ + if (size <= 0) { \ + goto label; \ + } \ + if (desc) { \ + cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ + size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \ + num_queues++; \ + qdesc_entry = cudbg_next_qdesc(qdesc_entry); \ + } \ +} while (0) + +#define QDESC_GET_TXQ(q, type, label) do { \ + struct sge_txq *txq = (struct sge_txq *)q; \ + QDESC_GET(txq, txq->desc, type, label); \ +} while (0) + +#define QDESC_GET_RXQ(q, type, label) do { \ + struct sge_rspq *rxq = (struct sge_rspq *)q; \ + QDESC_GET(rxq, rxq->desc, type, label); \ +} while (0) + +#define QDESC_GET_FLQ(q, type, label) do { \ + struct sge_fl *flq = (struct sge_fl *)q; \ + QDESC_GET(flq, flq->desc, type, label); \ +} while (0) + + /* NIC TXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); + + /* NIC RXQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out); + + /* NIC FLQ */ + for (i = 0; i < s->ethqsets; i++) + QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out); + + /* NIC CTRLQ */ + for (i = 0; i < padap->params.nports; i++) + QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); + + /* FW_EVTQ */ + QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out); + + /* INTRQ */ + QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out); + + /* PTP_TXQ */ + QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); + + /* ULD Queues */ + mutex_lock(&uld_mutex); + + if (s->uld_txq_info) { + struct sge_uld_txq_info *utxq; + + /* ULD TXQ */ + for (j = 0; j < CXGB4_TX_MAX; j++) { + if (!s->uld_txq_info[j]) + continue; + + utxq = s->uld_txq_info[j]; + for (i = 0; i < utxq->ntxq; i++) + QDESC_GET_TXQ(&utxq->uldtxq[i].q, + cudbg_uld_txq_to_qtype(j), + out_unlock); + } + } + + if (s->uld_rxq_info) { + struct sge_uld_rxq_info *urxq; + u32 base; + + /* ULD RXQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + QDESC_GET_RXQ(&urxq->uldrxq[i].rspq, + cudbg_uld_rxq_to_qtype(j), + out_unlock); + } + + /* ULD FLQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + for (i = 0; i < urxq->nrxq; i++) + 
QDESC_GET_FLQ(&urxq->uldrxq[i].fl, + cudbg_uld_flq_to_qtype(j), + out_unlock); + } + + /* ULD CIQ */ + for (j = 0; j < CXGB4_ULD_MAX; j++) { + if (!s->uld_rxq_info[j]) + continue; + + urxq = s->uld_rxq_info[j]; + base = urxq->nrxq; + for (i = 0; i < urxq->nciq; i++) + QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq, + cudbg_uld_ciq_to_qtype(j), + out_unlock); + } + } + +out_unlock: + mutex_unlock(&uld_mutex); + +out: + qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry); + qdesc_info->num_queues = num_queues; + cur_off = 0; + while (tot_len) { + u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE); + + rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size, + &temp_buff); + if (rc) { + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + + memcpy(temp_buff.data, data + cur_off, chunk_size); + tot_len -= chunk_size; + cur_off += chunk_size; + rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff, + dbg_buff); + if (rc) { + cudbg_put_buff(pdbg_init, &temp_buff); + cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA; + goto out_free; + } + } + +out_free: + if (data) + kvfree(data); + +#undef QDESC_GET_FLQ +#undef QDESC_GET_RXQ +#undef QDESC_GET_TXQ +#undef QDESC_GET + + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index eebefe7cd18e..f047a01a3e5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -171,6 +171,9 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_qdesc(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, @@ -182,4 +185,107 @@ int cudbg_fill_meminfo(struct adapter *padap, struct cudbg_meminfo *meminfo_buff); void cudbg_fill_le_tcam_info(struct adapter *padap, struct cudbg_tcam *tcam_region); +void cudbg_fill_qdesc_num_and_size(const struct adapter *padap, + u32 *num, u32 *size); + +static inline u32 cudbg_uld_txq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_TX_OFLD: + return CUDBG_QTYPE_OFLD_TXQ; + case CXGB4_TX_CRYPTO: + return CUDBG_QTYPE_CRYPTO_TXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_rxq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_RXQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_RXQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_RXQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_RXQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_RXQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_flq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_FLQ; + case CXGB4_ULD_ISCSI: + return CUDBG_QTYPE_ISCSI_FLQ; + case CXGB4_ULD_ISCSIT: + return CUDBG_QTYPE_ISCSIT_FLQ; + case CXGB4_ULD_CRYPTO: + return CUDBG_QTYPE_CRYPTO_FLQ; + case CXGB4_ULD_TLS: + return CUDBG_QTYPE_TLS_FLQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline u32 cudbg_uld_ciq_to_qtype(u32 uld) +{ + switch (uld) { + case CXGB4_ULD_RDMA: + return CUDBG_QTYPE_RDMA_CIQ; + } + + return CUDBG_QTYPE_UNKNOWN; +} + +static inline void cudbg_fill_qdesc_txq(const struct sge_txq *txq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = 
txq->cntxt_id; + entry->desc_size = sizeof(struct tx_desc); + entry->num_desc = txq->size; + entry->data_size = txq->size * sizeof(struct tx_desc); + memcpy(entry->data, txq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_rxq(const struct sge_rspq *rxq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = rxq->cntxt_id; + entry->desc_size = rxq->iqe_len; + entry->num_desc = rxq->size; + entry->data_size = rxq->size * rxq->iqe_len; + memcpy(entry->data, rxq->desc, entry->data_size); +} + +static inline void cudbg_fill_qdesc_flq(const struct sge_fl *flq, + enum cudbg_qdesc_qtype type, + struct cudbg_qdesc_entry *entry) +{ + entry->qtype = type; + entry->qid = flq->cntxt_id; + entry->desc_size = sizeof(__be64); + entry->num_desc = flq->size; + entry->data_size = flq->size * sizeof(__be64); + memcpy(entry->data, flq->desc, entry->data_size); +} + +static inline +struct cudbg_qdesc_entry *cudbg_next_qdesc(struct cudbg_qdesc_entry *e) +{ + return (struct cudbg_qdesc_entry *) + ((u8 *)e + sizeof(*e) + e->data_size); +} #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 76d16747f513..b5010bd32ea3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -533,6 +533,13 @@ enum { }; enum { + MAX_TXQ_DESC_SIZE = 64, + MAX_RXQ_DESC_SIZE = 128, + MAX_FL_DESC_SIZE = 8, + MAX_CTRL_TXQ_DESC_SIZE = 64, +}; + +enum { INGQ_EXTRAS = 2, /* firmware event queue and */ /* forwarded interrupts */ MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS, @@ -685,6 +692,7 @@ struct sge_eth_stats { /* Ethernet queue statistics */ unsigned long rx_cso; /* # of Rx checksum offloads */ unsigned long vlan_ex; /* # of Rx VLAN extractions */ unsigned long rx_drops; /* # of packets dropped due to no mem */ + unsigned long bad_rx_pkts; /* # of packets with err_vec!=0 */ }; struct sge_eth_rxq { /* SW Ethernet Rx queue */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 5f01c0a7fd98..972f0a124714 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -30,6 +30,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, + { CUDBG_QDESC, cudbg_collect_qdesc }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, { CUDBG_CIM_LA, cudbg_collect_cim_la }, @@ -311,6 +312,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_QDESC: + cudbg_fill_qdesc_num_and_size(adap, NULL, &len); + break; default: break; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index b34f0f077a31..6ba3104ff7eb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -114,6 +114,24 @@ void cxgb4_dcb_reset(struct net_device *dev) cxgb4_dcb_state_init(dev); } +/* update the dcb port support, if version is IEEE then set it to + * FW_PORT_DCB_VER_IEEE and if DCB_CAP_DCBX_VER_CEE is already set then + * clear that. 
If it is set to CEE then set the supported field to + * DCB_CAP_DCBX_VER_CEE and, if DCB_CAP_DCBX_VER_IEEE is set, clear it. + */ +static inline void cxgb4_dcb_update_support(struct port_dcb_info *dcb) +{ + if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) { + if (dcb->supported & DCB_CAP_DCBX_VER_CEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_CEE; + dcb->supported |= DCB_CAP_DCBX_VER_IEEE; + } else if (dcb->dcb_version == FW_PORT_DCB_VER_CEE1D01) { + if (dcb->supported & DCB_CAP_DCBX_VER_IEEE) + dcb->supported &= ~DCB_CAP_DCBX_VER_IEEE; + dcb->supported |= DCB_CAP_DCBX_VER_CEE; + } +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -165,6 +183,15 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, } case CXGB4_DCB_STATE_FW_INCOMPLETE: { + if (transition_to != CXGB4_DCB_INPUT_FW_DISABLED) { + /* during this CXGB4_DCB_STATE_FW_INCOMPLETE state, + * check if the dcb version has changed (there can be + * a mismatch between the default config and the + * negotiated switch configuration at FW), so update + * the dcb support accordingly. + */ + cxgb4_dcb_update_support(dcb); + } switch (transition_to) { case CXGB4_DCB_INPUT_FW_ENABLED: { /* we're already in firmware DCB mode */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 0f72f9c4ec74..cab492ec8f59 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2784,6 +2784,7 @@ do { \ RL("LROmerged:", stats.lro_merged); RL("LROpackets:", stats.lro_pkts); RL("RxDrops:", stats.rx_drops); + RL("RxBadPkts:", stats.bad_rx_pkts); TL("TSO:", tso); TL("TxCSO:", tx_cso); TL("VLANins:", vlan_ins); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 961e3087d1d3..1a93efa60e71 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -62,7 +62,6 @@ #include <net/netevent.h> #include <net/addrconf.h> #include <net/bonding.h> -#include <net/addrconf.h> #include <linux/uaccess.h> #include <linux/crash_dump.h> #include <net/udp_tunnel.h> @@ -2749,6 +2748,27 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, return -EINVAL; } + if (max_tx_rate == 0) { + /* unbind VF from any Traffic Class */ + fw_pfvf = + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH)); + fw_class = 0xffffffff; + ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, + &fw_pfvf, &fw_class); + if (ret) { + dev_err(adap->pdev_dev, + "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n", + ret, adap->pf, vf); + return -EINVAL; + } + dev_info(adap->pdev_dev, + "PF %d VF %d is unbound from TX Rate Limiting\n", + adap->pf, vf); + adap->vfinfo[vf].tx_rate = 0; + return 0; + } + ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); if (ret != FW_SUCCESS) { dev_err(adap->pdev_dev, @@ -2798,8 +2818,8 @@ static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf, &fw_class); if (ret) { dev_err(adap->pdev_dev, - "Err %d in binding VF %d to Traffic Class %d\n", - ret, vf, class_id); + "Err %d in binding PF %d VF %d to Traffic Class %d\n", + ret, adap->pf, vf, class_id); return -EINVAL; } dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 301c4df8a566..99022c0898b5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++
b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -433,10 +433,12 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, else lport = netdev2pinfo(physdev)->lport; - if (is_vlan_dev(neigh->dev)) + if (is_vlan_dev(neigh->dev)) { vlan = vlan_dev_vlan_id(neigh->dev); - else + vlan |= vlan_dev_get_egress_qos_mask(neigh->dev, priority); + } else { vlan = VLAN_NONE; + } write_lock_bh(&d->lock); for (e = d->l2tab[hash].first; e; e = e->next) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6807bc3a44fb..b90188401d4a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2830,6 +2830,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, csum_ok = pkt->csum_calc && !err_vec && (q->netdev->features & NETIF_F_RXCSUM); + + if (err_vec) + rxq->stats.bad_rx_pkts++; + if (((pkt->l2info & htonl(RXF_TCP_F)) || tnl_hdr_len) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 5fe5d16dee72..f85eab57e9e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3889,7 +3889,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) c.param[0].mnem = cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE)); - c.param[0].val = (__force __be32)op; + c.param[0].val = cpu_to_be32(op); return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } @@ -10209,7 +10209,9 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf, FW_ACL_VLAN_CMD_VFN_V(vf)); vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd)); /* Drop all packets that do not match vlan id */ - vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F; + vlan_cmd.dropnovlan_fm = (enable + ? 
(FW_ACL_VLAN_CMD_DROPNOVLAN_F | + FW_ACL_VLAN_CMD_FM_F) : 0); if (enable != 0) { vlan_cmd.nvlan = 1; vlan_cmd.vlanid[0] = cpu_to_be16(vlan); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5dc6c4154af8..6d2bc8789223 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -2464,6 +2464,7 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7 #define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S) +#define FW_ACL_VLAN_CMD_DROPNOVLAN_F FW_ACL_VLAN_CMD_DROPNOVLAN_V(1U) #define FW_ACL_VLAN_CMD_FM_S 6 #define FW_ACL_VLAN_CMD_FM_M 0x1 diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 1c9ad3630c77..ceec467f590d 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -372,9 +372,8 @@ static int gmac_setup_phy(struct net_device *netdev) return -ENODEV; netdev->phydev = phy; - phy->supported &= PHY_GBIT_FEATURES; - phy->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - phy->advertising = phy->supported; + phy_set_max_speed(phy, SPEED_1000); + phy_support_asym_pause(phy); /* set PHY interface type */ switch (phy->interface) { diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 5a847941c46b..79521e27f0d1 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -284,13 +284,11 @@ static int dnet_mii_probe(struct net_device *dev) /* mask with MAC supported features */ if (bp->capabilities & DNET_HAS_GIGABIT) - phydev->supported &= PHY_GBIT_FEATURES; + phy_set_max_speed(phydev, SPEED_1000); else - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); - phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; - - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); bp->link = 0; bp->speed = 0; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 60da0499ad66..0f3e7f21c6fa 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -721,10 +721,7 @@ static int ethoc_mdio_probe(struct net_device *dev) return err; } - phy->advertising &= ~(ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half); - phy->supported &= ~(SUPPORTED_1000baseT_Full | - SUPPORTED_1000baseT_Half); + phy_set_max_speed(phy, SPEED_100); return 0; } diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index ed6c76d20b45..d8ead7e4177e 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1079,8 +1079,7 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv, phy_interface_t intf) /* Indicate that we support PAUSE frames (see comment in * Documentation/networking/phy.txt) */ - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); /* Display what we found */ phy_attached_info(phydev); @@ -1220,22 +1219,11 @@ static int ftgmac100_set_pauseparam(struct net_device *netdev, priv->tx_pause = pause->tx_pause; priv->rx_pause = pause->rx_pause; - if (phydev) { - phydev->advertising &= ~ADVERTISED_Pause; - phydev->advertising &= ~ADVERTISED_Asym_Pause; + if (phydev) + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); - if (pause->rx_pause) { - phydev->advertising |= ADVERTISED_Pause; - phydev->advertising |= ADVERTISED_Asym_Pause; - } - - if (pause->tx_pause) - 
phydev->advertising ^= ADVERTISED_Asym_Pause; - } if (netif_running(netdev)) { - if (phydev && priv->aneg_pause) - phy_start_aneg(phydev); - else + if (!(phydev && priv->aneg_pause)) ftgmac100_config_pause(priv); } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index a580a3dcbe59..7a30276e1ba6 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -97,4 +97,12 @@ config GIANFAR source "drivers/net/ethernet/freescale/dpaa/Kconfig" +config FSL_DPAA2_ETH + tristate "Freescale DPAA2 Ethernet" + depends on FSL_MC_BUS && FSL_MC_DPIO + depends on NETDEVICES && ETHERNET + ---help--- + Ethernet driver for Freescale DPAA2 SoCs, using the + Freescale MC bus driver + endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 0914a3ea4405..3b4ff08e3841 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -21,3 +21,5 @@ ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o obj-$(CONFIG_FSL_FMAN) += fman/ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/ + +obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 65a22cd9aef2..a5131a510e8b 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1280,7 +1280,7 @@ static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp, err = bman_release(dpaa_bp->pool, bmb, cnt); /* Should never occur, address anyway to avoid leaking the buffers */ - if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) + if (WARN_ON(err) && dpaa_bp->free_buf_cb) while (cnt-- > 0) dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); @@ -1704,10 +1704,8 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, skb = build_skb(vaddr, dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); - if (unlikely(!skb)) { - WARN_ONCE(1, "Build skb failure on Rx\n"); + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) goto free_buffer; - } WARN_ON(fd_off != priv->rx_headroom); skb_reserve(skb, fd_off); skb_put(skb, qm_fd_get_length(fd)); @@ -1770,7 +1768,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); skb = build_skb(sg_vaddr, sz); - if (WARN_ON(unlikely(!skb))) + if (WARN_ON(!skb)) goto free_buffers; skb->ip_summed = rx_csum_offload(priv, fd); @@ -2493,8 +2491,7 @@ static int dpaa_phy_init(struct net_device *net_dev) /* Remove any features not supported by the controller */ phy_dev->supported &= mac_dev->if_support; - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy_dev->advertising = phy_dev->supported; + phy_support_asym_pause(phy_dev); mac_dev->phy_dev = phy_dev; net_dev->phydev = phy_dev; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 3184c8f7cdd0..5d0fdf667b82 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -194,9 +194,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, return -ENODEV; } - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; /* The MAC should know how to handle PAUSE frame autonegotiation before @@ 
-210,29 +208,8 @@ static int dpaa_set_pauseparam(struct net_device *net_dev, /* Determine the sym/asym advertised PAUSE capabilities from the desired * rx/tx pause settings. */ - newadv = 0; - if (epause->rx_pause) - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (epause->tx_pause) - newadv ^= ADVERTISED_Asym_Pause; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - /* If there are differences between the old and the new advertised - * values, restart PHY autonegotiation and advertise the new values. - */ - if (oldadv != newadv) { - phydev->advertising &= ~(ADVERTISED_Pause - | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) { - err = phy_start_aneg(phydev); - if (err < 0) - netdev_err(net_dev, "phy_start_aneg() = %d\n", - err); - } - } + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile new file mode 100644 index 000000000000..9315ecdba612 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Freescale DPAA2 Ethernet controller +# + +obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o + +fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o + +# Needed by the tracing framework +CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h new file mode 100644 index 000000000000..9801528db2a5 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2014-2015 Freescale Semiconductor Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM dpaa2_eth + +#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _DPAA2_ETH_TRACE_H + +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include "dpaa2-eth.h" +#include <linux/tracepoint.h> + +#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" +/* trace_printk format for raw buffer event class */ +#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" + +/* This is used to declare a class of events. + * individual events of this type will be defined below. + */ + +/* Store details about a frame descriptor */ +DECLARE_EVENT_CLASS(dpaa2_eth_fd, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + /* Repeat argument list here */ + TP_ARGS(netdev, fd), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(u64, fd_addr) + __field(u32, fd_len) + __field(u16, fd_offset) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->fd_addr = dpaa2_fd_get_addr(fd); + __entry->fd_len = dpaa2_fd_get_len(fd); + __entry->fd_offset = dpaa2_fd_get_offset(fd); + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. 
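+ * For FD events this is TR_FMT: the netdev name followed by the frame descriptor's address, length and offset.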
+ */ + TP_printk(TR_FMT, + __get_str(name), + __entry->fd_addr, + __entry->fd_len, + __entry->fd_offset) +); + +/* Now declare events of the above type. Format is: + * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class + */ + +/* Tx (egress) fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Rx fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Tx confirmation fd */ +DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, + TP_PROTO(struct net_device *netdev, + const struct dpaa2_fd *fd), + + TP_ARGS(netdev, fd) +); + +/* Log data about raw buffers. Useful for tracing DPBP content. */ +TRACE_EVENT(dpaa2_eth_buf_seed, + /* Trace function prototype */ + TP_PROTO(struct net_device *netdev, + /* virtual address and size */ + void *vaddr, + size_t size, + /* dma map address and size */ + dma_addr_t dma_addr, + size_t map_size, + /* buffer pool id, if relevant */ + u16 bpid), + + /* Repeat argument list here */ + TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), + + /* A structure containing the relevant information we want + * to record. Declare name and type for each normal element, + * name, type and size for arrays. Use __string for variable + * length strings. + */ + TP_STRUCT__entry( + __field(void *, vaddr) + __field(size_t, size) + __field(dma_addr_t, dma_addr) + __field(size_t, map_size) + __field(u16, bpid) + __string(name, netdev->name) + ), + + /* The function that assigns values to the above declared + * fields + */ + TP_fast_assign( + __entry->vaddr = vaddr; + __entry->size = size; + __entry->dma_addr = dma_addr; + __entry->map_size = map_size; + __entry->bpid = bpid; + __assign_str(name, netdev->name); + ), + + /* This is what gets printed when the trace event is + * triggered. + */ + TP_printk(TR_BUF_FMT, + __get_str(name), + __entry->vaddr, + __entry->size, + &__entry->dma_addr, + __entry->map_size, + __entry->bpid) +); + +/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). + * The syntax is the same as for DECLARE_EVENT_CLASS(). + */ + +#endif /* _DPAA2_ETH_TRACE_H */ + +/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE dpaa2-eth-trace +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c new file mode 100644 index 000000000000..559154a5d9e9 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -0,0 +1,2654 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/etherdevice.h> +#include <linux/of_net.h> +#include <linux/interrupt.h> +#include <linux/msi.h> +#include <linux/kthread.h> +#include <linux/iommu.h> +#include <linux/net_tstamp.h> +#include <linux/fsl/mc.h> + +#include <net/sock.h> + +#include "dpaa2-eth.h" + +/* CREATE_TRACE_POINTS only needs to be defined once. 
Other dpa files + * using trace events only need to #include <trace/events/sched.h> + */ +#define CREATE_TRACE_POINTS +#include "dpaa2-eth-trace.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Freescale Semiconductor, Inc"); +MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); + +static void *dpaa2_iova_to_virt(struct iommu_domain *domain, + dma_addr_t iova_addr) +{ + phys_addr_t phys_addr; + + phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; + + return phys_to_virt(phys_addr); +} + +static void validate_rx_csum(struct dpaa2_eth_priv *priv, + u32 fd_status, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* HW checksum validation is disabled, nothing to do here */ + if (!(priv->net_dev->features & NETIF_F_RXCSUM)) + return; + + /* Read checksum validation bits */ + if (!((fd_status & DPAA2_FAS_L3CV) && + (fd_status & DPAA2_FAS_L4CV))) + return; + + /* Inform the stack there's no need to compute L3/L4 csum anymore */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/* Free a received FD. + * Not to be used for Tx conf FDs or on any other paths. + */ +static void free_rx_fd(struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd, + void *vaddr) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + struct dpaa2_sg_entry *sgt; + void *sg_vaddr; + int i; + + /* If single buffer frame, just free the data buffer */ + if (fd_format == dpaa2_fd_single) + goto free_buf; + else if (fd_format != dpaa2_fd_sg) + /* We don't support any other format */ + return; + + /* For S/G frames, we first need to free all SG entries + * except the first one, which was taken care of already + */ + sgt = vaddr + dpaa2_fd_get_offset(fd); + for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + addr = dpaa2_sg_get_addr(&sgt[i]); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + skb_free_frag(sg_vaddr); + if (dpaa2_sg_is_final(&sgt[i])) + break; + } + +free_buf: + skb_free_frag(vaddr); +} + +/* Build a linear skb based on a single-buffer frame descriptor */ +static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + void *fd_vaddr) +{ + struct sk_buff *skb = NULL; + u16 fd_offset = dpaa2_fd_get_offset(fd); + u32 fd_length = dpaa2_fd_get_len(fd); + + ch->buf_count--; + + skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, fd_offset); + skb_put(skb, fd_length); + + return skb; +} + +/* Build a non linear (fragmented) skb based on a S/G table */ +static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_sg_entry *sgt) +{ + struct sk_buff *skb = NULL; + struct device *dev = priv->net_dev->dev.parent; + void *sg_vaddr; + dma_addr_t sg_addr; + u16 sg_offset; + u32 sg_length; + struct page *page, *head_page; + int page_offset; + int i; + + for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { + struct dpaa2_sg_entry *sge = &sgt[i]; + + /* NOTE: We only support SG entries in dpaa2_sg_single format, + * but this is the only format we may receive from HW anyway + */ + + /* Get the address and length from the S/G entry */ + sg_addr = dpaa2_sg_get_addr(sge); + sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); + dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + + sg_length = dpaa2_sg_get_len(sge); + + if (i == 0) { 
+ /* We build the skb around the first data buffer */ + skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); + if (unlikely(!skb)) { + /* Free the first SG entry now, since we already + * unmapped it and obtained the virtual address + */ + skb_free_frag(sg_vaddr); + + /* We still need to subtract the buffers used + * by this FD from our software counter + */ + while (!dpaa2_sg_is_final(&sgt[i]) && + i < DPAA2_ETH_MAX_SG_ENTRIES) + i++; + break; + } + + sg_offset = dpaa2_sg_get_offset(sge); + skb_reserve(skb, sg_offset); + skb_put(skb, sg_length); + } else { + /* Rest of the data buffers are stored as skb frags */ + page = virt_to_page(sg_vaddr); + head_page = virt_to_head_page(sg_vaddr); + + /* Offset in page (which may be compound). + * Data in subsequent SG entries is stored from the + * beginning of the buffer, so we don't need to add the + * sg_offset. + */ + page_offset = ((unsigned long)sg_vaddr & + (PAGE_SIZE - 1)) + + (page_address(page) - page_address(head_page)); + + skb_add_rx_frag(skb, i - 1, head_page, page_offset, + sg_length, DPAA2_ETH_RX_BUF_SIZE); + } + + if (dpaa2_sg_is_final(sge)) + break; + } + + WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); + + /* Count all data buffers + SG table buffer */ + ch->buf_count -= i + 2; + + return skb; +} + +/* Main Rx frame processing routine */ +static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi, + u16 queue_id) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + u8 fd_format = dpaa2_fd_get_format(fd); + void *vaddr; + struct sk_buff *skb; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct device *dev = priv->net_dev->dev.parent; + struct dpaa2_fas *fas; + void *buf_data; + u32 status = 0; + + /* Tracing point */ + trace_dpaa2_rx_fd(priv->net_dev, fd); + + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); + + fas = dpaa2_get_fas(vaddr, false); + prefetch(fas); + buf_data = vaddr + dpaa2_fd_get_offset(fd); + prefetch(buf_data); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + if (fd_format == dpaa2_fd_single) { + skb = build_linear_skb(priv, ch, fd, vaddr); + } else if (fd_format == dpaa2_fd_sg) { + skb = build_frag_skb(priv, ch, buf_data); + skb_free_frag(vaddr); + percpu_extras->rx_sg_frames++; + percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); + } else { + /* We don't support any other format */ + goto err_frame_format; + } + + if (unlikely(!skb)) + goto err_build_skb; + + prefetch(skb->data); + + /* Get the timestamp value */ + if (priv->rx_tstamp) { + struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); + __le64 *ts = dpaa2_get_ts(vaddr, false); + u64 ns; + + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps->hwtstamp = ns_to_ktime(ns); + } + + /* Check if we need to validate the L4 csum */ + if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { + status = le32_to_cpu(fas->status); + validate_rx_csum(priv, status, skb); + } + + skb->protocol = eth_type_trans(skb, priv->net_dev); + skb_record_rx_queue(skb, queue_id); + + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + + napi_gro_receive(napi, skb); + + return; + +err_build_skb: + free_rx_fd(priv, fd, vaddr); +err_frame_format: + percpu_stats->rx_dropped++; +} + +/* Consume all frames 
pull-dequeued into the store. This is the simplest way to + * make sure we don't accidentally issue another volatile dequeue which would + * overwrite (leak) frames already in the store. + * + * Observance of NAPI budget is not our concern, leaving that to the caller. + */ +static int consume_frames(struct dpaa2_eth_channel *ch) +{ + struct dpaa2_eth_priv *priv = ch->priv; + struct dpaa2_eth_fq *fq; + struct dpaa2_dq *dq; + const struct dpaa2_fd *fd; + int cleaned = 0; + int is_last; + + do { + dq = dpaa2_io_store_next(ch->store, &is_last); + if (unlikely(!dq)) { + /* If we're here, we *must* have placed a + * volatile dequeue command, so keep reading through + * the store until we get some sort of valid response + * token (either a valid frame or an "empty dequeue") + */ + continue; + } + + fd = dpaa2_dq_fd(dq); + fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); + fq->stats.frames++; + + fq->consume(priv, ch, fd, &ch->napi, fq->flowid); + cleaned++; + } while (!is_last); + + return cleaned; +} + +/* Configure the egress frame annotation for timestamp update */ +static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) +{ + struct dpaa2_faead *faead; + u32 ctrl, frc; + + /* Mark the egress frame annotation area as valid */ + frc = dpaa2_fd_get_frc(fd); + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); + + /* Set hardware annotation size */ + ctrl = dpaa2_fd_get_ctrl(fd); + dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); + + /* enable UPD (update prepended data) bit in FAEAD field of + * hardware frame annotation area + */ + ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; + faead = dpaa2_get_faead(buf_start, true); + faead->ctrl = cpu_to_le32(ctrl); +} + +/* Create a frame descriptor based on a fragmented skb */ +static int build_sg_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + void *sgt_buf = NULL; + dma_addr_t addr; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct dpaa2_sg_entry *sgt; + int i, err; + int sgt_buf_size; + struct scatterlist *scl, *crt_scl; + int num_sg; + int num_dma_bufs; + struct dpaa2_eth_swa *swa; + + /* Create and map scatterlist. + * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have + * to go beyond nr_frags+1. + * Note: We don't support chained scatterlists + */ + if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) + return -EINVAL; + + scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); + if (unlikely(!scl)) + return -ENOMEM; + + sg_init_table(scl, nr_frags + 1); + num_sg = skb_to_sgvec(skb, scl, 0, skb->len); + num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); + if (unlikely(!num_dma_bufs)) { + err = -ENOMEM; + goto dma_map_sg_failed; + } + + /* Prepare the HW SGT structure */ + sgt_buf_size = priv->tx_data_offset + + sizeof(struct dpaa2_sg_entry) * num_dma_bufs; + sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); + if (unlikely(!sgt_buf)) { + err = -ENOMEM; + goto sgt_buf_alloc_failed; + } + sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); + memset(sgt_buf, 0, sgt_buf_size); + + sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); + + /* Fill in the HW SGT structure. 
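+ * Each DMA-mapped scatterlist segment below becomes one SG entry, taking its address and length from the mapping.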
+ * + * sgt_buf is zeroed out, so the following fields are implicit + * in all sgt entries: + * - offset is 0 + * - format is 'dpaa2_sg_single' + */ + for_each_sg(scl, crt_scl, num_dma_bufs, i) { + dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); + dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); + } + dpaa2_sg_set_final(&sgt[i - 1], true); + + /* Store the skb backpointer in the SGT buffer. + * Fit the scatterlist and the number of buffers alongside the + * skb backpointer in the software annotation area. We'll need + * all of them on Tx Conf. + */ + swa = (struct dpaa2_eth_swa *)sgt_buf; + swa->skb = skb; + swa->scl = scl; + swa->num_sg = num_sg; + swa->sgt_size = sgt_buf_size; + + /* Separately map the SGT buffer */ + addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) { + err = -ENOMEM; + goto dma_map_single_failed; + } + dpaa2_fd_set_offset(fd, priv->tx_data_offset); + dpaa2_fd_set_format(fd, dpaa2_fd_sg); + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); + + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, sgt_buf); + + return 0; + +dma_map_single_failed: + skb_free_frag(sgt_buf); +sgt_buf_alloc_failed: + dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); +dma_map_sg_failed: + kfree(scl); + return err; +} + +/* Create a frame descriptor based on a linear skb */ +static int build_single_fd(struct dpaa2_eth_priv *priv, + struct sk_buff *skb, + struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + u8 *buffer_start, *aligned_start; + struct sk_buff **skbh; + dma_addr_t addr; + + buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); + + /* If there's enough room to align the FD address, do it. + * It will help hardware optimize accesses. + */ + aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, + DPAA2_ETH_TX_BUF_ALIGN); + if (aligned_start >= skb->head) + buffer_start = aligned_start; + + /* Store a backpointer to the skb at the beginning of the buffer + * (in the private data area) such that we can release it + * on Tx confirm + */ + skbh = (struct sk_buff **)buffer_start; + *skbh = skb; + + addr = dma_map_single(dev, buffer_start, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(dev, addr))) + return -ENOMEM; + + dpaa2_fd_set_addr(fd, addr); + dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); + dpaa2_fd_set_len(fd, skb->len); + dpaa2_fd_set_format(fd, dpaa2_fd_single); + dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); + + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + enable_tx_tstamp(fd, buffer_start); + + return 0; +} + +/* FD freeing routine on the Tx path + * + * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb + * back-pointed to is also freed. + * This can be called either from dpaa2_eth_tx_conf() or on the error path of + * dpaa2_eth_tx(). 
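+ * The FD format field tells us what to unmap: a single FD covers one buffer, while an SG FD also carries a scatterlist and an SGT buffer.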
+ */ +static void free_tx_fd(const struct dpaa2_eth_priv *priv, + const struct dpaa2_fd *fd) +{ + struct device *dev = priv->net_dev->dev.parent; + dma_addr_t fd_addr; + struct sk_buff **skbh, *skb; + unsigned char *buffer_start; + struct dpaa2_eth_swa *swa; + u8 fd_format = dpaa2_fd_get_format(fd); + + fd_addr = dpaa2_fd_get_addr(fd); + skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); + + if (fd_format == dpaa2_fd_single) { + skb = *skbh; + buffer_start = (unsigned char *)skbh; + /* Accessing the skb buffer is safe before dma unmap, because + * we didn't map the actual skb shell. + */ + dma_unmap_single(dev, fd_addr, + skb_tail_pointer(skb) - buffer_start, + DMA_BIDIRECTIONAL); + } else if (fd_format == dpaa2_fd_sg) { + swa = (struct dpaa2_eth_swa *)skbh; + skb = swa->skb; + + /* Unmap the scatterlist */ + dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); + kfree(swa->scl); + + /* Unmap the SGT buffer */ + dma_unmap_single(dev, fd_addr, swa->sgt_size, + DMA_BIDIRECTIONAL); + } else { + netdev_dbg(priv->net_dev, "Invalid FD format\n"); + return; + } + + /* Get the timestamp value */ + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct skb_shared_hwtstamps shhwtstamps; + __le64 *ts = dpaa2_get_ts(skbh, true); + u64 ns; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + + ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + } + + /* Free SGT buffer allocated on tx */ + if (fd_format != dpaa2_fd_single) + skb_free_frag(skbh); + + /* Move on with skb release */ + dev_kfree_skb(skb); +} + +static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_fd fd; + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + struct dpaa2_eth_fq *fq; + u16 queue_mapping; + unsigned int needed_headroom; + int err, i; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + percpu_extras = this_cpu_ptr(priv->percpu_extras); + + needed_headroom = dpaa2_eth_needed_headroom(priv, skb); + if (skb_headroom(skb) < needed_headroom) { + struct sk_buff *ns; + + ns = skb_realloc_headroom(skb, needed_headroom); + if (unlikely(!ns)) { + percpu_stats->tx_dropped++; + goto err_alloc_headroom; + } + percpu_extras->tx_reallocs++; + + if (skb->sk) + skb_set_owner_w(ns, skb->sk); + + dev_kfree_skb(skb); + skb = ns; + } + + /* We'll be holding a back-reference to the skb until Tx Confirmation; + * we don't want that overwritten by a concurrent Tx with a cloned skb. + */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (unlikely(!skb)) { + /* skb_unshare() has already freed the skb */ + percpu_stats->tx_dropped++; + return NETDEV_TX_OK; + } + + /* Setup the FD fields */ + memset(&fd, 0, sizeof(fd)); + + if (skb_is_nonlinear(skb)) { + err = build_sg_fd(priv, skb, &fd); + percpu_extras->tx_sg_frames++; + percpu_extras->tx_sg_bytes += skb->len; + } else { + err = build_single_fd(priv, skb, &fd); + } + + if (unlikely(err)) { + percpu_stats->tx_dropped++; + goto err_build_fd; + } + + /* Tracing point */ + trace_dpaa2_tx_fd(net_dev, &fd); + + /* TxConf FQ selection relies on queue id from the stack. 
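+ * That id is read back with skb_get_queue_mapping() and used to index priv->fq[].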
+ * In case of a forwarded frame from another DPNI interface, we choose + * a queue affined to the same core that processed the Rx frame + */ + queue_mapping = skb_get_queue_mapping(skb); + fq = &priv->fq[queue_mapping]; + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, 0, + fq->tx_qdbin, &fd); + if (err != -EBUSY) + break; + } + percpu_extras->tx_portal_busy += i; + if (unlikely(err < 0)) { + percpu_stats->tx_errors++; + /* Clean up everything, including freeing the skb */ + free_tx_fd(priv, &fd); + } else { + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + } + + return NETDEV_TX_OK; + +err_build_fd: +err_alloc_headroom: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +/* Tx confirmation frame processing routine */ +static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi __always_unused, + u16 queue_id __always_unused) +{ + struct rtnl_link_stats64 *percpu_stats; + struct dpaa2_eth_drv_stats *percpu_extras; + u32 fd_errors; + + /* Tracing point */ + trace_dpaa2_tx_conf_fd(priv->net_dev, fd); + + percpu_extras = this_cpu_ptr(priv->percpu_extras); + percpu_extras->tx_conf_frames++; + percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); + + /* Check frame errors in the FD field */ + fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; + free_tx_fd(priv, fd); + + if (likely(!fd_errors)) + return; + + if (net_ratelimit()) + netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", + fd_errors); + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + /* Tx-conf logically pertains to the egress path. */ + percpu_stats->tx_errors++; +} + +static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload(RX_L3_CSUM) failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_RX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, + "dpni_set_offload(RX_L4_CSUM) failed\n"); + return err; + } + + return 0; +} + +static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) +{ + int err; + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L3_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); + return err; + } + + err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, + DPNI_OFF_TX_L4_CSUM, enable); + if (err) { + netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); + return err; + } + + return 0; +} + +/* Free buffers acquired from the buffer pool or which were meant to + * be released in the pool + */ +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) +{ + struct device *dev = priv->net_dev->dev.parent; + void *vaddr; + int i; + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + skb_free_frag(vaddr); + } +} + +/* Perform a single release command to add buffers + * to the specified buffer pool + */ +static int add_bufs(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, u16 bpid) +{ + struct device *dev = priv->net_dev->dev.parent; + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + void *buf; + dma_addr_t addr; + int i, err; + + for (i = 0; i < 
DPAA2_ETH_BUFS_PER_CMD; i++) { + /* Allocate buffer visible to WRIOP + skb shared info + + * alignment padding + */ + buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); + if (unlikely(!buf)) + goto err_alloc; + + buf = PTR_ALIGN(buf, priv->rx_buf_align); + + addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, addr))) + goto err_map; + + buf_array[i] = addr; + + /* tracing point */ + trace_dpaa2_eth_buf_seed(priv->net_dev, + buf, dpaa2_eth_buf_raw_size(priv), + addr, DPAA2_ETH_RX_BUF_SIZE, + bpid); + } + +release_bufs: + /* In case the portal is busy, retry until successful */ + while ((err = dpaa2_io_service_release(ch->dpio, bpid, + buf_array, i)) == -EBUSY) + cpu_relax(); + + /* If release command failed, clean up and bail out; + * not much else we can do about it + */ + if (err) { + free_bufs(priv, buf_array, i); + return 0; + } + + return i; + +err_map: + skb_free_frag(buf); +err_alloc: + /* If we managed to allocate at least some buffers, + * release them to hardware + */ + if (i) + goto release_bufs; + + return 0; +} + +static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) +{ + int i, j; + int new_count; + + /* This is the lazy seeding of Rx buffer pools. + * add_bufs() is also used on the Rx hotpath and calls + * napi_alloc_frag(). The trouble with that is that it in turn ends up + * calling this_cpu_ptr(), which mandates execution in atomic context. + * Rather than splitting up the code, do a one-off preempt disable. + */ + preempt_disable(); + for (j = 0; j < priv->num_channels; j++) { + for (i = 0; i < DPAA2_ETH_NUM_BUFS; + i += DPAA2_ETH_BUFS_PER_CMD) { + new_count = add_bufs(priv, priv->channel[j], bpid); + priv->channel[j]->buf_count += new_count; + + if (new_count < DPAA2_ETH_BUFS_PER_CMD) { + preempt_enable(); + return -ENOMEM; + } + } + } + preempt_enable(); + + return 0; +} + +/** + * Drain the specified number of buffers from the DPNI's private buffer pool. 
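+ * Buffers are acquired from QBMan in batches of up to @count, then unmapped and freed back to the allocator.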
+ * @count must not exceed DPAA2_ETH_BUFS_PER_CMD + */ +static void drain_bufs(struct dpaa2_eth_priv *priv, int count) +{ + u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; + int ret; + + do { + ret = dpaa2_io_service_acquire(NULL, priv->bpid, + buf_array, count); + if (ret < 0) { + netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); + return; + } + free_bufs(priv, buf_array, ret); + } while (ret); +} + +static void drain_pool(struct dpaa2_eth_priv *priv) +{ + int i; + + drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); + drain_bufs(priv, 1); + + for (i = 0; i < priv->num_channels; i++) + priv->channel[i]->buf_count = 0; +} + +/* Function is called from softirq context only, so we don't need to guard + * the access to percpu count + */ +static int refill_pool(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + u16 bpid) +{ + int new_count; + + if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) + return 0; + + do { + new_count = add_bufs(priv, ch, bpid); + if (unlikely(!new_count)) { + /* Out of memory; abort for now, we'll try later on */ + break; + } + ch->buf_count += new_count; + } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); + + if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) + return -ENOMEM; + + return 0; +} + +static int pull_channel(struct dpaa2_eth_channel *ch) +{ + int err; + int dequeues = -1; + + /* Retry while portal is busy */ + do { + err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, + ch->store); + dequeues++; + cpu_relax(); + } while (err == -EBUSY); + + ch->stats.dequeue_portal_busy += dequeues; + if (unlikely(err)) + ch->stats.pull_err++; + + return err; +} + +/* NAPI poll routine + * + * Frames are dequeued from the QMan channel associated with this NAPI context. + * Rx, Tx confirmation and (if configured) Rx error frames all count + * towards the NAPI budget. 
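+ * Each loop iteration issues a volatile dequeue into the store and then consumes it, stopping once the budget is spent or the channel runs dry.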
+ */ +static int dpaa2_eth_poll(struct napi_struct *napi, int budget) +{ + struct dpaa2_eth_channel *ch; + int cleaned = 0, store_cleaned; + struct dpaa2_eth_priv *priv; + int err; + + ch = container_of(napi, struct dpaa2_eth_channel, napi); + priv = ch->priv; + + while (cleaned < budget) { + err = pull_channel(ch); + if (unlikely(err)) + break; + + /* Refill pool if appropriate */ + refill_pool(priv, ch, priv->bpid); + + store_cleaned = consume_frames(ch); + cleaned += store_cleaned; + + /* If we have enough budget left for a full store, + * try a new pull dequeue, otherwise we're done here + */ + if (store_cleaned == 0 || + cleaned > budget - DPAA2_ETH_STORE_SIZE) + break; + } + + if (cleaned < budget && napi_complete_done(napi, cleaned)) { + /* Re-enable data available notifications */ + do { + err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); + cpu_relax(); + } while (err == -EBUSY); + WARN_ONCE(err, "CDAN notifications rearm failed on core %d", + ch->nctx.desired_cpu); + } + + ch->stats.frames += cleaned; + + return cleaned; +} + +static void enable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_enable(&ch->napi); + } +} + +static void disable_ch_napi(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + napi_disable(&ch->napi); + } +} + +static int link_state_update(struct dpaa2_eth_priv *priv) +{ + struct dpni_link_state state; + int err; + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (unlikely(err)) { + netdev_err(priv->net_dev, + "dpni_get_link_state() failed\n"); + return err; + } + + /* Check link state; speed / duplex changes are not treated yet */ + if (priv->link_state.up == state.up) + return 0; + + priv->link_state = state; + if (state.up) { + netif_carrier_on(priv->net_dev); + netif_tx_start_all_queues(priv->net_dev); + } else { + netif_tx_stop_all_queues(priv->net_dev); + netif_carrier_off(priv->net_dev); + } + + netdev_info(priv->net_dev, "Link Event: state %s\n", + state.up ? "up" : "down"); + + return 0; +} + +static int dpaa2_eth_open(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err; + + err = seed_pool(priv, priv->bpid); + if (err) { + /* Not much to do; the buffer pool, though not filled up, + * may still contain some buffers which would enable us + * to limp on. + */ + netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", + priv->dpbp_dev->obj_desc.id, priv->bpid); + } + + /* We'll only start the txqs when the link is actually ready; make sure + * we don't race against the link up notification, which may come + * immediately after dpni_enable(); + */ + netif_tx_stop_all_queues(net_dev); + enable_ch_napi(priv); + /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will + * return true and cause 'ip link show' to report the LOWER_UP flag, + * even though the link notification wasn't even received. + */ + netif_carrier_off(net_dev); + + err = dpni_enable(priv->mc_io, 0, priv->mc_token); + if (err < 0) { + netdev_err(net_dev, "dpni_enable() failed\n"); + goto enable_err; + } + + /* If the DPMAC object has already processed the link up interrupt, + * we have to learn the link state ourselves. 
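+ * link_state_update() below queries the current state from the MC firmware and starts the tx queues if the link is already up.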
+ */ + err = link_state_update(priv); + if (err < 0) { + netdev_err(net_dev, "Can't update link state\n"); + goto link_state_err; + } + + return 0; + +link_state_err: +enable_err: + disable_ch_napi(priv); + drain_pool(priv); + return err; +} + +/* The DPIO store must be empty when we call this, + * at the end of every NAPI cycle. + */ +static u32 drain_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch) +{ + u32 drained = 0, total = 0; + + do { + pull_channel(ch); + drained = consume_frames(ch); + total += drained; + } while (drained); + + return total; +} + +static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *ch; + int i; + u32 drained = 0; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + drained += drain_channel(priv, ch); + } + + return drained; +} + +static int dpaa2_eth_stop(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int dpni_enabled; + int retries = 10; + u32 drained; + + netif_tx_stop_all_queues(net_dev); + netif_carrier_off(net_dev); + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us. + */ + do { + dpni_disable(priv->mc_io, 0, priv->mc_token); + dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); + if (dpni_enabled) + /* Allow the hardware some slack */ + msleep(100); + } while (dpni_enabled && --retries); + if (!retries) { + netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); + /* Must go on and disable NAPI nonetheless, so we don't crash at + * the next "ifconfig up" + */ + } + + /* Wait for NAPI to complete on every core and disable it. + * In particular, this will also prevent NAPI from being rescheduled if + * a new CDAN is serviced, effectively discarding the CDAN. We therefore + * don't even need to disarm the channels, except perhaps for the case + * of a huge coalescing value. + */ + disable_ch_napi(priv); + + /* Manually drain the Rx and TxConf queues */ + drained = drain_ingress_frames(priv); + if (drained) + netdev_dbg(net_dev, "Drained %d frames.\n", drained); + + /* Empty the buffer pool */ + drain_pool(priv); + + return 0; +} + +static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct device *dev = net_dev->dev.parent; + int err; + + err = eth_mac_addr(net_dev, addr); + if (err < 0) { + dev_err(dev, "eth_mac_addr() failed (%d)\n", err); + return err; + } + + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + net_dev->dev_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); + return err; + } + + return 0; +} + +/** Fill in counters maintained by the GPP driver. These may be different from + * the hardware counters obtained by ethtool. + */ +static void dpaa2_eth_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct rtnl_link_stats64 *percpu_stats; + u64 *cpustats; + u64 *netstats = (u64 *)stats; + int i, j; + int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); + + for_each_possible_cpu(i) { + percpu_stats = per_cpu_ptr(priv->percpu_stats, i); + cpustats = (u64 *)percpu_stats; + for (j = 0; j < num; j++) + netstats[j] += cpustats[j]; + } +} + +/* Copy mac unicast addresses from @net_dev to @priv. + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
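+ * Failures to add an entry are only logged; populating the filtering table is best effort here.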
+ */ +static void add_uc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_uc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add ucast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +/* Copy mac multicast addresses from @net_dev to @priv + * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. + */ +static void add_mc_hw_addr(const struct net_device *net_dev, + struct dpaa2_eth_priv *priv) +{ + struct netdev_hw_addr *ha; + int err; + + netdev_for_each_mc_addr(ha, net_dev) { + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, + ha->addr); + if (err) + netdev_warn(priv->net_dev, + "Could not add mcast MAC %pM to the filtering table (err %d)\n", + ha->addr, err); + } +} + +static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int uc_count = netdev_uc_count(net_dev); + int mc_count = netdev_mc_count(net_dev); + u8 max_mac = priv->dpni_attrs.mac_filter_entries; + u32 options = priv->dpni_attrs.options; + u16 mc_token = priv->mc_token; + struct fsl_mc_io *mc_io = priv->mc_io; + int err; + + /* Basic sanity checks; these probably indicate a misconfiguration */ + if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) + netdev_info(net_dev, + "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", + max_mac); + + /* Force promiscuous if the uc or mc counts exceed our capabilities. */ + if (uc_count > max_mac) { + netdev_info(net_dev, + "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count, max_mac); + goto force_promisc; + } + if (mc_count + uc_count > max_mac) { + netdev_info(net_dev, + "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", + uc_count + mc_count, max_mac); + goto force_mc_promisc; + } + + /* Adjust promisc settings due to flag combinations */ + if (net_dev->flags & IFF_PROMISC) + goto force_promisc; + if (net_dev->flags & IFF_ALLMULTI) { + /* First, rebuild unicast filtering table. This should be done + * in promisc mode, in order to avoid frame loss while we + * progressively add entries to the table. + * We don't know whether we had been in promisc already, and + * making an MC call to find out is expensive; so set uc promisc + * nonetheless. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc\n"); + + /* Actual uc table reconstruction. */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc filters\n"); + add_uc_hw_addr(net_dev, priv); + + /* Finally, clear uc promisc and set mc promisc as requested. */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear uc promisc\n"); + goto force_mc_promisc; + } + + /* Neither unicast, nor multicast promisc will be on... eventually. + * For now, rebuild mac filtering tables while forcing both of them on. 
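+ * Keeping promisc on during the rebuild avoids dropping legitimate frames while the tables are momentarily empty.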
+ */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); + + /* Actual mac filtering tables reconstruction */ + err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); + if (err) + netdev_warn(net_dev, "Can't clear mac filters\n"); + add_mc_hw_addr(net_dev, priv); + add_uc_hw_addr(net_dev, priv); + + /* Now we can clear both ucast and mcast promisc, without risking + * to drop legitimate frames anymore. + */ + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear ucast promisc\n"); + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); + if (err) + netdev_warn(net_dev, "Can't clear mcast promisc\n"); + + return; + +force_promisc: + err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set ucast promisc\n"); +force_mc_promisc: + err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); + if (err) + netdev_warn(net_dev, "Can't set mcast promisc\n"); +} + +static int dpaa2_eth_set_features(struct net_device *net_dev, + netdev_features_t features) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + netdev_features_t changed = features ^ net_dev->features; + bool enable; + int err; + + if (changed & NETIF_F_RXCSUM) { + enable = !!(features & NETIF_F_RXCSUM); + err = set_rx_csum(priv, enable); + if (err) + return err; + } + + if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); + err = set_tx_csum(priv, enable); + if (err) + return err; + } + + return 0; +} + +static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct hwtstamp_config config; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + priv->tx_tstamp = false; + break; + case HWTSTAMP_TX_ON: + priv->tx_tstamp = true; + break; + default: + return -ERANGE; + } + + if (config.rx_filter == HWTSTAMP_FILTER_NONE) { + priv->rx_tstamp = false; + } else { + priv->rx_tstamp = true; + /* TS is set for all frame types, not only those requested */ + config.rx_filter = HWTSTAMP_FILTER_ALL; + } + + return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
+ -EFAULT : 0; +} + +static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (cmd == SIOCSHWTSTAMP) + return dpaa2_eth_ts_ioctl(dev, rq, cmd); + + return -EINVAL; +} + +static const struct net_device_ops dpaa2_eth_ops = { + .ndo_open = dpaa2_eth_open, + .ndo_start_xmit = dpaa2_eth_tx, + .ndo_stop = dpaa2_eth_stop, + .ndo_set_mac_address = dpaa2_eth_set_addr, + .ndo_get_stats64 = dpaa2_eth_get_stats, + .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, + .ndo_set_features = dpaa2_eth_set_features, + .ndo_do_ioctl = dpaa2_eth_ioctl, +}; + +static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) +{ + struct dpaa2_eth_channel *ch; + + ch = container_of(ctx, struct dpaa2_eth_channel, nctx); + + /* Update NAPI statistics */ + ch->stats.cdan++; + + napi_schedule_irqoff(&ch->napi); +} + +/* Allocate and configure a DPCON object */ +static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) +{ + struct fsl_mc_device *dpcon; + struct device *dev = priv->net_dev->dev.parent; + struct dpcon_attr attrs; + int err; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), + FSL_MC_POOL_DPCON, &dpcon); + if (err) { + dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + return NULL; + } + + err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_open() failed\n"); + goto free; + } + + err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_reset() failed\n"); + goto close; + } + + err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto close; + } + + err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); + if (err) { + dev_err(dev, "dpcon_enable() failed\n"); + goto close; + } + + return dpcon; + +close: + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); +free: + fsl_mc_object_free(dpcon); + + return NULL; +} + +static void free_dpcon(struct dpaa2_eth_priv *priv, + struct fsl_mc_device *dpcon) +{ + dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); + dpcon_close(priv->mc_io, 0, dpcon->mc_handle); + fsl_mc_object_free(dpcon); +} + +static struct dpaa2_eth_channel * +alloc_channel(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_eth_channel *channel; + struct dpcon_attr attr; + struct device *dev = priv->net_dev->dev.parent; + int err; + + channel = kzalloc(sizeof(*channel), GFP_KERNEL); + if (!channel) + return NULL; + + channel->dpcon = setup_dpcon(priv); + if (!channel->dpcon) + goto err_setup; + + err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, + &attr); + if (err) { + dev_err(dev, "dpcon_get_attributes() failed\n"); + goto err_get_attr; + } + + channel->dpcon_id = attr.id; + channel->ch_id = attr.qbman_ch_id; + channel->priv = priv; + + return channel; + +err_get_attr: + free_dpcon(priv, channel->dpcon); +err_setup: + kfree(channel); + return NULL; +} + +static void free_channel(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *channel) +{ + free_dpcon(priv, channel->dpcon); + kfree(channel); +} + +/* DPIO setup: allocate and configure QBMan channels, setup core affinity + * and register data availability notifications + */ +static int setup_dpio(struct dpaa2_eth_priv *priv) +{ + struct dpaa2_io_notification_ctx *nctx; + struct dpaa2_eth_channel *channel; + struct dpcon_notification_cfg dpcon_notif_cfg; + struct device *dev = priv->net_dev->dev.parent; + int i, err; + + /* We want the ability to spread ingress traffic (RX, TX conf) to as + * many cores as possible, so we need 
one channel for each core + * (unless there are fewer queues than cores, in which case the extra + * channels would be wasted). + * Allocate one channel per core and register it to the core's + * affine DPIO. If not enough channels are available for all cores + * or if some cores don't have an affine DPIO, there will be no + * ingress frame processing on those cores. + */ + cpumask_clear(&priv->dpio_cpumask); + for_each_online_cpu(i) { + /* Try to allocate a channel */ + channel = alloc_channel(priv); + if (!channel) { + dev_info(dev, + "No affine channel for cpu %d and above\n", i); + err = -ENODEV; + goto err_alloc_ch; + } + + priv->channel[priv->num_channels] = channel; + + nctx = &channel->nctx; + nctx->is_cdan = 1; + nctx->cb = cdan_cb; + nctx->id = channel->ch_id; + nctx->desired_cpu = i; + + /* Register the new context */ + channel->dpio = dpaa2_io_service_select(i); + err = dpaa2_io_service_register(channel->dpio, nctx); + if (err) { + dev_dbg(dev, "No affine DPIO for cpu %d\n", i); + /* If no affine DPIO for this core, there's probably + * none available for next cores either. Signal we want + * to retry later, in case the DPIO devices weren't + * probed yet. + */ + err = -EPROBE_DEFER; + goto err_service_reg; + } + + /* Register DPCON notification with MC */ + dpcon_notif_cfg.dpio_id = nctx->dpio_id; + dpcon_notif_cfg.priority = 0; + dpcon_notif_cfg.user_ctx = nctx->qman64; + err = dpcon_set_notification(priv->mc_io, 0, + channel->dpcon->mc_handle, + &dpcon_notif_cfg); + if (err) { + dev_err(dev, "dpcon_set_notification() failed\n"); + goto err_set_cdan; + } + + /* If we managed to allocate a channel and also found an affine + * DPIO for this core, add it to the final mask + */ + cpumask_set_cpu(i, &priv->dpio_cpumask); + priv->num_channels++; + + /* Stop if we already have enough channels to accommodate all + * RX and TX conf queues + */ + if (priv->num_channels == dpaa2_eth_queue_count(priv)) + break; + } + + return 0; + +err_set_cdan: + dpaa2_io_service_deregister(channel->dpio, nctx); +err_service_reg: + free_channel(priv, channel); +err_alloc_ch: + if (cpumask_empty(&priv->dpio_cpumask)) { + dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); + return err; + } + + dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", + cpumask_pr_args(&priv->dpio_cpumask)); + + return 0; +} + +static void free_dpio(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + /* deregister CDAN notifications and free channels */ + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + dpaa2_io_service_deregister(ch->dpio, &ch->nctx); + free_channel(priv, ch); + } +} + +static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, + int cpu) +{ + struct device *dev = priv->net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) + if (priv->channel[i]->nctx.desired_cpu == cpu) + return priv->channel[i]; + + /* We should never get here. Issue a warning and return + * the first channel, because it's still better than nothing + */ + dev_warn(dev, "No affine channel found for cpu %d\n", cpu); + + return priv->channel[0]; +} + +static void set_fq_affinity(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct cpumask xps_mask; + struct dpaa2_eth_fq *fq; + int rx_cpu, txc_cpu; + int i, err; + + /* For each FQ, pick one channel/CPU to deliver frames to. + * This may well change at runtime, either through irqbalance or + * through direct user intervention. 
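+ * Rx and Tx confirmation queues are spread round-robin over the cpus in dpio_cpumask, with a separate rotation counter for each queue type.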
+ */ + rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); + + for (i = 0; i < priv->num_fqs; i++) { + fq = &priv->fq[i]; + switch (fq->type) { + case DPAA2_RX_FQ: + fq->target_cpu = rx_cpu; + rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); + if (rx_cpu >= nr_cpu_ids) + rx_cpu = cpumask_first(&priv->dpio_cpumask); + break; + case DPAA2_TX_CONF_FQ: + fq->target_cpu = txc_cpu; + + /* Tell the stack to affine to txc_cpu the Tx queue + * associated with the confirmation one + */ + cpumask_clear(&xps_mask); + cpumask_set_cpu(txc_cpu, &xps_mask); + err = netif_set_xps_queue(priv->net_dev, &xps_mask, + fq->flowid); + if (err) + dev_err(dev, "Error setting XPS queue\n"); + + txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); + if (txc_cpu >= nr_cpu_ids) + txc_cpu = cpumask_first(&priv->dpio_cpumask); + break; + default: + dev_err(dev, "Unknown FQ type: %d\n", fq->type); + } + fq->channel = get_affine_channel(priv, fq->target_cpu); + } +} + +static void setup_fqs(struct dpaa2_eth_priv *priv) +{ + int i; + + /* We have one TxConf FQ per Tx flow. + * The number of Tx and Rx queues is the same. + * Tx queues come first in the fq array. + */ + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { + priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + + for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { + priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; + priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; + priv->fq[priv->num_fqs++].flowid = (u16)i; + } + + /* For each FQ, decide on which core to process incoming frames */ + set_fq_affinity(priv); +} + +/* Allocate and configure one buffer pool for each interface */ +static int setup_dpbp(struct dpaa2_eth_priv *priv) +{ + int err; + struct fsl_mc_device *dpbp_dev; + struct device *dev = priv->net_dev->dev.parent; + struct dpbp_attr dpbp_attrs; + + err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, + &dpbp_dev); + if (err) { + dev_err(dev, "DPBP device allocation failed\n"); + return err; + } + + priv->dpbp_dev = dpbp_dev; + + err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, + &dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_open() failed\n"); + goto err_open; + } + + err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_reset() failed\n"); + goto err_reset; + } + + err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); + if (err) { + dev_err(dev, "dpbp_enable() failed\n"); + goto err_enable; + } + + err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, + &dpbp_attrs); + if (err) { + dev_err(dev, "dpbp_get_attributes() failed\n"); + goto err_get_attr; + } + priv->bpid = dpbp_attrs.bpid; + + return 0; + +err_get_attr: + dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); +err_enable: +err_reset: + dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); +err_open: + fsl_mc_object_free(dpbp_dev); + + return err; +} + +static void free_dpbp(struct dpaa2_eth_priv *priv) +{ + drain_pool(priv); + dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); + fsl_mc_object_free(priv->dpbp_dev); +} + +static int set_buffer_layout(struct dpaa2_eth_priv *priv) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_buffer_layout buf_layout = {0}; + int err; + + /* We need to check for WRIOP version 1.0.0, but depending on the MC + * version, this number is not always provided correctly on rev1. 
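+ * (For reference, editor's note: DPAA2_WRIOP_VERSION(x, y, z) packs a + * version as (x) << 10 | (y) << 5 | (z), so 1.0.0 encodes to 0x400 while + * a rev1 unit misreporting itself as 0.0.0 encodes to 0x0.)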
+ * We need to check for both alternatives in this situation. + */ + if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || + priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; + else + priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; + + /* tx buffer */ + buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; + buf_layout.pass_timestamp = true; + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); + return err; + } + + /* tx-confirm buffer */ + buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); + return err; + } + + /* Now that we've set our tx buffer layout, retrieve the minimum + * required tx data offset. + */ + err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, + &priv->tx_data_offset); + if (err) { + dev_err(dev, "dpni_get_tx_data_offset() failed\n"); + return err; + } + + if ((priv->tx_data_offset % 64) != 0) + dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", + priv->tx_data_offset); + + /* rx buffer */ + buf_layout.pass_frame_status = true; + buf_layout.pass_parser_result = true; + buf_layout.data_align = priv->rx_buf_align; + buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); + buf_layout.private_data_size = 0; + buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | + DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, &buf_layout); + if (err) { + dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); + return err; + } + + return 0; +} + +/* Configure the DPNI object this interface is associated with */ +static int setup_dpni(struct fsl_mc_device *ls_dev) +{ + struct device *dev = &ls_dev->dev; + struct dpaa2_eth_priv *priv; + struct net_device *net_dev; + int err; + + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + /* get a handle for the DPNI object */ + err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); + if (err) { + dev_err(dev, "dpni_open() failed\n"); + return err; + } + + /* Check if we can work with this DPNI object */ + err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, + &priv->dpni_ver_minor); + if (err) { + dev_err(dev, "dpni_get_api_version() failed\n"); + goto close; + } + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { + dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", + priv->dpni_ver_major, priv->dpni_ver_minor, + DPNI_VER_MAJOR, DPNI_VER_MINOR); + err = -ENOTSUPP; + goto close; + } + + ls_dev->mc_io = priv->mc_io; + ls_dev->mc_handle = priv->mc_token; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) { + dev_err(dev, "dpni_reset() failed\n"); + goto close; + } + + err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, + &priv->dpni_attrs); + if (err) { + dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); + goto close; + } + + err = set_buffer_layout(priv); + if (err) + goto close; + + return 0; + +close: + dpni_close(priv->mc_io, 0, priv->mc_token); + + return err; +} + +static void free_dpni(struct 
dpaa2_eth_priv *priv) +{ + int err; + + err = dpni_reset(priv->mc_io, 0, priv->mc_token); + if (err) + netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", + err); + + dpni_close(priv->mc_io, 0, priv->mc_token); +} + +static int setup_rx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue queue; + struct dpni_queue_id qid; + struct dpni_taildrop td; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(RX) failed\n"); + return err; + } + + fq->fqid = qid.fqid; + + queue.destination.id = fq->channel->dpcon_id; + queue.destination.type = DPNI_DEST_DPCON; + queue.destination.priority = 1; + queue.user_context = (u64)(uintptr_t)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, 0, fq->flowid, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + &queue); + if (err) { + dev_err(dev, "dpni_set_queue(RX) failed\n"); + return err; + } + + td.enable = 1; + td.threshold = DPAA2_ETH_TAILDROP_THRESH; + err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, + DPNI_QUEUE_RX, 0, fq->flowid, &td); + if (err) { + dev_err(dev, "dpni_set_taildrop() failed\n"); + return err; + } + + return 0; +} + +static int setup_tx_flow(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_fq *fq) +{ + struct device *dev = priv->net_dev->dev.parent; + struct dpni_queue queue; + struct dpni_queue_id qid; + int err; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(TX) failed\n"); + return err; + } + + fq->tx_qdbin = qid.qdbin; + + err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, + &queue, &qid); + if (err) { + dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); + return err; + } + + fq->fqid = qid.fqid; + + queue.destination.id = fq->channel->dpcon_id; + queue.destination.type = DPNI_DEST_DPCON; + queue.destination.priority = 0; + queue.user_context = (u64)(uintptr_t)fq; + err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, + DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, + &queue); + if (err) { + dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); + return err; + } + + return 0; +} + +/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */ +static const struct dpaa2_eth_hash_fields hash_fields[] = { + { + /* IP header */ + .rxnfc_field = RXH_IP_SRC, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_SRC, + .size = 4, + }, { + .rxnfc_field = RXH_IP_DST, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_DST, + .size = 4, + }, { + .rxnfc_field = RXH_L3_PROTO, + .cls_prot = NET_PROT_IP, + .cls_field = NH_FLD_IP_PROTO, + .size = 1, + }, { + /* Using UDP ports, this is functionally equivalent to raw + * byte pairs from L4 header. 
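+ * (Editor's note: TCP and UDP both carry the source port at L4 offset 0 + * and the destination port at offset 2, so extracting the UDP fields + * hashes the same bytes for TCP flows as well.)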
+ */ + .rxnfc_field = RXH_L4_B_0_1, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_SRC, + .size = 2, + }, { + .rxnfc_field = RXH_L4_B_2_3, + .cls_prot = NET_PROT_UDP, + .cls_field = NH_FLD_UDP_PORT_DST, + .size = 2, + }, +}; + +/* Set RX hash options + * flags is a combination of RXH_ bits + */ +static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpkg_profile_cfg cls_cfg; + struct dpni_rx_tc_dist_cfg dist_cfg; + u8 *dma_mem; + int i; + int err = 0; + + if (!dpaa2_eth_hash_enabled(priv)) { + dev_dbg(dev, "Hashing support is not enabled\n"); + return 0; + } + + memset(&cls_cfg, 0, sizeof(cls_cfg)); + + for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { + struct dpkg_extract *key = + &cls_cfg.extracts[cls_cfg.num_extracts]; + + if (!(flags & hash_fields[i].rxnfc_field)) + continue; + + if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { + dev_err(dev, "error adding key extraction rule, too many rules?\n"); + return -E2BIG; + } + + key->type = DPKG_EXTRACT_FROM_HDR; + key->extract.from_hdr.prot = hash_fields[i].cls_prot; + key->extract.from_hdr.type = DPKG_FULL_FIELD; + key->extract.from_hdr.field = hash_fields[i].cls_field; + cls_cfg.num_extracts++; + + priv->rx_hash_fields |= hash_fields[i].rxnfc_field; + } + + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); + if (!dma_mem) + return -ENOMEM; + + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); + if (err) { + dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); + goto err_prep_key; + } + + memset(&dist_cfg, 0, sizeof(dist_cfg)); + + /* Prepare for setting the rx dist */ + dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, + DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { + dev_err(dev, "DMA mapping failed\n"); + err = -ENOMEM; + goto err_dma_map; + } + + dist_cfg.dist_size = dpaa2_eth_queue_count(priv); + dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); + dma_unmap_single(dev, dist_cfg.key_cfg_iova, + DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); + if (err) + dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); + +err_dma_map: +err_prep_key: + kfree(dma_mem); + return err; +} + +/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ +static int bind_dpni(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + struct dpni_pools_cfg pools_params; + struct dpni_error_cfg err_cfg; + int err = 0; + int i; + + pools_params.num_dpbp = 1; + pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; + pools_params.pools[0].backup_pool = 0; + pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; + err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); + if (err) { + dev_err(dev, "dpni_set_pools() failed\n"); + return err; + } + + /* have the interface implicitly distribute traffic based on + * the default hash key + */ + err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); + if (err) + dev_err(dev, "Failed to configure hashing\n"); + + /* Configure handling of error frames */ + err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; + err_cfg.set_frame_annotation = 1; + err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; + err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, + &err_cfg); + if (err) { + dev_err(dev, "dpni_set_errors_behavior failed\n"); 
+ return err; + } + + /* Configure Rx and Tx conf queues to generate CDANs */ + for (i = 0; i < priv->num_fqs; i++) { + switch (priv->fq[i].type) { + case DPAA2_RX_FQ: + err = setup_rx_flow(priv, &priv->fq[i]); + break; + case DPAA2_TX_CONF_FQ: + err = setup_tx_flow(priv, &priv->fq[i]); + break; + default: + dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); + return -EINVAL; + } + if (err) + return err; + } + + err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_TX, &priv->tx_qdid); + if (err) { + dev_err(dev, "dpni_get_qdid() failed\n"); + return err; + } + + return 0; +} + +/* Allocate rings for storing incoming frame descriptors */ +static int alloc_rings(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + int i; + + for (i = 0; i < priv->num_channels; i++) { + priv->channel[i]->store = + dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); + if (!priv->channel[i]->store) { + netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); + goto err_ring; + } + } + + return 0; + +err_ring: + for (i = 0; i < priv->num_channels; i++) { + if (!priv->channel[i]->store) + break; + dpaa2_io_store_destroy(priv->channel[i]->store); + } + + return -ENOMEM; +} + +static void free_rings(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_channels; i++) + dpaa2_io_store_destroy(priv->channel[i]->store); +} + +static int set_mac_addr(struct dpaa2_eth_priv *priv) +{ + struct net_device *net_dev = priv->net_dev; + struct device *dev = net_dev->dev.parent; + u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN]; + int err; + + /* Get firmware address, if any */ + err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); + if (err) { + dev_err(dev, "dpni_get_port_mac_addr() failed\n"); + return err; + } + + /* Get DPNI attributes address, if any */ + err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + dpni_mac_addr); + if (err) { + dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); + return err; + } + + /* First check if firmware has any address configured by bootloader */ + if (!is_zero_ether_addr(mac_addr)) { + /* If the DPMAC addr != DPNI addr, update it */ + if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { + err = dpni_set_primary_mac_addr(priv->mc_io, 0, + priv->mc_token, + mac_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); + return err; + } + } + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); + } else if (is_zero_ether_addr(dpni_mac_addr)) { + /* No MAC address configured, fill in net_dev->dev_addr + * with a random one + */ + eth_hw_addr_random(net_dev); + dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); + + err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, + net_dev->dev_addr); + if (err) { + dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); + return err; + } + + /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all + * practical purposes, this will be our "permanent" mac address, + * at least until the next reboot. This move will also permit + * register_netdevice() to properly fill up net_dev->perm_addr. + */ + net_dev->addr_assign_type = NET_ADDR_PERM; + } else { + /* NET_ADDR_PERM is default, all we have to do is + * fill in the device addr. 
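+ * (Editor's summary of the three cases handled here: a bootloader-set + * DPMAC address wins and is synced into the DPNI; if both addresses are + * zero, a random one is generated and persisted; a DPNI-only address, + * as in this branch, is simply adopted.)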
+ */ + memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); + } + + return 0; +} + +static int netdev_init(struct net_device *net_dev) +{ + struct device *dev = net_dev->dev.parent; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + u32 options = priv->dpni_attrs.options; + u64 supported = 0, not_supported = 0; + u8 bcast_addr[ETH_ALEN]; + u8 num_queues; + int err; + + net_dev->netdev_ops = &dpaa2_eth_ops; + net_dev->ethtool_ops = &dpaa2_ethtool_ops; + + err = set_mac_addr(priv); + if (err) + return err; + + /* Explicitly add the broadcast address to the MAC filtering table */ + eth_broadcast_addr(bcast_addr); + err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); + if (err) { + dev_err(dev, "dpni_add_mac_addr() failed\n"); + return err; + } + + /* Set MTU upper limit; lower limit is 68B (default value) */ + net_dev->max_mtu = DPAA2_ETH_MAX_MTU; + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, + DPAA2_ETH_MFL); + if (err) { + dev_err(dev, "dpni_set_max_frame_length() failed\n"); + return err; + } + + /* Set actual number of queues in the net device */ + num_queues = dpaa2_eth_queue_count(priv); + err = netif_set_real_num_tx_queues(net_dev, num_queues); + if (err) { + dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); + return err; + } + err = netif_set_real_num_rx_queues(net_dev, num_queues); + if (err) { + dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); + return err; + } + + /* Capabilities listing */ + supported |= IFF_LIVE_ADDR_CHANGE; + + if (options & DPNI_OPT_NO_MAC_FILTER) + not_supported |= IFF_UNICAST_FLT; + else + supported |= IFF_UNICAST_FLT; + + net_dev->priv_flags |= supported; + net_dev->priv_flags &= ~not_supported; + + /* Features */ + net_dev->features = NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_LLTX; + net_dev->hw_features = net_dev->features; + + return 0; +} + +static int poll_link_state(void *arg) +{ + struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; + int err; + + while (!kthread_should_stop()) { + err = link_state_update(priv); + if (unlikely(err)) + return err; + + msleep(DPAA2_ETH_LINK_STATE_REFRESH); + } + + return 0; +} + +static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) +{ + u32 status = ~0; + struct device *dev = (struct device *)arg; + struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); + struct net_device *net_dev = dev_get_drvdata(dev); + int err; + + err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, + DPNI_IRQ_INDEX, &status); + if (unlikely(err)) { + netdev_err(net_dev, "Can't get irq status (err %d)\n", err); + return IRQ_HANDLED; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) + link_state_update(netdev_priv(net_dev)); + + return IRQ_HANDLED; +} + +static int setup_irqs(struct fsl_mc_device *ls_dev) +{ + int err = 0; + struct fsl_mc_device_irq *irq; + + err = fsl_mc_allocate_irqs(ls_dev); + if (err) { + dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); + return err; + } + + irq = ls_dev->irqs[0]; + err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, + NULL, dpni_irq0_handler_thread, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + dev_name(&ls_dev->dev), &ls_dev->dev); + if (err < 0) { + dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); + goto free_mc_irq; + } + + err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); + goto free_irq; + } 
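+ + /* Editor's note, an assumption rather than documented behavior: the + * event mask is programmed before the IRQ is enabled below, so no + * notification should be able to fire while the mask is still unset. + */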
+ + err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, + DPNI_IRQ_INDEX, 1); + if (err < 0) { + dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); + goto free_irq; + } + + return 0; + +free_irq: + devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); +free_mc_irq: + fsl_mc_free_irqs(ls_dev); + + return err; +} + +static void add_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ + netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, + NAPI_POLL_WEIGHT); + } +} + +static void del_ch_napi(struct dpaa2_eth_priv *priv) +{ + int i; + struct dpaa2_eth_channel *ch; + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + netif_napi_del(&ch->napi); + } +} + +static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) +{ + struct device *dev; + struct net_device *net_dev = NULL; + struct dpaa2_eth_priv *priv = NULL; + int err = 0; + + dev = &dpni_dev->dev; + + /* Net device */ + net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); + if (!net_dev) { + dev_err(dev, "alloc_etherdev_mq() failed\n"); + return -ENOMEM; + } + + SET_NETDEV_DEV(net_dev, dev); + dev_set_drvdata(dev, net_dev); + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + + priv->iommu_domain = iommu_get_domain_for_dev(dev); + + /* Obtain a MC portal */ + err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, + &priv->mc_io); + if (err) { + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "MC portal allocation failed\n"); + goto err_portal_alloc; + } + + /* MC objects initialization and configuration */ + err = setup_dpni(dpni_dev); + if (err) + goto err_dpni_setup; + + err = setup_dpio(priv); + if (err) + goto err_dpio_setup; + + setup_fqs(priv); + + err = setup_dpbp(priv); + if (err) + goto err_dpbp_setup; + + err = bind_dpni(priv); + if (err) + goto err_bind; + + /* Add a NAPI context for each channel */ + add_ch_napi(priv); + + /* Percpu statistics */ + priv->percpu_stats = alloc_percpu(*priv->percpu_stats); + if (!priv->percpu_stats) { + dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_stats; + } + priv->percpu_extras = alloc_percpu(*priv->percpu_extras); + if (!priv->percpu_extras) { + dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); + err = -ENOMEM; + goto err_alloc_percpu_extras; + } + + err = netdev_init(net_dev); + if (err) + goto err_netdev_init; + + /* Configure checksum offload based on current interface flags */ + err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); + if (err) + goto err_csum; + + err = set_tx_csum(priv, !!(net_dev->features & + (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); + if (err) + goto err_csum; + + err = alloc_rings(priv); + if (err) + goto err_alloc_rings; + + err = setup_irqs(dpni_dev); + if (err) { + netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); + priv->poll_thread = kthread_run(poll_link_state, priv, + "%s_poll_link", net_dev->name); + if (IS_ERR(priv->poll_thread)) { + dev_err(dev, "Error starting polling thread\n"); + goto err_poll_thread; + } + priv->do_link_poll = true; + } + + err = register_netdev(net_dev); + if (err < 0) { + dev_err(dev, "register_netdev() failed\n"); + goto err_netdev_reg; + } + + dev_info(dev, "Probed interface %s\n", net_dev->name); + return 0; + +err_netdev_reg: + if (priv->do_link_poll) + 
kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(dpni_dev); +err_poll_thread: + free_rings(priv); +err_alloc_rings: +err_csum: +err_netdev_init: + free_percpu(priv->percpu_extras); +err_alloc_percpu_extras: + free_percpu(priv->percpu_stats); +err_alloc_percpu_stats: + del_ch_napi(priv); +err_bind: + free_dpbp(priv); +err_dpbp_setup: + free_dpio(priv); +err_dpio_setup: + free_dpni(priv); +err_dpni_setup: + fsl_mc_portal_free(priv->mc_io); +err_portal_alloc: + dev_set_drvdata(dev, NULL); + free_netdev(net_dev); + + return err; +} + +static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) +{ + struct device *dev; + struct net_device *net_dev; + struct dpaa2_eth_priv *priv; + + dev = &ls_dev->dev; + net_dev = dev_get_drvdata(dev); + priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + + if (priv->do_link_poll) + kthread_stop(priv->poll_thread); + else + fsl_mc_free_irqs(ls_dev); + + free_rings(priv); + free_percpu(priv->percpu_stats); + free_percpu(priv->percpu_extras); + + del_ch_napi(priv); + free_dpbp(priv); + free_dpio(priv); + free_dpni(priv); + + fsl_mc_portal_free(priv->mc_io); + + free_netdev(net_dev); + + dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); + + return 0; +} + +static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { + { + .vendor = FSL_MC_VENDOR_FREESCALE, + .obj_type = "dpni", + }, + { .vendor = 0x0 } +}; +MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); + +static struct fsl_mc_driver dpaa2_eth_driver = { + .driver = { + .name = KBUILD_MODNAME, + .owner = THIS_MODULE, + }, + .probe = dpaa2_eth_probe, + .remove = dpaa2_eth_remove, + .match_id_table = dpaa2_eth_match_id_table +}; + +module_fsl_mc_driver(dpaa2_eth_driver); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h new file mode 100644 index 000000000000..d54cb0b99d08 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -0,0 +1,412 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2014-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + */ + +#ifndef __DPAA2_ETH_H +#define __DPAA2_ETH_H + +#include <linux/netdevice.h> +#include <linux/if_vlan.h> +#include <linux/fsl/mc.h> + +#include <soc/fsl/dpaa2-io.h> +#include <soc/fsl/dpaa2-fd.h> +#include "dpni.h" +#include "dpni-cmd.h" + +#include "dpaa2-eth-trace.h" + +#define DPAA2_WRIOP_VERSION(x, y, z) ((x) << 10 | (y) << 5 | (z) << 0) + +#define DPAA2_ETH_STORE_SIZE 16 + +/* Maximum number of scatter-gather entries in an ingress frame, + * considering the maximum receive frame size is 64K + */ +#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) + +/* Maximum acceptable MTU value. It is in direct relation with the hardware + * enforced Max Frame Length (currently 10k). + */ +#define DPAA2_ETH_MFL (10 * 1024) +#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) +/* Convert L3 MTU to L2 MFL */ +#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) + +/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo + * frames in the Rx queues (length of the current frame is not + * taken into account when making the taildrop decision) + */ +#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) + +/* Buffer quota per queue. Must be large enough such that for minimum sized + * frames taildrop kicks in before the bpool gets depleted, so we compute + * how many 64B frames fit inside the taildrop threshold and add a margin + * to accommodate the buffer refill delay. 
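+ * Worked example (editor's note): with the 64 KiB taildrop threshold + * defined above, 64 * 1024 / 64 = 1024 frames per queue, and the extra + * margin of 256 buffers below gives DPAA2_ETH_NUM_BUFS = 1280.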
+ */ +#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) +#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) +#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE + +/* Maximum number of buffers that can be acquired/released through a single + * QBMan command + */ +#define DPAA2_ETH_BUFS_PER_CMD 7 + +/* Hardware requires alignment for ingress/egress buffer addresses */ +#define DPAA2_ETH_TX_BUF_ALIGN 64 + +#define DPAA2_ETH_RX_BUF_SIZE 2048 +#define DPAA2_ETH_SKB_SIZE \ + (DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +/* Hardware annotation area in RX/TX buffers */ +#define DPAA2_ETH_RX_HWA_SIZE 64 +#define DPAA2_ETH_TX_HWA_SIZE 128 + +/* PTP nominal frequency 1GHz */ +#define DPAA2_PTP_CLK_PERIOD_NS 1 + +/* Due to a limitation in WRIOP 1.0.0, the RX buffer data must be aligned + * to 256B. For newer revisions, the requirement is only for 64B alignment + */ +#define DPAA2_ETH_RX_BUF_ALIGN_REV1 256 +#define DPAA2_ETH_RX_BUF_ALIGN 64 + +/* We are accommodating a skb backpointer and some S/G info + * in the frame's software annotation. The hardware + * options are either 0 or 64, so we choose the latter. + */ +#define DPAA2_ETH_SWA_SIZE 64 + +/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ +struct dpaa2_eth_swa { + struct sk_buff *skb; + struct scatterlist *scl; + int num_sg; + int sgt_size; +}; + +/* Annotation valid bits in FD FRC */ +#define DPAA2_FD_FRC_FASV 0x8000 +#define DPAA2_FD_FRC_FAEADV 0x4000 +#define DPAA2_FD_FRC_FAPRV 0x2000 +#define DPAA2_FD_FRC_FAIADV 0x1000 +#define DPAA2_FD_FRC_FASWOV 0x0800 +#define DPAA2_FD_FRC_FAICFDV 0x0400 + +/* Error bits in FD CTRL */ +#define DPAA2_FD_RX_ERR_MASK (FD_CTRL_SBE | FD_CTRL_FAERR) +#define DPAA2_FD_TX_ERR_MASK (FD_CTRL_UFD | \ + FD_CTRL_SBE | \ + FD_CTRL_FSE | \ + FD_CTRL_FAERR) + +/* Annotation bits in FD CTRL */ +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128B */ + +/* Frame annotation status */ +struct dpaa2_fas { + u8 reserved; + u8 ppid; + __le16 ifpid; + __le32 status; +}; + +/* Frame annotation status word is located in the first 8 bytes + * of the buffer's hardware annotation area + */ +#define DPAA2_FAS_OFFSET 0 +#define DPAA2_FAS_SIZE (sizeof(struct dpaa2_fas)) + +/* Timestamp is located in the next 8 bytes of the buffer's + * hardware annotation area + */ +#define DPAA2_TS_OFFSET 0x8 + +/* Frame annotation egress action descriptor */ +#define DPAA2_FAEAD_OFFSET 0x58 + +struct dpaa2_faead { + __le32 conf_fqid; + __le32 ctrl; +}; + +#define DPAA2_FAEAD_A2V 0x20000000 +#define DPAA2_FAEAD_UPDV 0x00001000 +#define DPAA2_FAEAD_UPD 0x00000010 + +/* Accessors for the hardware annotation fields that we use */ +static inline void *dpaa2_get_hwa(void *buf_addr, bool swa) +{ + return buf_addr + (swa ? 
DPAA2_ETH_SWA_SIZE : 0); +} + +static inline struct dpaa2_fas *dpaa2_get_fas(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAS_OFFSET; +} + +static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET; +} + +static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa) +{ + return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET; +} + +/* Error and status bits in the frame annotation status word */ +/* Debug frame, otherwise supposed to be discarded */ +#define DPAA2_FAS_DISC 0x80000000 +/* MACSEC frame */ +#define DPAA2_FAS_MS 0x40000000 +#define DPAA2_FAS_PTP 0x08000000 +/* Ethernet multicast frame */ +#define DPAA2_FAS_MC 0x04000000 +/* Ethernet broadcast frame */ +#define DPAA2_FAS_BC 0x02000000 +#define DPAA2_FAS_KSE 0x00040000 +#define DPAA2_FAS_EOFHE 0x00020000 +#define DPAA2_FAS_MNLE 0x00010000 +#define DPAA2_FAS_TIDE 0x00008000 +#define DPAA2_FAS_PIEE 0x00004000 +/* Frame length error */ +#define DPAA2_FAS_FLE 0x00002000 +/* Frame physical error */ +#define DPAA2_FAS_FPE 0x00001000 +#define DPAA2_FAS_PTE 0x00000080 +#define DPAA2_FAS_ISP 0x00000040 +#define DPAA2_FAS_PHE 0x00000020 +#define DPAA2_FAS_BLE 0x00000010 +/* L3 csum validation performed */ +#define DPAA2_FAS_L3CV 0x00000008 +/* L3 csum error */ +#define DPAA2_FAS_L3CE 0x00000004 +/* L4 csum validation performed */ +#define DPAA2_FAS_L4CV 0x00000002 +/* L4 csum error */ +#define DPAA2_FAS_L4CE 0x00000001 +/* Possible errors on the ingress path */ +#define DPAA2_FAS_RX_ERR_MASK (DPAA2_FAS_KSE | \ + DPAA2_FAS_EOFHE | \ + DPAA2_FAS_MNLE | \ + DPAA2_FAS_TIDE | \ + DPAA2_FAS_PIEE | \ + DPAA2_FAS_FLE | \ + DPAA2_FAS_FPE | \ + DPAA2_FAS_PTE | \ + DPAA2_FAS_ISP | \ + DPAA2_FAS_PHE | \ + DPAA2_FAS_BLE | \ + DPAA2_FAS_L3CE | \ + DPAA2_FAS_L4CE) + +/* Time in milliseconds between link state updates */ +#define DPAA2_ETH_LINK_STATE_REFRESH 1000 + +/* Number of times to retry a frame enqueue before giving up. + * Value determined empirically, in order to minimize the number + * of frames dropped on Tx + */ +#define DPAA2_ETH_ENQUEUE_RETRIES 10 + +/* Driver statistics, other than those in struct rtnl_link_stats64. + * These are usually collected per-CPU and aggregated by ethtool. 
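+ * A minimal aggregation sketch (editor's note, mirroring the loop in + * dpaa2_eth_get_ethtool_stats(), with 'total' a hypothetical array): + * + *	for_each_online_cpu(cpu) { + *		extras = per_cpu_ptr(priv->percpu_extras, cpu); + *		for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) + *			total[j] += *((__u64 *)extras + j); + *	} + * + * i.e. each field is summed across CPUs before being reported.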
+ */ +struct dpaa2_eth_drv_stats { + __u64 tx_conf_frames; + __u64 tx_conf_bytes; + __u64 tx_sg_frames; + __u64 tx_sg_bytes; + __u64 tx_reallocs; + __u64 rx_sg_frames; + __u64 rx_sg_bytes; + /* Enqueues retried due to portal busy */ + __u64 tx_portal_busy; +}; + +/* Per-FQ statistics */ +struct dpaa2_eth_fq_stats { + /* Number of frames received on this queue */ + __u64 frames; +}; + +/* Per-channel statistics */ +struct dpaa2_eth_ch_stats { + /* Volatile dequeues retried due to portal busy */ + __u64 dequeue_portal_busy; + /* Number of CDANs; useful to estimate avg NAPI len */ + __u64 cdan; + /* Number of frames received on queues from this channel */ + __u64 frames; + /* Pull errors */ + __u64 pull_err; +}; + +/* Maximum number of queues associated with a DPNI */ +#define DPAA2_ETH_MAX_RX_QUEUES 16 +#define DPAA2_ETH_MAX_TX_QUEUES 16 +#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ + DPAA2_ETH_MAX_TX_QUEUES) + +#define DPAA2_ETH_MAX_DPCONS 16 + +enum dpaa2_eth_fq_type { + DPAA2_RX_FQ = 0, + DPAA2_TX_CONF_FQ, +}; + +struct dpaa2_eth_priv; + +struct dpaa2_eth_fq { + u32 fqid; + u32 tx_qdbin; + u16 flowid; + int target_cpu; + struct dpaa2_eth_channel *channel; + enum dpaa2_eth_fq_type type; + + void (*consume)(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + const struct dpaa2_fd *fd, + struct napi_struct *napi, + u16 queue_id); + struct dpaa2_eth_fq_stats stats; +}; + +struct dpaa2_eth_channel { + struct dpaa2_io_notification_ctx nctx; + struct fsl_mc_device *dpcon; + int dpcon_id; + int ch_id; + struct napi_struct napi; + struct dpaa2_io *dpio; + struct dpaa2_io_store *store; + struct dpaa2_eth_priv *priv; + int buf_count; + struct dpaa2_eth_ch_stats stats; +}; + +struct dpaa2_eth_hash_fields { + u64 rxnfc_field; + enum net_prot cls_prot; + int cls_field; + int size; +}; + +/* Driver private data */ +struct dpaa2_eth_priv { + struct net_device *net_dev; + + u8 num_fqs; + struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; + + u8 num_channels; + struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; + + struct dpni_attr dpni_attrs; + u16 dpni_ver_major; + u16 dpni_ver_minor; + u16 tx_data_offset; + + struct fsl_mc_device *dpbp_dev; + u16 bpid; + struct iommu_domain *iommu_domain; + + bool tx_tstamp; /* Tx timestamping enabled */ + bool rx_tstamp; /* Rx timestamping enabled */ + + u16 tx_qdid; + u16 rx_buf_align; + struct fsl_mc_io *mc_io; + /* Cores which have an affine DPIO/DPCON. 
+ * This is the cpu set on which Rx and Tx conf frames are processed + */ + struct cpumask dpio_cpumask; + + /* Standard statistics */ + struct rtnl_link_stats64 __percpu *percpu_stats; + /* Extra stats, in addition to the ones known by the kernel */ + struct dpaa2_eth_drv_stats __percpu *percpu_extras; + + u16 mc_token; + + struct dpni_link_state link_state; + bool do_link_poll; + struct task_struct *poll_thread; + + /* enabled ethtool hashing bits */ + u64 rx_hash_fields; +}; + +#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ + | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ + | RXH_L4_B_2_3) + +/* default Rx hash options, set during probing */ +#define DPAA2_RXH_DEFAULT (RXH_L3_PROTO | RXH_IP_SRC | RXH_IP_DST | \ + RXH_L4_B_0_1 | RXH_L4_B_2_3) + +#define dpaa2_eth_hash_enabled(priv) \ + ((priv)->dpni_attrs.num_queues > 1) + +/* Required by struct dpni_rx_tc_dist_cfg::key_cfg_iova */ +#define DPAA2_CLASSIFIER_DMA_SIZE 256 + +extern const struct ethtool_ops dpaa2_ethtool_ops; +extern int dpaa2_phc_index; + +static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv, + u16 ver_major, u16 ver_minor) +{ + if (priv->dpni_ver_major == ver_major) + return priv->dpni_ver_minor - ver_minor; + return priv->dpni_ver_major - ver_major; +} + +/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around + * the buffer also needs space for its shared info struct, and we need + * to allocate enough to accommodate hardware alignment restrictions + */ +static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv) +{ + return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align; +} + +static inline +unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, + struct sk_buff *skb) +{ + unsigned int headroom = DPAA2_ETH_SWA_SIZE; + + /* For non-linear skbs we have no headroom requirement, as we build a + * SG frame with a newly allocated SGT buffer + */ + if (skb_is_nonlinear(skb)) + return 0; + + /* If we have Tx timestamping, need 128B hardware annotation */ + if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) + headroom += DPAA2_ETH_TX_HWA_SIZE; + + return headroom; +} + +/* Extra headroom space requested to hardware, in order to make sure there's + * no realloc'ing in forwarding scenarios + */ +static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) +{ + return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - + DPAA2_ETH_RX_HWA_SIZE; +} + +static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) +{ + return priv->dpni_attrs.num_queues; +} + +#endif /* __DPAA2_ETH_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c new file mode 100644 index 000000000000..8056a95e1265 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2014-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + */ + +#include <linux/net_tstamp.h> + +#include "dpni.h" /* DPNI_LINK_OPT_* */ +#include "dpaa2-eth.h" + +/* To be kept in sync with DPNI statistics */ +static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { + "[hw] rx frames", + "[hw] rx bytes", + "[hw] rx mcast frames", + "[hw] rx mcast bytes", + "[hw] rx bcast frames", + "[hw] rx bcast bytes", + "[hw] tx frames", + "[hw] tx bytes", + "[hw] tx mcast frames", + "[hw] tx mcast bytes", + "[hw] tx bcast frames", + "[hw] tx bcast bytes", + "[hw] rx filtered frames", + "[hw] rx discarded frames", + "[hw] rx nobuffer discards", + "[hw] tx discarded frames", + "[hw] tx confirmed frames", +}; + +#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) + +static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { + /* per-cpu stats */ + "[drv] tx conf frames", + "[drv] tx conf bytes", + "[drv] tx sg frames", + "[drv] tx sg bytes", + "[drv] tx realloc frames", + "[drv] rx sg frames", + "[drv] rx sg bytes", + "[drv] enqueue portal busy", + /* Channel stats */ + "[drv] dequeue portal busy", + "[drv] channel pull errors", + "[drv] cdan", +}; + +#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) + +static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, + struct ethtool_drvinfo *drvinfo) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); + + strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + sizeof(drvinfo->bus_info)); +} + +static int +dpaa2_eth_get_link_ksettings(struct net_device *net_dev, + struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_state state = {0}; + int err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); + if (err) { + netdev_err(net_dev, "ERROR %d getting link state\n", err); + goto out; + } + + /* At the moment, we have no way of interrogating the DPMAC + * from the DPNI side - and for that matter there may exist + * no DPMAC at all. So for now we just don't report anything + * beyond the DPNI attributes. + */ + if (state.options & DPNI_LINK_OPT_AUTONEG) + link_settings->base.autoneg = AUTONEG_ENABLE; + if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) + link_settings->base.duplex = DUPLEX_FULL; + link_settings->base.speed = state.rate; + +out: + return err; +} + +#define DPNI_DYNAMIC_LINK_SET_VER_MAJOR 7 +#define DPNI_DYNAMIC_LINK_SET_VER_MINOR 1 +static int +dpaa2_eth_set_link_ksettings(struct net_device *net_dev, + const struct ethtool_link_ksettings *link_settings) +{ + struct dpni_link_cfg cfg = {0}; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + int err = 0; + + /* If using an older MC version, the DPNI must be down + * in order to be able to change link settings. Taking steps to let + * the user know that. 
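+ * (Worked example, editor's note: dpaa2_eth_cmp_dpni_ver() compares the + * major version first and only then the minor, so against the 7.1 cutoff + * below a DPNI reporting 7.0 yields 0 - 1 = -1 and takes this path.)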
+ */ + if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_DYNAMIC_LINK_SET_VER_MAJOR, + DPNI_DYNAMIC_LINK_SET_VER_MINOR) < 0) { + if (netif_running(net_dev)) { + netdev_info(net_dev, "Interface must be brought down first.\n"); + return -EACCES; + } + } + + cfg.rate = link_settings->base.speed; + if (link_settings->base.autoneg == AUTONEG_ENABLE) + cfg.options |= DPNI_LINK_OPT_AUTONEG; + else + cfg.options &= ~DPNI_LINK_OPT_AUTONEG; + if (link_settings->base.duplex == DUPLEX_HALF) + cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; + else + cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; + + err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); + if (err) + /* ethtool will be loud enough if we return an error; no point + * in putting our own error message on the console by default + */ + netdev_dbg(net_dev, "ERROR %d setting link cfg\n", err); + + return err; +} + +static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { + strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { + strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ + return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; + default: + return -EOPNOTSUPP; + } +} + +/** Fill in hardware counters, as returned by MC. + */ +static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, + struct ethtool_stats *stats, + u64 *data) +{ + int i = 0; + int j, k, err; + int num_cnt; + union dpni_statistics dpni_stats; + u64 cdan = 0; + u64 portal_busy = 0, pull_err = 0; + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct dpaa2_eth_drv_stats *extras; + struct dpaa2_eth_ch_stats *ch_stats; + + memset(data, 0, + sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); + + /* Print standard counters, from DPNI statistics */ + for (j = 0; j <= 2; j++) { + err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, + j, &dpni_stats); + if (err != 0) + netdev_warn(net_dev, "dpni_get_stats(%d) failed\n", j); + switch (j) { + case 0: + num_cnt = sizeof(dpni_stats.page_0) / sizeof(u64); + break; + case 1: + num_cnt = sizeof(dpni_stats.page_1) / sizeof(u64); + break; + case 2: + num_cnt = sizeof(dpni_stats.page_2) / sizeof(u64); + break; + } + for (k = 0; k < num_cnt; k++) + *(data + i++) = dpni_stats.raw.counter[k]; + } + + /* Print per-cpu extra stats */ + for_each_online_cpu(k) { + extras = per_cpu_ptr(priv->percpu_extras, k); + for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) + *((__u64 *)data + i + j) += *((__u64 *)extras + j); + } + i += j; + + for (j = 0; j < priv->num_channels; j++) { + ch_stats = &priv->channel[j]->stats; + cdan += ch_stats->cdan; + portal_busy += ch_stats->dequeue_portal_busy; + pull_err += ch_stats->pull_err; + } + + *(data + i++) = portal_busy; + *(data + i++) = pull_err; + *(data + i++) = cdan; +} + +static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, + struct ethtool_rxnfc *rxnfc, u32 *rule_locs) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + switch (rxnfc->cmd) { + case ETHTOOL_GRXFH: + /* we purposely ignore cmd->flow_type for now, because the + * classifier only supports a single set of fields for all + * protocols + */ + rxnfc->data = 
priv->rx_hash_fields; + break; + case ETHTOOL_GRXRINGS: + rxnfc->data = dpaa2_eth_queue_count(priv); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +int dpaa2_phc_index = -1; +EXPORT_SYMBOL(dpaa2_phc_index); + +static int dpaa2_eth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + + info->phc_index = dpaa2_phc_index; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_ALL); + return 0; +} + +const struct ethtool_ops dpaa2_ethtool_ops = { + .get_drvinfo = dpaa2_eth_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = dpaa2_eth_get_link_ksettings, + .set_link_ksettings = dpaa2_eth_set_link_ksettings, + .get_sset_count = dpaa2_eth_get_sset_count, + .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, + .get_strings = dpaa2_eth_get_strings, + .get_rxnfc = dpaa2_eth_get_rxnfc, + .get_ts_info = dpaa2_eth_get_ts_info, +}; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpkg.h b/drivers/net/ethernet/freescale/dpaa2/dpkg.h new file mode 100644 index 000000000000..6de613b13e4d --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpkg.h @@ -0,0 +1,480 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2015 Freescale Semiconductor Inc. + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include <linux/types.h> + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. 
can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + u8 mask; + u8 offset; +}; + +/* Protocol fields */ + +/* Ethernet fields */ +#define NH_FLD_ETH_DA BIT(0) +#define NH_FLD_ETH_SA BIT(1) +#define NH_FLD_ETH_LENGTH BIT(2) +#define NH_FLD_ETH_TYPE BIT(3) +#define NH_FLD_ETH_FINAL_CKSUM BIT(4) +#define NH_FLD_ETH_PADDING BIT(5) +#define NH_FLD_ETH_ALL_FIELDS (BIT(6) - 1) + +/* VLAN fields */ +#define NH_FLD_VLAN_VPRI BIT(0) +#define NH_FLD_VLAN_CFI BIT(1) +#define NH_FLD_VLAN_VID BIT(2) +#define NH_FLD_VLAN_LENGTH BIT(3) +#define NH_FLD_VLAN_TYPE BIT(4) +#define NH_FLD_VLAN_ALL_FIELDS (BIT(5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/* IP (generic) fields */ +#define NH_FLD_IP_VER BIT(0) +#define NH_FLD_IP_DSCP BIT(2) +#define NH_FLD_IP_ECN BIT(3) +#define NH_FLD_IP_PROTO BIT(4) +#define NH_FLD_IP_SRC BIT(5) +#define NH_FLD_IP_DST BIT(6) +#define NH_FLD_IP_TOS_TC BIT(7) +#define NH_FLD_IP_ID BIT(8) +#define NH_FLD_IP_ALL_FIELDS (BIT(9) - 1) + +/* IPV4 fields */ +#define NH_FLD_IPV4_VER BIT(0) +#define NH_FLD_IPV4_HDR_LEN BIT(1) +#define NH_FLD_IPV4_TOS BIT(2) +#define NH_FLD_IPV4_TOTAL_LEN BIT(3) +#define NH_FLD_IPV4_ID BIT(4) +#define NH_FLD_IPV4_FLAG_D BIT(5) +#define NH_FLD_IPV4_FLAG_M BIT(6) +#define NH_FLD_IPV4_OFFSET BIT(7) +#define NH_FLD_IPV4_TTL BIT(8) +#define NH_FLD_IPV4_PROTO BIT(9) +#define NH_FLD_IPV4_CKSUM BIT(10) +#define NH_FLD_IPV4_SRC_IP BIT(11) +#define NH_FLD_IPV4_DST_IP BIT(12) +#define NH_FLD_IPV4_OPTS BIT(13) +#define NH_FLD_IPV4_OPTS_COUNT BIT(14) +#define NH_FLD_IPV4_ALL_FIELDS (BIT(15) - 1) + +/* IPV6 fields */ +#define NH_FLD_IPV6_VER BIT(0) +#define NH_FLD_IPV6_TC BIT(1) +#define NH_FLD_IPV6_SRC_IP BIT(2) +#define NH_FLD_IPV6_DST_IP BIT(3) +#define NH_FLD_IPV6_NEXT_HDR BIT(4) +#define NH_FLD_IPV6_FL BIT(5) +#define NH_FLD_IPV6_HOP_LIMIT BIT(6) +#define NH_FLD_IPV6_ID BIT(7) +#define NH_FLD_IPV6_ALL_FIELDS (BIT(8) - 1) + +/* ICMP fields */ +#define NH_FLD_ICMP_TYPE BIT(0) +#define NH_FLD_ICMP_CODE BIT(1) +#define NH_FLD_ICMP_CKSUM BIT(2) +#define NH_FLD_ICMP_ID BIT(3) +#define NH_FLD_ICMP_SQ_NUM BIT(4) +#define NH_FLD_ICMP_ALL_FIELDS (BIT(5) - 1) + +/* IGMP fields */ +#define NH_FLD_IGMP_VERSION BIT(0) +#define NH_FLD_IGMP_TYPE BIT(1) +#define NH_FLD_IGMP_CKSUM BIT(2) +#define NH_FLD_IGMP_DATA BIT(3) +#define NH_FLD_IGMP_ALL_FIELDS (BIT(4) - 1) + +/* TCP fields */ +#define NH_FLD_TCP_PORT_SRC BIT(0) +#define NH_FLD_TCP_PORT_DST BIT(1) +#define NH_FLD_TCP_SEQ BIT(2) +#define NH_FLD_TCP_ACK BIT(3) +#define NH_FLD_TCP_OFFSET BIT(4) +#define NH_FLD_TCP_FLAGS BIT(5) +#define NH_FLD_TCP_WINDOW BIT(6) +#define NH_FLD_TCP_CKSUM BIT(7) +#define NH_FLD_TCP_URGPTR BIT(8) +#define NH_FLD_TCP_OPTS BIT(9) +#define NH_FLD_TCP_OPTS_COUNT BIT(10) +#define NH_FLD_TCP_ALL_FIELDS (BIT(11) - 1) + +/* UDP fields */ +#define NH_FLD_UDP_PORT_SRC BIT(0) +#define NH_FLD_UDP_PORT_DST BIT(1) +#define NH_FLD_UDP_LEN BIT(2) +#define NH_FLD_UDP_CKSUM BIT(3) +#define NH_FLD_UDP_ALL_FIELDS (BIT(4) - 1) + +/* UDP-lite fields */ +#define NH_FLD_UDP_LITE_PORT_SRC BIT(0) +#define NH_FLD_UDP_LITE_PORT_DST BIT(1) +#define NH_FLD_UDP_LITE_ALL_FIELDS (BIT(2) - 1) + +/* 
UDP-encap-ESP fields */ +#define NH_FLD_UDP_ENC_ESP_PORT_SRC BIT(0) +#define NH_FLD_UDP_ENC_ESP_PORT_DST BIT(1) +#define NH_FLD_UDP_ENC_ESP_LEN BIT(2) +#define NH_FLD_UDP_ENC_ESP_CKSUM BIT(3) +#define NH_FLD_UDP_ENC_ESP_SPI BIT(4) +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM BIT(5) +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS (BIT(6) - 1) + +/* SCTP fields */ +#define NH_FLD_SCTP_PORT_SRC BIT(0) +#define NH_FLD_SCTP_PORT_DST BIT(1) +#define NH_FLD_SCTP_VER_TAG BIT(2) +#define NH_FLD_SCTP_CKSUM BIT(3) +#define NH_FLD_SCTP_ALL_FIELDS (BIT(4) - 1) + +/* DCCP fields */ +#define NH_FLD_DCCP_PORT_SRC BIT(0) +#define NH_FLD_DCCP_PORT_DST BIT(1) +#define NH_FLD_DCCP_ALL_FIELDS (BIT(2) - 1) + +/* IPHC fields */ +#define NH_FLD_IPHC_CID BIT(0) +#define NH_FLD_IPHC_CID_TYPE BIT(1) +#define NH_FLD_IPHC_HCINDEX BIT(2) +#define NH_FLD_IPHC_GEN BIT(3) +#define NH_FLD_IPHC_D_BIT BIT(4) +#define NH_FLD_IPHC_ALL_FIELDS (BIT(5) - 1) + +/* SCTP fields */ +#define NH_FLD_SCTP_CHUNK_DATA_TYPE BIT(0) +#define NH_FLD_SCTP_CHUNK_DATA_FLAGS BIT(1) +#define NH_FLD_SCTP_CHUNK_DATA_LENGTH BIT(2) +#define NH_FLD_SCTP_CHUNK_DATA_TSN BIT(3) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID BIT(4) +#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN BIT(5) +#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID BIT(6) +#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED BIT(7) +#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING BIT(8) +#define NH_FLD_SCTP_CHUNK_DATA_END BIT(9) +#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS (BIT(10) - 1) + +/* L2TPV2 fields */ +#define NH_FLD_L2TPV2_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV2_LENGTH_BIT BIT(1) +#define NH_FLD_L2TPV2_SEQUENCE_BIT BIT(2) +#define NH_FLD_L2TPV2_OFFSET_BIT BIT(3) +#define NH_FLD_L2TPV2_PRIORITY_BIT BIT(4) +#define NH_FLD_L2TPV2_VERSION BIT(5) +#define NH_FLD_L2TPV2_LEN BIT(6) +#define NH_FLD_L2TPV2_TUNNEL_ID BIT(7) +#define NH_FLD_L2TPV2_SESSION_ID BIT(8) +#define NH_FLD_L2TPV2_NS BIT(9) +#define NH_FLD_L2TPV2_NR BIT(10) +#define NH_FLD_L2TPV2_OFFSET_SIZE BIT(11) +#define NH_FLD_L2TPV2_FIRST_BYTE BIT(12) +#define NH_FLD_L2TPV2_ALL_FIELDS (BIT(13) - 1) + +/* L2TPV3 fields */ +#define NH_FLD_L2TPV3_CTRL_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT BIT(1) +#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT BIT(2) +#define NH_FLD_L2TPV3_CTRL_VERSION BIT(3) +#define NH_FLD_L2TPV3_CTRL_LENGTH BIT(4) +#define NH_FLD_L2TPV3_CTRL_CONTROL BIT(5) +#define NH_FLD_L2TPV3_CTRL_SENT BIT(6) +#define NH_FLD_L2TPV3_CTRL_RECV BIT(7) +#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE BIT(8) +#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS (BIT(9) - 1) + +#define NH_FLD_L2TPV3_SESS_TYPE_BIT BIT(0) +#define NH_FLD_L2TPV3_SESS_VERSION BIT(1) +#define NH_FLD_L2TPV3_SESS_ID BIT(2) +#define NH_FLD_L2TPV3_SESS_COOKIE BIT(3) +#define NH_FLD_L2TPV3_SESS_ALL_FIELDS (BIT(4) - 1) + +/* PPP fields */ +#define NH_FLD_PPP_PID BIT(0) +#define NH_FLD_PPP_COMPRESSED BIT(1) +#define NH_FLD_PPP_ALL_FIELDS (BIT(2) - 1) + +/* PPPoE fields */ +#define NH_FLD_PPPOE_VER BIT(0) +#define NH_FLD_PPPOE_TYPE BIT(1) +#define NH_FLD_PPPOE_CODE BIT(2) +#define NH_FLD_PPPOE_SID BIT(3) +#define NH_FLD_PPPOE_LEN BIT(4) +#define NH_FLD_PPPOE_SESSION BIT(5) +#define NH_FLD_PPPOE_PID BIT(6) +#define NH_FLD_PPPOE_ALL_FIELDS (BIT(7) - 1) + +/* PPP-Mux fields */ +#define NH_FLD_PPPMUX_PID BIT(0) +#define NH_FLD_PPPMUX_CKSUM BIT(1) +#define NH_FLD_PPPMUX_COMPRESSED BIT(2) +#define NH_FLD_PPPMUX_ALL_FIELDS (BIT(3) - 1) + +/* PPP-Mux sub-frame fields */ +#define NH_FLD_PPPMUX_SUBFRM_PFF BIT(0) +#define NH_FLD_PPPMUX_SUBFRM_LXT BIT(1) +#define NH_FLD_PPPMUX_SUBFRM_LEN BIT(2) +#define NH_FLD_PPPMUX_SUBFRM_PID BIT(3) 
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID BIT(4) +#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS (BIT(5) - 1) + +/* LLC fields */ +#define NH_FLD_LLC_DSAP BIT(0) +#define NH_FLD_LLC_SSAP BIT(1) +#define NH_FLD_LLC_CTRL BIT(2) +#define NH_FLD_LLC_ALL_FIELDS (BIT(3) - 1) + +/* NLPID fields */ +#define NH_FLD_NLPID_NLPID BIT(0) +#define NH_FLD_NLPID_ALL_FIELDS (BIT(1) - 1) + +/* SNAP fields */ +#define NH_FLD_SNAP_OUI BIT(0) +#define NH_FLD_SNAP_PID BIT(1) +#define NH_FLD_SNAP_ALL_FIELDS (BIT(2) - 1) + +/* LLC SNAP fields */ +#define NH_FLD_LLC_SNAP_TYPE BIT(0) +#define NH_FLD_LLC_SNAP_ALL_FIELDS (BIT(1) - 1) + +/* ARP fields */ +#define NH_FLD_ARP_HTYPE BIT(0) +#define NH_FLD_ARP_PTYPE BIT(1) +#define NH_FLD_ARP_HLEN BIT(2) +#define NH_FLD_ARP_PLEN BIT(3) +#define NH_FLD_ARP_OPER BIT(4) +#define NH_FLD_ARP_SHA BIT(5) +#define NH_FLD_ARP_SPA BIT(6) +#define NH_FLD_ARP_THA BIT(7) +#define NH_FLD_ARP_TPA BIT(8) +#define NH_FLD_ARP_ALL_FIELDS (BIT(9) - 1) + +/* RFC2684 fields */ +#define NH_FLD_RFC2684_LLC BIT(0) +#define NH_FLD_RFC2684_NLPID BIT(1) +#define NH_FLD_RFC2684_OUI BIT(2) +#define NH_FLD_RFC2684_PID BIT(3) +#define NH_FLD_RFC2684_VPN_OUI BIT(4) +#define NH_FLD_RFC2684_VPN_IDX BIT(5) +#define NH_FLD_RFC2684_ALL_FIELDS (BIT(6) - 1) + +/* User defined fields */ +#define NH_FLD_USER_DEFINED_SRCPORT BIT(0) +#define NH_FLD_USER_DEFINED_PCDID BIT(1) +#define NH_FLD_USER_DEFINED_ALL_FIELDS (BIT(2) - 1) + +/* Payload fields */ +#define NH_FLD_PAYLOAD_BUFFER BIT(0) +#define NH_FLD_PAYLOAD_SIZE BIT(1) +#define NH_FLD_MAX_FRM_SIZE BIT(2) +#define NH_FLD_MIN_FRM_SIZE BIT(3) +#define NH_FLD_PAYLOAD_TYPE BIT(4) +#define NH_FLD_FRAME_SIZE BIT(5) +#define NH_FLD_PAYLOAD_ALL_FIELDS (BIT(6) - 1) + +/* GRE fields */ +#define NH_FLD_GRE_TYPE BIT(0) +#define NH_FLD_GRE_ALL_FIELDS (BIT(1) - 1) + +/* MINENCAP fields */ +#define NH_FLD_MINENCAP_SRC_IP BIT(0) +#define NH_FLD_MINENCAP_DST_IP BIT(1) +#define NH_FLD_MINENCAP_TYPE BIT(2) +#define NH_FLD_MINENCAP_ALL_FIELDS (BIT(3) - 1) + +/* IPSEC AH fields */ +#define NH_FLD_IPSEC_AH_SPI BIT(0) +#define NH_FLD_IPSEC_AH_NH BIT(1) +#define NH_FLD_IPSEC_AH_ALL_FIELDS (BIT(2) - 1) + +/* IPSEC ESP fields */ +#define NH_FLD_IPSEC_ESP_SPI BIT(0) +#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM BIT(1) +#define NH_FLD_IPSEC_ESP_ALL_FIELDS (BIT(2) - 1) + +/* MPLS fields */ +#define NH_FLD_MPLS_LABEL_STACK BIT(0) +#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS (BIT(1) - 1) + +/* MACSEC fields */ +#define NH_FLD_MACSEC_SECTAG BIT(0) +#define NH_FLD_MACSEC_ALL_FIELDS (BIT(1) - 1) + +/* GTP fields */ +#define NH_FLD_GTP_TEID BIT(0) + +/* Supported protocols */ +enum net_prot { + NET_PROT_NONE = 0, + NET_PROT_PAYLOAD, + NET_PROT_ETH, + NET_PROT_VLAN, + NET_PROT_IPV4, + NET_PROT_IPV6, + NET_PROT_IP, + NET_PROT_TCP, + NET_PROT_UDP, + NET_PROT_UDP_LITE, + NET_PROT_IPHC, + NET_PROT_SCTP, + NET_PROT_SCTP_CHUNK_DATA, + NET_PROT_PPPOE, + NET_PROT_PPP, + NET_PROT_PPPMUX, + NET_PROT_PPPMUX_SUBFRM, + NET_PROT_L2TPV2, + NET_PROT_L2TPV3_CTRL, + NET_PROT_L2TPV3_SESS, + NET_PROT_LLC, + NET_PROT_LLC_SNAP, + NET_PROT_NLPID, + NET_PROT_SNAP, + NET_PROT_MPLS, + NET_PROT_IPSEC_AH, + NET_PROT_IPSEC_ESP, + NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ + NET_PROT_MACSEC, + NET_PROT_GRE, + NET_PROT_MINENCAP, + NET_PROT_DCCP, + NET_PROT_ICMP, + NET_PROT_IGMP, + NET_PROT_ARP, + NET_PROT_CAPWAP_DATA, + NET_PROT_CAPWAP_CTRL, + NET_PROT_RFC2684, + NET_PROT_ICMPV6, + NET_PROT_FCOE, + NET_PROT_FIP, + NET_PROT_ISCSI, + NET_PROT_GTP, + NET_PROT_USER_DEFINED_L2, + NET_PROT_USER_DEFINED_L3, + NET_PROT_USER_DEFINED_L4, + 
NET_PROT_USER_DEFINED_L5, + NET_PROT_USER_DEFINED_SHIM1, + NET_PROT_USER_DEFINED_SHIM2, + + NET_PROT_DUMMY_LAST +}; + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE' + * @extract.from_hdr.prot: Any of the supported headers + * @extract.from_hdr.type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @extract.from_hdr.field: One of the supported fields (NH_FLD_) + * @extract.from_hdr.size: Size in bytes + * @extract.from_hdr.offset: Byte offset + * @extract.from_hdr.hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP (0, HDR_INDEX_LAST); + * NET_PROT_IPV4 (0, HDR_INDEX_LAST); + * NET_PROT_IPV6 (0, HDR_INDEX_LAST); + * @extract.from_data.size: Size in bytes + * @extract.from_data.offset: Byte offset + * @extract.from_parse.size: Size in bytes + * @extract.from_parse.offset: Byte offset + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + union { + struct { + enum net_prot prot; + enum dpkg_extract_from_hdr_type type; + u32 field; + u8 size; + u8 offset; + u8 hdr_index; + } from_hdr; + struct { + u8 size; + u8 offset; + } from_data; + struct { + u8 size; + u8 offset; + } from_parse; + } extract; + + u8 num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + u8 num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +#endif /* __FSL_DPKG_H_ */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h new file mode 100644 index 000000000000..83698abce8b4 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h @@ -0,0 +1,518 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +#include "dpni.h" + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 0 +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) + +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x900) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ DPNI_CMD(0x010) +#define DPNI_CMDID_GET_IRQ DPNI_CMD(0x011) +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD(0x21B) + +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) + +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD(0x25D) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265) + +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) + +/* Macros for accessing command fields smaller than 1 byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) + +#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +struct dpni_cmd_open { + __le32 dpni_id; +}; + +#define
DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) +struct dpni_cmd_set_pools { + /* cmd word 0 */ + u8 num_dpbp; + u8 backup_pool_mask; + __le16 pad; + /* cmd word 0..4 */ + __le32 dpbp_id[DPNI_MAX_DPBP]; + /* cmd word 4..6 */ + __le16 buffer_size[DPNI_MAX_DPBP]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + u8 enabled; +}; + +struct dpni_rsp_get_irq { + /* response word 0 */ + __le32 irq_val; + __le32 pad; + /* response word 1 */ + __le64 irq_addr; + /* response word 2 */ + __le32 irq_num; + __le32 type; +}; + +struct dpni_cmd_set_irq_enable { + u8 enable; + u8 pad[3]; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_enable { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_enable { + u8 enabled; +}; + +struct dpni_cmd_set_irq_mask { + __le32 mask; + u8 irq_index; +}; + +struct dpni_cmd_get_irq_mask { + __le32 pad; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_mask { + __le32 mask; +}; + +struct dpni_cmd_get_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_irq_status { + __le32 status; +}; + +struct dpni_cmd_clear_irq_status { + __le32 status; + u8 irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + __le32 options; + u8 num_queues; + u8 num_tcs; + u8 mac_filter_entries; + u8 pad0; + /* response word 1 */ + u8 vlan_filter_entries; + u8 pad1; + u8 qos_entries; + u8 pad2; + __le16 fs_entries; + __le16 pad3; + /* response word 2 */ + u8 qos_key_size; + u8 fs_key_size; + __le16 wriop_version; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + __le32 errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + u8 flags; +}; + +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. 
+ */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + u8 qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + u8 pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* response word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + u8 qtype; + u8 pad0[3]; + __le16 options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + u8 flags; + u8 pad1; + /* cmd word 1 */ + __le16 private_data_size; + __le16 data_align; + __le16 head_room; + __le16 tail_room; +}; + +struct dpni_cmd_set_offload { + u8 pad[3]; + u8 dpni_offload; + __le32 config; +}; + +struct dpni_cmd_get_offload { + u8 pad[3]; + u8 dpni_offload; +}; + +struct dpni_rsp_get_offload { + __le32 pad; + __le32 config; +}; + +struct dpni_cmd_get_qdid { + u8 qtype; +}; + +struct dpni_rsp_get_qdid { + __le16 qdid; +}; + +struct dpni_rsp_get_tx_data_offset { + __le16 data_offset; +}; + +struct dpni_cmd_get_statistics { + u8 page_number; +}; + +struct dpni_rsp_get_statistics { + __le64 counter[DPNI_STATISTICS_CNT]; +}; + +struct dpni_cmd_set_link_cfg { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + __le32 rate; + __le32 pad1; + /* cmd word 2 */ + __le64 options; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 + +struct dpni_rsp_get_link_state { + /* response word 0 */ + __le32 pad0; + /* from LSB: up:1 */ + u8 flags; + u8 pad1[3]; + /* response word 1 */ + __le32 rate; + __le32 pad2; + /* response word 2 */ + __le64 options; +}; + +struct dpni_cmd_set_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + __le16 max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_multicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + u8 enable; +}; + +struct dpni_rsp_get_unicast_promisc { + u8 enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_add_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +struct dpni_cmd_remove_mac_addr { + __le16 pad; + u8 mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + u8 flags; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 + +struct dpni_cmd_set_rx_tc_dist { + /* cmd word 0 */ + __le16 dist_size; + u8 tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + u8 flags; + __le16 pad0; + __le16 default_flow_id; + /* cmd word 1..5 */ + __le64 pad1[5]; + /* cmd word 6 */ + __le64 key_cfg_iova; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + u8 mask; + u8 offset; +}; + +#define DPNI_EFH_TYPE_SHIFT 0 +#define DPNI_EFH_TYPE_SIZE 4 +#define DPNI_EXTRACT_TYPE_SHIFT 0 +#define DPNI_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + u8 prot; + /* 
EFH type stored in the 4 least significant bits */ + u8 efh_type; + u8 size; + u8 offset; + __le32 field; + /* word 1 */ + u8 hdr_index; + u8 constant; + u8 num_of_repeats; + u8 num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + u8 extract_type; + u8 pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + u8 num_extracts; + u8 pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +struct dpni_cmd_get_queue { + u8 qtype; + u8 tc; + u8 index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + __le64 pad0; + /* response word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */ + u8 flags; + /* response word 2 */ + __le64 flc; + /* response word 3 */ + __le64 user_context; + /* response word 4 */ + __le32 fqid; + __le16 qdbin; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + u8 qtype; + u8 tc; + u8 index; + u8 options; + __le32 pad0; + /* cmd word 1 */ + __le32 dest_id; + __le16 pad1; + u8 dest_prio; + u8 flags; + /* cmd word 2 */ + __le64 flc; + /* cmd word 3 */ + __le64 user_context; +}; + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; + __le32 pad0; + /* cmd word 1 */ + /* Only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +struct dpni_cmd_get_taildrop { + u8 congestion_point; + u8 qtype; + u8 tc; + u8 index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + __le64 pad0; + /* cmd word 1 */ + /* only least significant bit is relevant */ + u8 enable; + u8 pad1; + u8 units; + u8 pad2; + __le32 threshold; +}; + +struct dpni_rsp_get_api_version { + __le16 major; + __le16 minor; +}; + +#endif /* _FSL_DPNI_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c new file mode 100644 index 000000000000..d6ac26797cec --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c @@ -0,0 +1,1600 @@ +// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/* Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/fsl/mc.h> +#include "dpni.h" +#include "dpni-cmd.h" + +/** + * dpni_prepare_key_cfg() - Prepare extract parameters + * @cfg: Full Key Generation profile (rule) definition + * @key_cfg_buf: Zeroed 256-byte memory buffer, to be mapped to DMA afterwards + * + * This function has to be called before the following functions: + * - dpni_set_rx_tc_dist() + * - dpni_set_qos_table() + */ +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, u8 *key_cfg_buf) +{ + int i, j; + struct dpni_ext_set_rx_tc_dist *dpni_ext; + struct dpni_dist_extract *extr; + + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) + return -EINVAL; + + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext->num_extracts = cfg->num_extracts; + + for (i = 0; i < cfg->num_extracts; i++) { + extr = &dpni_ext->extracts[i]; + + switch (cfg->extracts[i].type) { + case DPKG_EXTRACT_FROM_HDR: + extr->prot = cfg->extracts[i].extract.from_hdr.prot; + dpni_set_field(extr->efh_type, EFH_TYPE, + cfg->extracts[i].extract.from_hdr.type); + extr->size = cfg->extracts[i].extract.from_hdr.size; + extr->offset = cfg->extracts[i].extract.from_hdr.offset; + extr->field = cpu_to_le32( + cfg->extracts[i].extract.from_hdr.field); + extr->hdr_index = + cfg->extracts[i].extract.from_hdr.hdr_index; + break; + case DPKG_EXTRACT_FROM_DATA: + extr->size = cfg->extracts[i].extract.from_data.size; + extr->offset = + cfg->extracts[i].extract.from_data.offset; + break; + case DPKG_EXTRACT_FROM_PARSE: + extr->size = cfg->extracts[i].extract.from_parse.size; + extr->offset = + cfg->extracts[i].extract.from_parse.offset; + break; + default: + return -EINVAL; + } + + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; + dpni_set_field(extr->extract_type, EXTRACT_TYPE, + cfg->extracts[i].type); + + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { + extr->masks[j].mask = cfg->extracts[i].masks[j].mask; + extr->masks[j].offset = + cfg->extracts[i].masks[j].offset; + } + } + + return 0; +} + +/** + * dpni_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpni_id: DPNI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpni_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise.
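+ * + * Example, a minimal usage sketch (hypothetical: 'mc_io' and 'dpni_id' are + * assumed to come from the caller's fsl-mc probe context; error handling + * is trimmed): + * + * u16 token; + * int err; + * + * err = dpni_open(mc_io, 0, dpni_id, &token); + * if (err) + * return err; + * + * ... issue dpni_xxx() commands using 'token' ... + * + * dpni_close(mc_io, 0, token);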
+ */ +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * Mandatory for DPNI operation. + * Warning: Allowed only when DPNI is disabled. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < DPNI_MAX_DPBP; i++) { + cmd_params->dpbp_id[i] = cpu_to_le32(cfg->pools[i].dpbp_id); + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_reset() - Reset the DPNI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token) +{ + struct fsl_mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_irq_enable() - Set overall interrupt state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Interrupt state: enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable controls the + * overall interrupt state. If the interrupt is disabled, no cause will + * trigger an interrupt. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise.
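+ * + * Example, a sketch of the arming sequence whose state this getter reads + * back (hypothetical; 'mc_io'/'token' come from an earlier dpni_open(), + * DPNI_IRQ_INDEX and DPNI_IRQ_EVENT_LINK_CHANGED are the constants + * declared in dpni.h): + * + * err = dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX, + * DPNI_IRQ_EVENT_LINK_CHANGED); + * if (!err) + * err = dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);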
+ */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. 
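+ * + * Note that, judging by the command encoding below, @status is also used + * as an input: the value it holds on entry selects which causes are + * queried. A sketch (hypothetical, reusing 'mc_io'/'token' from above): + * + * u32 status = DPNI_IRQ_EVENT_LINK_CHANGED; + * + * err = dpni_get_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, &status); + * if (!err && (status & DPNI_IRQ_EVENT_LINK_CHANGED)) + * dpni_clear_irq_status(mc_io, 0, token, DPNI_IRQ_INDEX, + * DPNI_IRQ_EVENT_LINK_CHANGED);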
+ */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_attr *attr) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_tcs = rsp_params->num_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * this function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. 
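+ * + * Example, a sketch (hypothetical: DPNI_ERROR_ACTION_DISCARD stands in for + * whichever dpni_error_action constant the caller wants, and 'error_mask' + * is caller-defined): + * + * struct dpni_error_cfg err_cfg = { + * .errors = error_mask, + * .error_action = DPNI_ERROR_ACTION_DISCARD, + * .set_frame_annotation = 1, + * }; + * + * err = dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);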
+ */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_error_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. 
+ * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16(layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 config) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +int dpni_get_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 *config) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. 
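+ * + * Example, a sketch (hypothetical; DPNI_QUEUE_TX is assumed to be one of + * the dpni_queue_type values declared in dpni.h): + * + * u16 qdid; + * + * err = dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &qdid); + * + * 'qdid' would then be passed as the destination in enqueue operations.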
+ */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u16 *qdid) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *data_offset) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_link_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_state *state) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + + return 0; +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in + * bytes); frame is discarded if its + * length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *max_frame_length) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
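+ * + * Example, a sketch pairing the setter above with this getter (same + * hypothetical 'mc_io'/'token' as in the other examples): + * + * int en; + * + * err = dpni_set_multicast_promisc(mc_io, 0, token, 1); + * if (!err) + * err = dpni_get_multicast_promisc(mc_io, 0, token, &en); + * + * On success 'en' reads back as 1.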
+ */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. 
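+ * + * The address is passed in the usual network byte order; the byte reversal + * into the command descriptor (see the copy loops below) is internal to + * this API. A sketch (hypothetical address): + * + * const u8 mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 }; + * u8 readback[6]; + * + * err = dpni_set_primary_mac_addr(mc_io, 0, token, mac); + * if (!err) + * err = dpni_get_primary_mac_addr(mc_io, 0, token, readback);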
+ */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. 
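+ * + * Example, a sketch of typical filter maintenance (hypothetical secondary + * address): + * + * const u8 uc_addr[6] = { 0x00, 0x04, 0x9f, 0x0a, 0x0b, 0x0c }; + * + * err = dpni_add_mac_addr(mc_io, 0, token, uc_addr); + * ... + * err = dpni_remove_mac_addr(mc_io, 0, token, uc_addr); + * + * or drop every learned unicast and multicast filter at once: + * + * err = dpni_clear_mac_filters(mc_io, 0, token, 1, 1);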
+ */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int unicast, + int multicast) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_clear_mac_filters *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Traffic class distribution configuration + * + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() + * first to prepare the key_cfg_iova parameter + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rx_tc_dist_cfg *cfg) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_rx_tc_dist *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + cmd_params->tc_id = tc_id; + dpni_set_field(cmd_params->flags, DIST_MODE, cfg->dist_mode); + dpni_set_field(cmd_params->flags, MISS_ACTION, cfg->fs_cfg.miss_action); + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. 
Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. 
+ * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 page, + union dpni_statistics *stat) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @qtype: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_taildrop *taildrop) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + dpni_set_field(cmd_params->enable, ENABLE, taildrop->enable); + cmd_params->units = taildrop->units; + cmd_params->threshold = cpu_to_le32(taildrop->threshold); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_taildrop() - Get taildrop information + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @qtype: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise.
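+ * + * Example, a sketch mirroring dpni_set_taildrop() (hypothetical: + * DPNI_CP_QUEUE, DPNI_QUEUE_RX and DPNI_CONGESTION_UNIT_BYTES are assumed + * to be among the constants declared in dpni.h): + * + * struct dpni_taildrop td = { + * .enable = 1, + * .units = DPNI_CONGESTION_UNIT_BYTES, + * .threshold = 64 * 1024, + * }; + * + * err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE, + * DPNI_QUEUE_RX, 0, 0, &td);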
+ */ +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_taildrop *taildrop) +{ + struct fsl_mc_command cmd = { 0 }; + struct dpni_cmd_get_taildrop *cmd_params; + struct dpni_rsp_get_taildrop *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; + taildrop->enable = dpni_get_field(rsp_params->enable, ENABLE); + taildrop->units = rsp_params->units; + taildrop->threshold = le32_to_cpu(rsp_params->threshold); + + return 0; +} + +/** + * dpni_get_api_version() - Get Data Path Network Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path network interface API + * @minor_ver: Minor version of data path network interface API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver) +{ + struct dpni_rsp_get_api_version *rsp_params; + struct fsl_mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, + cmd_flags, 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h new file mode 100644 index 000000000000..b378a00c7c53 --- /dev/null +++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h @@ -0,0 +1,824 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016 NXP + */ +#ifndef __FSL_DPNI_H +#define __FSL_DPNI_H + +#include "dpkg.h" + +struct fsl_mc_io; + +/** + * Data Path Network Interface API + * Contains initialization APIs and runtime control APIs for DPNI + */ + +/** General DPNI macros */ + +/** + * Maximum number of traffic classes + */ +#define DPNI_MAX_TC 8 +/** + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 + +/** + * All traffic classes considered; see dpni_set_queue() + */ +#define DPNI_ALL_TCS (u8)(-1) +/** + * All flows within traffic class considered; see dpni_set_queue() + */ +#define DPNI_ALL_TC_FLOWS (u16)(-1) +/** + * Generate new flow ID; see dpni_set_queue() + */ +#define DPNI_NEW_FLOW_ID (u16)(-1) + +/** + * Tx traffic is always released to a buffer pool on transmit; there are no + * resources allocated to have the frames confirmed back to the source after + * transmission. + */ +#define DPNI_OPT_TX_FRM_RELEASE 0x000001 +/** + * Disables support for MAC address filtering for addresses other than the + * primary MAC address. This affects both unicast and multicast. Promiscuous mode can + * still be enabled/disabled for both unicast and multicast. If promiscuous mode + * is disabled, only traffic matching the primary MAC address will be accepted.
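+ * + * As an illustrative sketch (not part of this patch), a driver that finds + * this option set at probe time can fall back to promiscuous reception + * instead of programming per-address filters: + * + * struct dpni_attr attr; + * + * err = dpni_get_attributes(mc_io, 0, token, &attr); + * if (!err && (attr.options & DPNI_OPT_NO_MAC_FILTER)) + * err = dpni_set_unicast_promisc(mc_io, 0, token, 1);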
+ */ +#define DPNI_OPT_NO_MAC_FILTER 0x000002 +/** + * Allocate policers for this DPNI. They can be used to rate-limit traffic on + * a per traffic class (TC) basis. + */ +#define DPNI_OPT_HAS_POLICING 0x000004 +/** + * Congestion can be managed in several ways, allowing the buffer pool to + * deplete on ingress, taildrop on each queue or use congestion groups for sets + * of queues. If set, it configures a single congestion group across all TCs. + * If reset, a congestion group is allocated for each TC. Only relevant if the + * DPNI has multiple traffic classes. + */ +#define DPNI_OPT_SHARED_CONGESTION 0x000008 +/** + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all + * look-ups are exact match. Note that TCAM is not available on LS1088 and its + * variants. Setting this bit on these SoCs will trigger an error. + */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. + */ +#define DPNI_OPT_NO_FS 0x000020 + +int dpni_open(struct fsl_mc_io *mc_io, + u32 cmd_flags, + int dpni_id, + u16 *token); + +int dpni_close(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ +struct dpni_pools_cfg { + u8 num_dpbp; + struct { + int dpbp_id; + u16 buffer_size; + int backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_disable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u8 *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 irq_index, + u32 status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx and Rx queues used for traffic distribution. + * @num_tcs: Number of traffic classes (TCs) reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering table.
+ * @qos_entries: Number of entries in the QoS classification table. + * @fs_entries: Number of entries in the flow steering table. + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger + * than this when adding QoS entries will result in an error. + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a + * key larger than this when composing the hash + FS key will + * result in an error. + * @wriop_version: Version of WRIOP HW block. The 3 version values are stored + * on 6, 5, 5 bits respectively. + */ +struct dpni_attr { + u32 options; + u8 num_queues; + u8 num_tcs; + u8 mac_filter_entries; + u8 vlan_filter_entries; + u8 qos_entries; + u16 fs_entries; + u8 qos_key_size; + u8 fs_key_size; + u16 wriop_version; +}; + +int dpni_get_attributes(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_attr *attr); + +/** + * DPNI errors + */ + +/** + * Extract out of frame header error + */ +#define DPNI_ERROR_EOFHE 0x00020000 +/** + * Frame length error + */ +#define DPNI_ERROR_FLE 0x00002000 +/** + * Frame physical error + */ +#define DPNI_ERROR_FPE 0x00001000 +/** + * Parsing header error + */ +#define DPNI_ERROR_PHE 0x00000020 +/** + * Parser L3 checksum error + */ +#define DPNI_ERROR_L3CE 0x00000004 +/** + * Parser L4 checksum error + */ +#define DPNI_ERROR_L4CE 0x00000001 + +/** + * enum dpni_error_action - Defines DPNI behavior for errors + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue + */ +enum dpni_error_action { + DPNI_ERROR_ACTION_DISCARD = 0, + DPNI_ERROR_ACTION_CONTINUE = 1, + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 +}; + +/** + * struct dpni_error_cfg - Structure representing DPNI errors treatment + * @errors: Errors mask; use 'DPNI_ERROR_<X>' values + * @error_action: The desired action for the errors mask + * @set_frame_annotation: Set to '1' to mark the errors in frame annotation + * status (FAS); relevant only for the non-discard action + */ +struct dpni_error_cfg { + u32 errors; + enum dpni_error_action error_action; + int set_frame_annotation; +}; + +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_error_cfg *cfg); + +/** + * DPNI buffer layout modification options + */ + +/** + * Select to modify the time-stamp setting + */ +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 +/** + * Select to modify the parser-result setting; not applicable for Tx + */ +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 +/** + * Select to modify the frame-status setting + */ +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 +/** + * Select to modify the private-data-size setting + */ +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 +/** + * Select to modify the data-alignment setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 +/** + * Select to modify the data-head-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 +/** + * Select to modify the data-tail-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 + +/** + * struct dpni_buffer_layout - Structure representing DPNI buffer layout + * @options: Flags representing the suggested modifications to the buffer + * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags + * @pass_timestamp: Pass timestamp value + * @pass_parser_result: Pass parser results + * @pass_frame_status: Pass frame status + * @private_data_size:
Size kept for private data (in bytes) + * @data_align: Data alignment + * @data_head_room: Data head room + * @data_tail_room: Data tail room + */ +struct dpni_buffer_layout { + u32 options; + int pass_timestamp; + int pass_parser_result; + int pass_frame_status; + u16 private_data_size; + u16 data_align; + u16 data_head_room; + u16 data_tail_room; +}; + +/** + * enum dpni_queue_type - Identifies a type of queue targeted by the command + * @DPNI_QUEUE_RX: Rx queue + * @DPNI_QUEUE_TX: Tx queue + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue + * @DPNI_QUEUE_RX_ERR: Rx error queue + */ +enum dpni_queue_type { + DPNI_QUEUE_RX, + DPNI_QUEUE_TX, + DPNI_QUEUE_TX_CONFIRM, + DPNI_QUEUE_RX_ERR, +}; + +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout); + +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout); + +/** + * enum dpni_offload - Identifies a type of offload targeted by the command + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation + */ +enum dpni_offload { + DPNI_OFF_RX_L3_CSUM, + DPNI_OFF_RX_L4_CSUM, + DPNI_OFF_TX_L3_CSUM, + DPNI_OFF_TX_L4_CSUM, +}; + +int dpni_set_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 config); + +int dpni_get_offload(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_offload type, + u32 *config); + +int dpni_get_qdid(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u16 *qdid); + +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *data_offset); + +#define DPNI_STATISTICS_CNT 7 + +/** + * union dpni_statistics - Union describing the DPNI statistics + * @page_0: Page_0 statistics structure + * @page_0.ingress_all_frames: Ingress frame count + * @page_0.ingress_all_bytes: Ingress byte count + * @page_0.ingress_multicast_frames: Ingress multicast frame count + * @page_0.ingress_multicast_bytes: Ingress multicast byte count + * @page_0.ingress_broadcast_frames: Ingress broadcast frame count + * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count + * @page_1: Page_1 statistics structure + * @page_1.egress_all_frames: Egress frame count + * @page_1.egress_all_bytes: Egress byte count + * @page_1.egress_multicast_frames: Egress multicast frame count + * @page_1.egress_multicast_bytes: Egress multicast byte count + * @page_1.egress_broadcast_frames: Egress broadcast frame count + * @page_1.egress_broadcast_bytes: Egress broadcast byte count + * @page_2: Page_2 statistics structure + * @page_2.ingress_filtered_frames: Ingress filtered frame count + * @page_2.ingress_discarded_frames: Ingress discarded frame count + * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to + * lack of buffers + * @page_2.egress_discarded_frames: Egress discarded frame count + * @page_2.egress_confirmed_frames: Egress confirmed frame count + * @raw: raw statistics structure, used to index counters + */ +union dpni_statistics { + struct { + u64 ingress_all_frames; + u64 ingress_all_bytes; + u64 ingress_multicast_frames; + u64 ingress_multicast_bytes; + u64 ingress_broadcast_frames; + u64 ingress_broadcast_bytes; + } page_0; + struct { + u64 egress_all_frames; + u64 
egress_all_bytes; + u64 egress_multicast_frames; + u64 egress_multicast_bytes; + u64 egress_broadcast_frames; + u64 egress_broadcast_bytes; + } page_1; + struct { + u64 ingress_filtered_frames; + u64 ingress_discarded_frames; + u64 ingress_nobuffer_discards; + u64 egress_discarded_frames; + u64 egress_confirmed_frames; + } page_2; + struct { + u64 counter[DPNI_STATISTICS_CNT]; + } raw; +}; + +int dpni_get_statistics(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 page, + union dpni_statistics *stat); + +/** + * Enable auto-negotiation + */ +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable asymmetric pause frames + */ +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpni_link_cfg - Structure representing DPNI link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + */ +struct dpni_link_cfg { + u32 rate; + u64 options; +}; + +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const struct dpni_link_cfg *cfg); + +/** + * struct dpni_link_state - Structure representing DPNI link state + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + * @up: Link state; '0' for down, '1' for up + */ +struct dpni_link_state { + u32 rate; + u64 options; + int up; +}; + +int dpni_get_link_state(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + struct dpni_link_state *state); + +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 max_frame_length); + +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u16 *max_frame_length); + +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en); + +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int en); + +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int *en); + +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]); + +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 mac_addr[6]); + +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + const u8 mac_addr[6]); + +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + int unicast, + int multicast); + +/** + * enum dpni_dist_mode - DPNI distribution mode + * @DPNI_DIST_MODE_NONE: No distribution + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation + */ +enum dpni_dist_mode { + DPNI_DIST_MODE_NONE = 0, + DPNI_DIST_MODE_HASH = 1, + DPNI_DIST_MODE_FS = 2 +}; + +/** + * enum dpni_fs_miss_action - DPNI Flow Steering miss action + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use 
explicit flow-id + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash + */ +enum dpni_fs_miss_action { + DPNI_FS_MISS_DROP = 0, + DPNI_FS_MISS_EXPLICIT_FLOWID = 1, + DPNI_FS_MISS_HASH = 2 +}; + +/** + * struct dpni_fs_tbl_cfg - Flow Steering table configuration + * @miss_action: Miss action selection + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' + */ +struct dpni_fs_tbl_cfg { + enum dpni_fs_miss_action miss_action; + u16 default_flow_id; +}; + +int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + u8 *key_cfg_buf); + +/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, + * 112,128,192,224,256,384,448,512,768,896,1024 + * @dist_mode: Distribution mode + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpni_prepare_key_cfg(); relevant only when + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' + * @fs_cfg: Flow Steering table configuration; only relevant if + * 'dist_mode = DPNI_DIST_MODE_FS' + */ +struct dpni_rx_tc_dist_cfg { + u16 dist_size; + enum dpni_dist_mode dist_mode; + u64 key_cfg_iova; + struct dpni_fs_tbl_cfg fs_cfg; +}; + +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + u8 tc_id, + const struct dpni_rx_tc_dist_cfg *cfg); + +/** + * enum dpni_dest - DPNI destination types + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and + * does not generate FQDAN notifications; user is expected to + * dequeue from the queue based on polling or other user-defined + * method + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON + * object; user is expected to dequeue from the DPCON channel + */ +enum dpni_dest { + DPNI_DEST_NONE = 0, + DPNI_DEST_DPIO = 1, + DPNI_DEST_DPCON = 2 +}; + +/** + * struct dpni_queue - Queue structure + * @destination: Destination structure + * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0. + * Identifies either a DPIO or a DPCON object. + * Not relevant for Tx queues. + * @destination.type: May be one of the following: + * 0 - No destination, queue can be manually + * queried, but will not push traffic or + * notifications to a DPIO; + * 1 - The destination is a DPIO. When traffic + * becomes available in the queue a FQDAN + * (FQ data available notification) will be + * generated to selected DPIO; + * 2 - The destination is a DPCON. The queue is + * associated with a DPCON object for the + * purpose of scheduling between multiple + * queues. The DPCON may be independently + * configured to generate notifications. + * Not relevant for Tx queues. + * @destination.hold_active: Hold active, maintains a queue scheduled for longer + * in a DPIO during dequeue to reduce spread of traffic. + * Only relevant if queues are + * not affined to a single DPIO. + * @user_context: User data, presented to the user along with any frames + * from this queue. Not relevant for Tx queues. + * @flc: FD Flow Context structure + * @flc.value: Default FLC value for traffic dequeued from + * this queue. 
Please check description of FD + * structure for more information. + * Note that FLC values set using dpni_add_fs_entry, + * if any, take precedence over values per queue. + * @flc.stash_control: Boolean, indicates whether the 6 least + * significant bits are used for stash control. If set, the 6 + * least significant bits in value are interpreted as follows: + * - bits 0-1: indicates the number of 64 byte units of context + * that are stashed. FLC value is interpreted as a memory address + * in this case, excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame + * annotation to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame + * data to be stashed. Frame data is placed at FD[ADDR] + + * FD[OFFSET]. + * For more details check the Frame Descriptor section in the + * hardware documentation. + */ +struct dpni_queue { + struct { + u16 id; + enum dpni_dest type; + char hold_active; + u8 priority; + } destination; + u64 user_context; + struct { + u64 value; + char stash_control; + } flc; +}; + +/** + * struct dpni_queue_id - Queue identification, used for enqueue commands + * or queue control + * @fqid: FQID used for enqueueing to and/or configuration of this specific FQ + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. Only relevant + * for Tx queues. + */ +struct dpni_queue_id { + u32 fqid; + u16 qdbin; +}; + +/** + * Set User Context + */ +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 +#define DPNI_QUEUE_OPT_DEST 0x00000002 +#define DPNI_QUEUE_OPT_FLC 0x00000004 +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + u8 options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_queue_type qtype, + u8 tc, + u8 index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + +/** + * enum dpni_congestion_point - Congestion point types + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and + * QUEUE_INDEX + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used to + * define the DPNI this can be either per TC (default) or per + * interface (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used. + */ +enum dpni_congestion_point { + DPNI_CP_QUEUE, + DPNI_CP_GROUP, +}; + +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. + * @units: Indicates the unit of THRESHOLD. Queue taildrop only supports + * byte units; this field is ignored and assumed = 0 if + * CONGESTION_POINT is 0. + * @threshold: Threshold value, in units identified by UNITS field. Value 0 + * cannot be used as a valid taildrop threshold; THRESHOLD must + * be > 0 if the taildrop is enabled. 
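+ * + * An illustrative sketch (not part of this patch) that enables a + * frame-count taildrop on the congestion group of traffic class 0, using + * the prototypes declared below: + * + * struct dpni_taildrop td = { + * .enable = 1, + * .units = DPNI_CONGESTION_UNIT_FRAMES, + * .threshold = 2048, + * }; + * + * err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_GROUP, + * DPNI_QUEUE_RX, 0, 0, &td);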
+ */ +struct dpni_taildrop { + char enable; + enum dpni_congestion_unit units; + u32 threshold; +}; + +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + u8 tc, + u8 q_index, + struct dpni_taildrop *taildrop); + +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + u8 tc, + u8 q_index, + struct dpni_taildrop *taildrop); + +/** + * struct dpni_rule_cfg - Rule configuration for table lookup + * @key_iova: I/O virtual address of the key (must be in DMA-able memory) + * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) + * @key_size: key and mask size (in bytes) + */ +struct dpni_rule_cfg { + u64 key_iova; + u64 mask_iova; + u8 key_size; +}; + +int dpni_get_api_version(struct fsl_mc_io *mc_io, + u32 cmd_flags, + u16 *major_ver, + u16 *minor_ver); + +#endif /* __FSL_DPNI_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2708297e7795..ce74b7a46d07 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1946,16 +1946,15 @@ static int fec_enet_mii_probe(struct net_device *ndev) /* mask with MAC supported features */ if (fep->quirks & FEC_QUIRK_HAS_GBIT) { - phy_dev->supported &= PHY_GBIT_FEATURES; - phy_dev->supported &= ~SUPPORTED_1000baseT_Half; + phy_set_max_speed(phy_dev, 1000); + phy_remove_link_mode(phy_dev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); #if !defined(CONFIG_M5272) - phy_dev->supported |= SUPPORTED_Pause; + phy_support_sym_pause(phy_dev); #endif } else - phy_dev->supported &= PHY_BASIC_FEATURES; - - phy_dev->advertising = phy_dev->supported; + phy_set_max_speed(phy_dev, 100); fep->link = 0; fep->full_duplex = 0; @@ -2055,8 +2054,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) node = of_get_child_by_name(pdev->dev.of_node, "mdio"); err = of_mdiobus_register(fep->mii_bus, node); - if (node) - of_node_put(node); + of_node_put(node); if (err) goto err_out_free_mdiobus; @@ -2230,13 +2228,8 @@ static int fec_enet_set_pauseparam(struct net_device *ndev, fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; - if (pause->rx_pause || pause->autoneg) { - ndev->phydev->supported |= ADVERTISED_Pause; - ndev->phydev->advertising |= ADVERTISED_Pause; - } else { - ndev->phydev->supported &= ~ADVERTISED_Pause; - ndev->phydev->advertising &= ~ADVERTISED_Pause; - } + phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, + pause->autoneg); if (pause->autoneg) { if (netif_running(ndev)) diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index ac2c3f6a12bc..82722d05fedb 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -446,8 +446,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) goto error; } - snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s@%llx", np->name, - (unsigned long long)res.start); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%pOFn@%llx", np, + (unsigned long long)res.start); priv->map = of_iomap(np, 0); if (!priv->map) { diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index f27f9bae1a4a..c488d31e2af6 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -102,8 +102,6 @@ #include <linux/phy_fixed.h> #include <linux/of.h> #include <linux/of_net.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> #include "gianfar.h" @@ -1814,8 +1812,8 @@ static int init_phy(struct net_device *dev) phydev->supported &= (GFAR_SUPPORTED | gigabit_support); phydev->advertising = phydev->supported; - /* Add support for flow control, but don't advertise it by default */ - phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + /* Add support for flow control */ + phy_support_asym_pause(phydev); /* disable EEE autoneg, EEE not supported by eTSEC */ memset(&edata, 0, sizeof(struct ethtool_eee)); diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 395a5266ea30..0d76e15cd6dd 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -230,7 +230,7 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, /* Make sure we return a number greater than 0 * if usecs > 0 */ - return (usecs * 1000 + count - 1) / count; + return DIV_ROUND_UP(usecs * 1000, count); } /* Convert ethernet clock ticks to microseconds */ @@ -503,65 +503,44 @@ static int gfar_spauseparam(struct net_device *dev, struct gfar_private *priv = netdev_priv(dev); struct phy_device *phydev = dev->phydev; struct gfar __iomem *regs = priv->gfargrp[0].regs; - u32 oldadv, newadv; if (!phydev) return -ENODEV; - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && - (epause->rx_pause != epause->tx_pause))) + if (!phy_validate_pause(phydev, epause)) return -EINVAL; priv->rx_pause_en = priv->tx_pause_en = 0; + phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause); if (epause->rx_pause) { priv->rx_pause_en = 1; if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTRL_RX & TX */ - newadv = ADVERTISED_Pause; - } else /* FLOW_CTLR_RX */ - newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause; + } } else if (epause->tx_pause) { priv->tx_pause_en = 1; - /* FLOW_CTLR_TX */ - newadv = ADVERTISED_Asym_Pause; - } else - newadv = 0; + } if (epause->autoneg) priv->pause_aneg_en = 1; else priv->pause_aneg_en = 0; - oldadv = phydev->advertising & - (ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (oldadv != newadv) { - 
phydev->advertising &= - ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - phydev->advertising |= newadv; - if (phydev->autoneg) - /* inform link partner of our - * new flow ctrl settings - */ - return phy_start_aneg(phydev); - - if (!epause->autoneg) { - u32 tempval; - tempval = gfar_read(®s->maccfg1); - tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - - priv->tx_actual_en = 0; - if (priv->tx_pause_en) { - priv->tx_actual_en = 1; - tempval |= MACCFG1_TX_FLOW; - } + if (!epause->autoneg) { + u32 tempval = gfar_read(®s->maccfg1); - if (priv->rx_pause_en) - tempval |= MACCFG1_RX_FLOW; - gfar_write(®s->maccfg1, tempval); + tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + + priv->tx_actual_en = 0; + if (priv->tx_pause_en) { + priv->tx_actual_en = 1; + tempval |= MACCFG1_TX_FLOW; } + + if (priv->rx_pause_en) + tempval |= MACCFG1_RX_FLOW; + gfar_write(®s->maccfg1, tempval); } return 0; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 22a817da861e..9600837f21b8 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev) if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) uec_configure_serdes(dev); - phydev->supported &= (SUPPORTED_MII | - SUPPORTED_Autoneg | - ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full); + phy_set_max_speed(phydev, SPEED_100); if (priv->max_speed == SPEED_1000) phydev->supported |= ADVERTISED_1000baseT_Full; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c index 09e4061d1fa6..aaf72c055711 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c @@ -319,7 +319,7 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en) hns_gmac_set_uc_match(mac_drv, en); } -int hns_gmac_wait_fifo_clean(void *mac_drv) +static int hns_gmac_wait_fifo_clean(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; int wait_cnt; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c index 6ed6f142427e..3613e400e816 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c @@ -837,8 +837,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); @@ -855,8 +855,8 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb) * if the phy_dev is found */ put_device(&mac_cb->phy_dev->mdio.dev); - dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n", - mac_cb->mac_id, np->name); + dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n", + mac_cb->mac_id, np); } of_node_put(np); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 955c4ab18b03..75e8ee9d4e2e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1662,11 +1662,24 @@ static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) return 0; } +static void hns3_shutdown(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + devm_kfree(&pdev->dev, ae_dev); + 
pci_set_drvdata(pdev, NULL); + + if (system_state == SYSTEM_POWER_OFF) + pci_set_power_state(pdev, PCI_D3hot); +} + static struct pci_driver hns3_driver = { .name = hns3_driver_name, .id_table = hns3_pci_tbl, .probe = hns3_probe, .remove = hns3_remove, + .shutdown = hns3_shutdown, .sriov_configure = hns3_pci_sriov_configure, }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index f70ee6910ee2..3019007d7ceb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -100,41 +100,26 @@ static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - ret = hns3_nic_reset_all_ring(h); if (ret) return ret; - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - ret = hns3_lp_setup(ndev, loop_mode, true); usleep_range(10000, 20000); - return ret; + return 0; } static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode) { - struct hnae3_handle *h = hns3_get_handle(ndev); int ret; - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - ret = hns3_lp_setup(ndev, loop_mode, false); if (ret) { netdev_err(ndev, "lb_setup return error: %d\n", ret); return ret; } - h->ae_algo->ops->stop(h); usleep_range(10000, 20000); return 0; @@ -152,6 +137,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb) packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + ethh->h_dest[5] += 0x1f; eth_zero_addr(ethh->h_source); ethh->h_proto = htons(ETH_P_ARP); skb_reset_mac_header(skb); @@ -309,7 +295,7 @@ static void hns3_self_test(struct net_device *ndev, h->flags & HNAE3_SUPPORT_SERDES_LOOPBACK; if (if_running) - dev_close(ndev); + ndev->netdev_ops->ndo_stop(ndev); #if IS_ENABLED(CONFIG_VLAN_8021Q) /* Disable the vlan filter for selftest does not support it */ @@ -347,7 +333,7 @@ static void hns3_self_test(struct net_device *ndev, #endif if (if_running) - dev_open(ndev); + ndev->netdev_ops->ndo_open(ndev); } static int hns3_get_sset_count(struct net_device *netdev, int stringset) @@ -560,26 +546,56 @@ static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct hnae3_handle *h = hns3_get_handle(netdev); - u32 flowctrl_adv = 0; + const struct hnae3_ae_ops *ops; u8 link_stat; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) { + ops = h->ae_algo->ops; + if (ops->get_port_type) + ops->get_port_type(h, &cmd->base.port); + else + return -EOPNOTSUPP; + + switch (cmd->base.port) { + case PORT_FIBRE: + /* 1.auto_neg & speed & duplex from cmd */ + if (ops->get_ksettings_an_result) + ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + else + return -EOPNOTSUPP; + + /* 2.get link mode*/ + if (ops->get_link_mode) + ops->get_link_mode(h, + cmd->link_modes.supported, + cmd->link_modes.advertising); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (ops->get_mdix_mode) + ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + + break; + case PORT_TP: + if (!netdev->phydev) + return -EOPNOTSUPP; + phy_ethtool_ksettings_get(netdev->phydev, cmd); + break; + default: + netdev_warn(netdev, + "Unknown port type, neither Fibre/Copper detected"); return 0; } - if 
(h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; + /* mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; link_stat = hns3_get_link(netdev); if (!link_stat) { @@ -587,36 +603,6 @@ static int hns3_get_link_ksettings(struct net_device *netdev, cmd->base.duplex = DUPLEX_UNKNOWN; } - /* 2.get link mode and port type*/ - if (h->ae_algo->ops->get_link_mode) - h->ae_algo->ops->get_link_mode(h, - cmd->link_modes.supported, - cmd->link_modes.advertising); - - cmd->base.port = PORT_NONE; - if (h->ae_algo->ops->get_port_type) - h->ae_algo->ops->get_port_type(h, - &cmd->base.port); - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - /* 5.get flow control setttings */ - if (h->ae_algo->ops->get_flowctrl_adv) - h->ae_algo->ops->get_flowctrl_adv(h, &flowctrl_adv); - - if (flowctrl_adv & ADVERTISED_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Pause); - - if (flowctrl_adv & ADVERTISED_Asym_Pause) - ethtool_link_ksettings_add_link_mode(cmd, advertising, - Asym_Pause); - return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8577dfc799ad..cf18608669f5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2066,19 +2066,17 @@ static int hclge_init_msi(struct hclge_dev *hdev) return 0; } -static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +static u8 hclge_check_speed_dup(u8 duplex, int speed) { - struct hclge_mac *mac = &hdev->hw.mac; - if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) - mac->duplex = (u8)duplex; - else - mac->duplex = HCLGE_MAC_FULL; + if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M)) + duplex = HCLGE_MAC_FULL; - mac->speed = speed; + return duplex; } -int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, + u8 duplex) { struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; @@ -2138,7 +2136,23 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) return ret; } - hclge_check_speed_dup(hdev, duplex, speed); + return 0; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + int ret; + + duplex = hclge_check_speed_dup(duplex, speed); + if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex) + return 0; + + ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex); + if (ret) + return ret; + + hdev->hw.mac.speed = speed; + hdev->hw.mac.duplex = duplex; return 0; } @@ -2259,7 +2273,9 @@ static int hclge_mac_init(struct hclge_dev *hdev) int ret; int i; - ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + hdev->hw.mac.duplex = HCLGE_MAC_FULL; + ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, + hdev->hw.mac.duplex); if (ret) { dev_err(&hdev->pdev->dev, "Config mac speed dup fail ret=%d\n", ret); @@ -2415,13 +2431,11 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) return ret; } - if ((mac.speed != speed) || (mac.duplex != duplex)) { - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if 
(ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); - return ret; - } + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; } return 0; @@ -3659,6 +3673,8 @@ static int hclge_set_mac_loopback(struct hclge_dev *hdev, bool en) /* 2 Then setup the loopback flag */ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, en ? 1 : 0); + hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, en ? 1 : 0); req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); @@ -3719,15 +3735,36 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en) return -EIO; } + hclge_cfg_mac_mode(hdev, en); return 0; } +static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue_cmd *req = + (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGE_TQP_ENABLE_B; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Tqp enable fail, status =%d.\n", ret); + return ret; +} + static int hclge_set_loopback(struct hnae3_handle *handle, enum hnae3_loop loop_mode, bool en) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int ret; + int i, ret; switch (loop_mode) { case HNAE3_MAC_INTER_LOOP_MAC: @@ -3743,27 +3780,13 @@ static int hclge_set_loopback(struct hnae3_handle *handle, break; } - return ret; -} - -static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, - int stream_id, bool enable) -{ - struct hclge_desc desc; - struct hclge_cfg_com_tqp_queue_cmd *req = - (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; - int ret; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); - req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); - req->stream_id = cpu_to_le16(stream_id); - req->enable |= enable << HCLGE_TQP_ENABLE_B; + for (i = 0; i < vport->alloc_tqps; i++) { + ret = hclge_tqp_enable(hdev, i, 0, en); + if (ret) + return ret; + } - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, - "Tqp enable fail, status =%d.\n", ret); - return ret; + return 0; } static void hclge_reset_tqp_stats(struct hnae3_handle *handle) @@ -4362,7 +4385,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ -4429,7 +4452,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, hnae3_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); - hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae3_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1); hclge_prepare_mac_addr(&req, addr); status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); if (!status) { @@ 
-4686,9 +4709,17 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, "Add vf vlan filter fail, ret =%d.\n", req0->resp_code); } else { +#define HCLGE_VF_VLAN_DEL_NO_FOUND 1 if (!req0->resp_code) return 0; + if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND) { + dev_warn(&hdev->pdev->dev, + "vlan %d filter is not in vf vlan table\n", + vlan); + return 0; + } + dev_err(&hdev->pdev->dev, "Kill vf vlan filter fail, ret =%d.\n", req0->resp_code); @@ -4732,6 +4763,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, u16 vport_idx, vport_num = 0; int ret; + if (is_kill && !vlan_id) + return 0; + ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id, 0, proto); if (ret) { @@ -4761,7 +4795,7 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto, return -EINVAL; } - for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], VLAN_N_VID) + for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM) vport_num++; if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1)) @@ -5187,20 +5221,6 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle) return hdev->fw_version; } -static void hclge_get_flowctrl_adv(struct hnae3_handle *handle, - u32 *flowctrl_adv) -{ - struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_dev *hdev = vport->back; - struct phy_device *phydev = hdev->hw.mac.phydev; - - if (!phydev) - return; - - *flowctrl_adv |= (phydev->advertising & ADVERTISED_Pause) | - (phydev->advertising & ADVERTISED_Asym_Pause); -} - static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) { struct phy_device *phydev = hdev->hw.mac.phydev; @@ -5208,13 +5228,7 @@ static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) if (!phydev) return; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - if (rx_en) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - - if (tx_en) - phydev->advertising ^= ADVERTISED_Asym_Pause; + phy_set_asym_pause(phydev, rx_en, tx_en); } static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en) @@ -6301,7 +6315,6 @@ static const struct hnae3_ae_ops hclge_ops = { .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = hclge_get_channels, - .get_flowctrl_adv = hclge_get_flowctrl_adv, .get_regs_len = hclge_get_regs_len, .get_regs = hclge_get_regs, .set_led_id = hclge_set_led_id, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 398971a062f4..24b1f2a0c32a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -10,8 +10,6 @@ #define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ SUPPORTED_TP | \ - SUPPORTED_Pause | \ - SUPPORTED_Asym_Pause | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ PHY_1000BT_FEATURES) @@ -213,7 +211,7 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) } phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); return 0; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 372664686309..7410a1de8f1d 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -423,7 +423,7 @@ static void emac_hash_mc(struct emac_instance *dev) { const int regs = EMAC_XAHT_REGS(dev); u32 *gaht_base = 
emac_gaht_base(dev); - u32 gaht_temp[regs]; + u32 gaht_temp[EMAC_XAHT_MAX_REGS]; struct netdev_hw_addr *ha; int i; @@ -2964,6 +2964,10 @@ static int emac_init_config(struct emac_instance *dev) dev->xaht_width_shift = EMAC4_XAHT_WIDTH_SHIFT; } + /* This should never happen */ + if (WARN_ON(EMAC_XAHT_REGS(dev) > EMAC_XAHT_MAX_REGS)) + return -ENXIO; + DBG(dev, "features : 0x%08x / 0x%08x\n", dev->features, EMAC_FTRS_POSSIBLE); DBG(dev, "tx_fifo_size : %d (%d gige)\n", dev->tx_fifo_size, dev->tx_fifo_size_gige); DBG(dev, "rx_fifo_size : %d (%d gige)\n", dev->rx_fifo_size, dev->rx_fifo_size_gige); diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index 369de2cfb15b..84caa4a3fc52 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -390,6 +390,9 @@ static inline int emac_has_feature(struct emac_instance *dev, #define EMAC4SYNC_XAHT_SLOTS_SHIFT 8 #define EMAC4SYNC_XAHT_WIDTH_SHIFT 5 +/* The largest span between slots and widths above is 3 */ +#define EMAC_XAHT_MAX_REGS (1 << 3) + #define EMAC_XAHT_SLOTS(dev) (1 << (dev)->xaht_slots_shift) #define EMAC_XAHT_WIDTH(dev) (1 << (dev)->xaht_width_shift) #define EMAC_XAHT_REGS(dev) (1 << ((dev)->xaht_slots_shift - \ diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h index eeade2ea8334..e4c20f0024f6 100644 --- a/drivers/net/ethernet/ibm/emac/mal.h +++ b/drivers/net/ethernet/ibm/emac/mal.h @@ -136,7 +136,7 @@ static inline int mal_rx_size(int len) static inline int mal_tx_chunks(int len) { - return (len + MAL_MAX_TX_SIZE - 1) / MAL_MAX_TX_SIZE; + return DIV_ROUND_UP(len, MAL_MAX_TX_SIZE); } #define MAL_CHAN_MASK(n) (0x80000000 >> (n)) diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 14397e7e9925..50590e8d1fd1 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -22,6 +22,7 @@ i40e-objs := i40e_main.o \ i40e_txrx.o \ i40e_ptp.o \ i40e_client.o \ - i40e_virtchnl_pf.o + i40e_virtchnl_pf.o \ + i40e_xsk.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7a80652e2500..876cac317e79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -786,6 +786,11 @@ struct i40e_vsi { /* VSI specific handlers */ irqreturn_t (*irq_handler)(int irq, void *data); + + /* AF_XDP zero-copy */ + struct xdp_umem **xsk_umems; + u16 num_xsk_umems_used; + u16 num_xsk_umems; } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { @@ -1090,6 +1095,20 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) return !!vsi->xdp_prog; } +static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring) +{ + bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); + int qid = ring->queue_index; + + if (ring_is_xdp(ring)) + qid -= ring->vsi->alloc_queue_pairs; + + if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on) + return NULL; + + return ring->vsi->xsk_umems[qid]; +} + int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5ff6caa83948..87fe2e60602f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ 
b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -6,25 +6,225 @@ #include "i40e.h" #include "i40e_diag.h" +/* ethtool statistics helpers */ + +/** + * struct i40e_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the I40E_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the i40e_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the i40e_add_stat_string() helper function. + **/ struct i40e_stats { - /* The stat_string is expected to be a format string formatted using - * vsnprintf by i40e_add_stat_strings. Every member of a stats array - * should use the same format specifiers as they will be formatted - * using the same variadic arguments. - */ char stat_string[ETH_GSTRING_LEN]; int sizeof_stat; int stat_offset; }; +/* Helper macro to define an i40e_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ #define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ .stat_offset = offsetof(_type, _stat) \ } +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ #define I40E_NETDEV_STAT(_net_stat) \ I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* Helper macro for defining some statistics related to queues */ +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct i40e_stats i40e_gstrings_queue_stats[] = { + I40E_QUEUE_STAT("%s-%u.packets", stats.packets), + I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats and + * i40e_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +i40e_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. 
+ */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40e_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__i40e_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40e_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * i40e_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40e_add_queue_stats(u64 **data, struct i40e_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); + const struct i40e_stats *stats = i40e_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 
0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) { + i40e_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } +} + +/** + * 40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define i40e_add_stat_strings(p, stats, ...) \ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + #define I40E_PF_STAT(_name, _stat) \ I40E_STAT(struct i40e_pf, _name, _stat) #define I40E_VSI_STAT(_name, _stat) \ @@ -33,6 +233,8 @@ struct i40e_stats { I40E_STAT(struct i40e_veb, _name, _stat) #define I40E_PFC_STAT(_name, _stat) \ I40E_STAT(struct i40e_pfc_stats, _name, _stat) +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) static const struct i40e_stats i40e_gstrings_net_stats[] = { I40E_NETDEV_STAT(rx_packets), @@ -171,20 +373,11 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { I40E_PFC_STAT("port.rx_priority_%u_xon_2_xoff", priority_xon_2_xoff), }; -/* We use num_tx_queues here as a proxy for the maximum number of queues - * available because we always allocate queues symmetrically. 
- */ -#define I40E_MAX_NUM_QUEUES(n) ((n)->num_tx_queues) -#define I40E_QUEUE_STATS_LEN(n) \ - (I40E_MAX_NUM_QUEUES(n) \ - * 2 /* Tx and Rx together */ \ - * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) #define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats) + #define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats) -#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \ - I40E_MISC_STATS_LEN + \ - I40E_QUEUE_STATS_LEN((n))) + +#define I40E_VSI_STATS_LEN (I40E_NETDEV_STATS_LEN + I40E_MISC_STATS_LEN) #define I40E_PFC_STATS_LEN (ARRAY_SIZE(i40e_gstrings_pfc_stats) * \ I40E_MAX_USER_PRIORITY) @@ -193,10 +386,15 @@ static const struct i40e_stats i40e_gstrings_pfc_stats[] = { (ARRAY_SIZE(i40e_gstrings_veb_tc_stats) * \ I40E_MAX_TRAFFIC_CLASS)) -#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \ +#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats) + +#define I40E_PF_STATS_LEN (I40E_GLOBAL_STATS_LEN + \ I40E_PFC_STATS_LEN + \ I40E_VEB_STATS_LEN + \ - I40E_VSI_STATS_LEN((n))) + I40E_VSI_STATS_LEN) + +/* Length of stats for a single queue */ +#define I40E_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) enum i40e_ethtool_test_id { I40E_ETH_TEST_REG = 0, @@ -1701,11 +1899,30 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + int stats_len; if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) - return I40E_PF_STATS_LEN(netdev); + stats_len = I40E_PF_STATS_LEN; else - return I40E_VSI_STATS_LEN(netdev); + stats_len = I40E_VSI_STATS_LEN; + + /* The number of stats reported for a given net_device must remain + * constant throughout the life of that device. + * + * This is because the API for obtaining the size, strings, and stats + * is spread out over three separate ethtool ioctls. There is no safe + * way to lock the number of stats across these calls, so we must + * assume that they will never change. + * + * Due to this, we report the maximum number of queues, even if not + * every queue is currently configured. Since we always allocate + * queues in pairs, we'll just use netdev->num_tx_queues * 2. This + * works because the num_tx_queues is set at device creation and never + * changes. + */ + stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; + + return stats_len; } static int i40e_get_sset_count(struct net_device *netdev, int sset) @@ -1728,89 +1945,6 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset) } /** - * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer - * @data: location to store the stat value - * @pointer: basis for where to copy from - * @stat: the stat definition - * - * Copies the stat data defined by the pointer and stat structure pair into - * the memory supplied as data. Used to implement i40e_add_ethtool_stats. - * If the pointer is null, data will be zero'd. - */ -static inline void -i40e_add_one_ethtool_stat(u64 *data, void *pointer, - const struct i40e_stats *stat) -{ - char *p; - - if (!pointer) { - /* ensure that the ethtool data buffer is zero'd for any stats - * which don't have a valid pointer. 
- */ - *data = 0; - return; - } - - p = (char *)pointer + stat->stat_offset; - switch (stat->sizeof_stat) { - case sizeof(u64): - *data = *((u64 *)p); - break; - case sizeof(u32): - *data = *((u32 *)p); - break; - case sizeof(u16): - *data = *((u16 *)p); - break; - case sizeof(u8): - *data = *((u8 *)p); - break; - default: - WARN_ONCE(1, "unexpected stat size for %s", - stat->stat_string); - *data = 0; - } -} - -/** - * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location to copy stats from - * @stats: array of stats to copy - * @size: the size of the stats definition - * - * Copy the stats defined by the stats array using the pointer as a base into - * the data buffer supplied by ethtool. Updates the data pointer to point to - * the next empty location for successive calls to __i40e_add_ethtool_stats. - * If pointer is null, set the data values to zero and update the pointer to - * skip these stats. - **/ -static inline void -__i40e_add_ethtool_stats(u64 **data, void *pointer, - const struct i40e_stats stats[], - const unsigned int size) -{ - unsigned int i; - - for (i = 0; i < size; i++) - i40e_add_one_ethtool_stat((*data)++, pointer, &stats[i]); -} - -/** - * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer - * @data: ethtool stats buffer - * @pointer: location where stats are stored - * @stats: static const array of stat definitions - * - * Macro to ease the use of __i40e_add_ethtool_stats by taking a static - * constant stats array and passing the ARRAY_SIZE(). This avoids typos by - * ensuring that we pass the size associated with the given stats array. - * Assumes that stats is an array. - **/ -#define i40e_add_ethtool_stats(data, pointer, stats) \ - __i40e_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) - -/** * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure * @pf: the PF device structure * @i: the priority value to copy @@ -1853,12 +1987,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; struct i40e_veb *veb = pf->veb[pf->lan_veb]; unsigned int i; - unsigned int start; bool veb_stats; u64 *p = data; @@ -1870,38 +2002,12 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, i40e_add_ethtool_stats(&data, vsi, i40e_gstrings_misc_stats); rcu_read_lock(); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev) ; i++) { - tx_ring = READ_ONCE(vsi->tx_rings[i]); - - if (!tx_ring) { - /* Bump the stat counter to skip these stats, and make - * sure the memory is zero'd - */ - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - *(data++) = 0; - continue; - } - - /* process Tx ring statistics */ - do { - start = u64_stats_fetch_begin_irq(&tx_ring->syncp); - data[0] = tx_ring->stats.packets; - data[1] = tx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); - data += 2; - - /* Rx ring is the 2nd half of the queue pair */ - rx_ring = &tx_ring[1]; - do { - start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - data[0] = rx_ring->stats.packets; - data[1] = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); - data += 2; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); + i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); } rcu_read_unlock(); + 
if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) goto check_data_pointer; @@ -1933,42 +2039,6 @@ check_data_pointer: } /** - * __i40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * @size: size of the stats array - * - * Format and copy the strings described by stats into the buffer pointed at - * by p. - **/ -static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], - const unsigned int size, ...) -{ - unsigned int i; - - for (i = 0; i < size; i++) { - va_list args; - - va_start(args, size); - vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); - *p += ETH_GSTRING_LEN; - va_end(args); - } -} - -/** - * 40e_add_stat_strings - copy stat strings into ethtool buffer - * @p: ethtool supplied buffer - * @stats: stat definitions array - * - * Format and copy the strings described by the const static stats value into - * the buffer pointed at by p. Assumes that stats can have ARRAY_SIZE called - * for it. - **/ -#define i40e_add_stat_strings(p, stats, ...) \ - __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) - -/** * i40e_get_stat_strings - copy stat strings into supplied buffer * @netdev: the netdev to collect strings for * @data: supplied buffer to copy strings into @@ -1990,16 +2060,13 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) i40e_add_stat_strings(&data, i40e_gstrings_misc_stats); - for (i = 0; i < I40E_MAX_NUM_QUEUES(netdev); i++) { - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "tx-%u.tx_bytes", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_packets", i); - data += ETH_GSTRING_LEN; - snprintf(data, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i); - data += ETH_GSTRING_LEN; + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "tx", i); + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "rx", i); } + if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) return; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac685ad4d877..5d209d8fe9b8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9,7 +9,9 @@ /* Local includes */ #include "i40e.h" #include "i40e_diag.h" +#include "i40e_xsk.h" #include <net/udp_tunnel.h> +#include <net/xdp_sock.h> /* All i40e tracepoints are defined by the include below, which * must be included exactly once across the whole kernel with * CREATE_TRACE_POINTS defined @@ -420,9 +422,9 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct i40e_netdev_priv *np = netdev_priv(netdev); - struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi); + struct i40e_ring *ring; int i; if (test_bit(__I40E_VSI_DOWN, vsi->state)) @@ -436,24 +438,26 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = READ_ONCE(vsi->tx_rings[i]); - if (!tx_ring) + ring = READ_ONCE(vsi->tx_rings[i]); + if (!ring) continue; - i40e_get_netdev_stats_struct_tx(tx_ring, stats); + i40e_get_netdev_stats_struct_tx(ring, stats); - rx_ring = &tx_ring[1]; + if (i40e_enabled_xdp_vsi(vsi)) { + ring++; + i40e_get_netdev_stats_struct_tx(ring, stats); + } + ring++; do { - 
start = u64_stats_fetch_begin_irq(&rx_ring->syncp); - packets = rx_ring->stats.packets; - bytes = rx_ring->stats.bytes; - } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start)); + start = u64_stats_fetch_begin_irq(&ring->syncp); + packets = ring->stats.packets; + bytes = ring->stats.bytes; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); stats->rx_packets += packets; stats->rx_bytes += bytes; - if (i40e_enabled_xdp_vsi(vsi)) - i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats); } rcu_read_unlock(); @@ -3072,6 +3076,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) i40e_status err = 0; u32 qtx_ctl = 0; + if (ring_is_xdp(ring)) + ring->xsk_umem = i40e_xsk_umem(ring); + /* some ATR related tx ring init */ if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) { ring->atr_sample_rate = vsi->back->atr_sample_rate; @@ -3181,13 +3188,46 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) struct i40e_hw *hw = &vsi->back->hw; struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; + bool ok; + int ret; bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); - ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) + xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); + + ring->xsk_umem = i40e_xsk_umem(ring); + if (ring->xsk_umem) { + ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - + XDP_PACKET_HEADROOM; + /* For AF_XDP ZC, we disallow packets to span on + * multiple buffers, thus letting us skip that + * handling in the fast-path. + */ + chain_len = 1; + ring->zca.free = i40e_zca_free; + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_ZERO_COPY, + &ring->zca); + if (ret) + return ret; + dev_info(&vsi->back->pdev->dev, + "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n", + ring->queue_index); + + } else { + ring->rx_buf_len = vsi->rx_buf_len; + if (ring->vsi->type == I40E_VSI_MAIN) { + ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, + MEM_TYPE_PAGE_SHARED, + NULL); + if (ret) + return ret; + } + } rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)); @@ -3243,7 +3283,15 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); writel(0, ring->tail); - i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + ok = ring->xsk_umem ? + i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : + !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); + if (!ok) { + dev_info(&vsi->back->pdev->dev, + "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", + ring->xsk_umem ? "UMEM enabled " : "", + ring->queue_index, pf_q); + } return 0; } @@ -6568,6 +6616,24 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) struct i40e_hw *hw = &pf->hw; i40e_status err; u64 mask; + u8 speed; + + /* Card might've been put in an unstable state by other drivers + * and applications, which causes incorrect speed values being + * set on startup. In order to clear speed registers, we call + * get_phy_capabilities twice, once to get initial state of + * available speeds, and once to get current PHY config. 
+ */ + err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, + NULL); + if (err) { + dev_err(&pf->pdev->dev, + "failed to get phy cap., ret = %s last_status = %s\n", + i40e_stat_str(hw, err), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return err; + } + speed = abilities.link_speed; /* Get the current phy config */ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, @@ -6581,9 +6647,9 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) } /* If link needs to go up, but was not forced to go down, - * no need for a flap + * and its speed values are OK, no need for a flap */ - if (is_up && abilities.phy_type != 0) + if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0) return I40E_SUCCESS; /* To force link we need to set bits for all supported PHY types, @@ -6595,7 +6661,10 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up) config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0; /* Copy the old settings, except of phy_type */ config.abilities = abilities.abilities; - config.link_speed = abilities.link_speed; + if (abilities.link_speed != 0) + config.link_speed = abilities.link_speed; + else + config.link_speed = speed; config.eee_capability = abilities.eee_capability; config.eeer = abilities.eeer_val; config.low_power_ctrl = abilities.d3_lpan; @@ -11828,6 +11897,256 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, } /** + * i40e_enter_busy_conf - Enters busy config state + * @vsi: vsi + * + * Returns 0 on success, <0 for failure. + **/ +static int i40e_enter_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + int timeout = 50; + + while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(1000, 2000); + } + + return 0; +} + +/** + * i40e_exit_busy_conf - Exits busy config state + * @vsi: vsi + **/ +static void i40e_exit_busy_conf(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + clear_bit(__I40E_CONFIG_BUSY, pf->state); +} + +/** + * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) +{ + memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, + sizeof(vsi->rx_rings[queue_pair]->rx_stats)); + memset(&vsi->tx_rings[queue_pair]->stats, 0, + sizeof(vsi->tx_rings[queue_pair]->stats)); + if (i40e_enabled_xdp_vsi(vsi)) { + memset(&vsi->xdp_rings[queue_pair]->stats, 0, + sizeof(vsi->xdp_rings[queue_pair]->stats)); + } +} + +/** + * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair + * @vsi: vsi + * @queue_pair: queue pair + **/ +static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) +{ + i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); + if (i40e_enabled_xdp_vsi(vsi)) + i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); +} + +/** + * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + **/ +static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_q_vector *q_vector = rxr->q_vector; + + if (!vsi->netdev) + return; + + /* All rings in a qp belong to the same qvector. 
*/ + if (q_vector->rx.ring || q_vector->tx.ring) { + if (enable) + napi_enable(&q_vector->napi); + else + napi_disable(&q_vector->napi); + } +} + +/** + * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * @enable: true for enable, false for disable + * + * Returns 0 on success, <0 on failure. + **/ +static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair, + bool enable) +{ + struct i40e_pf *pf = vsi->back; + int pf_q, ret = 0; + + pf_q = vsi->base_queue + queue_pair; + ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, + false /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + i40e_control_rx_q(pf, pf_q, enable); + ret = i40e_pf_rxq_wait(pf, pf_q, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + return ret; + } + + /* Due to HW errata, on Rx disable only, the register can + * indicate done before it really is. Needs 50ms to be sure + */ + if (!enable) + mdelay(50); + + if (!i40e_enabled_xdp_vsi(vsi)) + return ret; + + ret = i40e_control_wait_tx_q(vsi->seid, pf, + pf_q + vsi->alloc_queue_pairs, + true /*is xdp*/, enable); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d XDP Tx ring %d %sable timeout\n", + vsi->seid, pf_q, (enable ? "en" : "dis")); + } + + return ret; +} + +/** + * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* All rings in a qp belong to the same qvector. */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) + i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); + else + i40e_irq_dynamic_enable_icr0(pf); + + i40e_flush(hw); +} + +/** + * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair + * @vsi: vsi + * @queue_pair: queue_pair + **/ +static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair) +{ + struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + + /* For simplicity, instead of removing the qp interrupt causes + * from the interrupt linked list, we simply disable the interrupt, and + * leave the list intact. + * + * All rings in a qp belong to the same qvector. + */ + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; + + wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); + i40e_flush(hw); + synchronize_irq(pf->msix_entries[intpf].vector); + } else { + /* Legacy and MSI mode - this stops all interrupt handling */ + wr32(hw, I40E_PFINT_ICR0_ENA, 0); + wr32(hw, I40E_PFINT_DYN_CTL0, 0); + i40e_flush(hw); + synchronize_irq(pf->pdev->irq); + } +} + +/** + * i40e_queue_pair_disable - Disables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. 
+ **/ +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_enter_busy_conf(vsi); + if (err) + return err; + + i40e_queue_pair_disable_irq(vsi, queue_pair); + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); + i40e_queue_pair_clean_rings(vsi, queue_pair); + i40e_queue_pair_reset_stats(vsi, queue_pair); + + return err; +} + +/** + * i40e_queue_pair_enable - Enables a queue pair + * @vsi: vsi + * @queue_pair: queue pair + * + * Returns 0 on success, <0 on failure. + **/ +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair) +{ + int err; + + err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); + if (err) + return err; + + if (i40e_enabled_xdp_vsi(vsi)) { + err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); + if (err) + return err; + } + + err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); + if (err) + return err; + + err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */); + i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */); + i40e_queue_pair_enable_irq(vsi, queue_pair); + + i40e_exit_busy_conf(vsi); + + return err; +} + +/** * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command @@ -11847,6 +12166,12 @@ static int i40e_xdp(struct net_device *dev, case XDP_QUERY_PROG: xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; + case XDP_QUERY_XSK_UMEM: + return i40e_xsk_umem_query(vsi, &xdp->xsk.umem, + xdp->xsk.queue_id); + case XDP_SETUP_XSK_UMEM: + return i40e_xsk_umem_setup(vsi, xdp->xsk.umem, + xdp->xsk.queue_id); default: return -EINVAL; } @@ -11886,6 +12211,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bridge_setlink = i40e_ndo_bridge_setlink, .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, + .ndo_xsk_async_xmit = i40e_xsk_async_xmit, }; /** @@ -13033,7 +13359,7 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) break; - if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { + if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { dev_info(&pf->pdev->dev, "vsi seid %d not found\n", vsi_seid); return NULL; @@ -14159,6 +14485,7 @@ static void i40e_remove(struct pci_dev *pdev) mutex_destroy(&hw->aq.asq_mutex); /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { if (pf->vsi[i]) { @@ -14167,6 +14494,7 @@ static void i40e_remove(struct pci_dev *pdev) pf->vsi[i] = NULL; } } + rtnl_unlock(); for (i = 0; i < I40E_MAX_VEB; i++) { kfree(pf->veb[i]); @@ -14378,7 +14706,13 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? 
I40E_PFPM_WUFC_MAG_MASK : 0)); + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); i40e_clear_interrupt_scheme(pf); + rtnl_unlock(); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 35f2866b38c6..1199f0502d6d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -694,7 +694,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); + strncpy(pf->ptp_caps.name, i40e_driver_name, + sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; pf->ptp_caps.n_ext_ts = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b5042d1a63c0..37bd4e50ccde 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -8,16 +8,8 @@ #include "i40e.h" #include "i40e_trace.h" #include "i40e_prototype.h" - -static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, - u32 td_tag) -{ - return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | - ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | - ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | - ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | - ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); -} +#include "i40e_txrx_common.h" +#include "i40e_xsk.h" #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) /** @@ -536,8 +528,8 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, * This is used to verify if the FD programming or invalidation * requested by SW to the HW is successful or not and take actions accordingly. **/ -static void i40e_fd_handle_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, u8 prog_id) +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id) { struct i40e_pf *pf = rx_ring->vsi->back; struct pci_dev *pdev = pf->pdev; @@ -767,8 +759,6 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi) } } -#define WB_STRIDE 4 - /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @vsi: the VSI we care about @@ -873,27 +863,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, i += tx_ring->count; tx_ring->next_to_clean = i; - u64_stats_update_begin(&tx_ring->syncp); - tx_ring->stats.bytes += total_bytes; - tx_ring->stats.packets += total_packets; - u64_stats_update_end(&tx_ring->syncp); - tx_ring->q_vector->tx.total_bytes += total_bytes; - tx_ring->q_vector->tx.total_packets += total_packets; - - if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { - /* check to see if there are < 4 descriptors - * waiting to be written back, then kick the hardware to force - * them to be written back in case we stay in NAPI. - * In this mode on X722 we do not enable Interrupt. 
- */ - unsigned int j = i40e_get_tx_pending(tx_ring, false); - - if (budget && - ((j / WB_STRIDE) == 0) && (j > 0) && - !test_bit(__I40E_VSI_DOWN, vsi->state) && - (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) - tx_ring->arm_wb = true; - } + i40e_update_tx_stats(tx_ring, total_packets, total_bytes); + i40e_arm_wb(tx_ring, vsi, budget); if (ring_is_xdp(tx_ring)) return !!budget; @@ -1244,6 +1215,11 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, new_buff->page = old_buff->page; new_buff->page_offset = old_buff->page_offset; new_buff->pagecnt_bias = old_buff->pagecnt_bias; + + rx_ring->rx_stats.page_reuse_count++; + + /* clear contents of buffer_info */ + old_buff->page = NULL; } /** @@ -1266,7 +1242,7 @@ static inline bool i40e_rx_is_programming_status(u64 qw) } /** - * i40e_clean_programming_status - clean the programming status descriptor + * i40e_clean_programming_status - try clean the programming status descriptor * @rx_ring: the rx ring that has this descriptor * @rx_desc: the rx descriptor written back by HW * @qw: qword representing status_error_len in CPU ordering @@ -1275,15 +1251,22 @@ static inline bool i40e_rx_is_programming_status(u64 qw) * status being successful or not and take actions accordingly. FCoE should * handle its context/filter programming/invalidation status and take actions. * + * Returns an i40e_rx_buffer to reuse if the cleanup occurred, otherwise NULL. **/ -static void i40e_clean_programming_status(struct i40e_ring *rx_ring, - union i40e_rx_desc *rx_desc, - u64 qw) +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw) { struct i40e_rx_buffer *rx_buffer; - u32 ntc = rx_ring->next_to_clean; + u32 ntc; u8 id; + if (!i40e_rx_is_programming_status(qw)) + return NULL; + + ntc = rx_ring->next_to_clean; + /* fetch, update, and store next to clean */ rx_buffer = &rx_ring->rx_bi[ntc++]; ntc = (ntc < rx_ring->count) ? 
ntc : 0; @@ -1291,18 +1274,13 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, prefetch(I40E_RX_DESC(rx_ring, ntc)); - /* place unused page back on the ring */ - i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; - id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) i40e_fd_handle_status(rx_ring, rx_desc, id); + + return rx_buffer; } /** @@ -1372,6 +1350,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_ring->skb = NULL; } + if (rx_ring->xsk_umem) + goto skip_free; + /* Free all the Rx ring sk_buffs */ for (i = 0; i < rx_ring->count; i++) { struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; @@ -1400,6 +1381,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) rx_bi->page_offset = 0; } +skip_free: bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; memset(rx_ring->rx_bi, 0, bi_size); @@ -1492,7 +1474,7 @@ err: * @rx_ring: ring to bump * @val: new head index **/ -static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) { rx_ring->next_to_use = val; @@ -1576,8 +1558,8 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring, * @skb: packet to send up * @vlan_tag: vlan tag for packet **/ -static void i40e_receive_skb(struct i40e_ring *rx_ring, - struct sk_buff *skb, u16 vlan_tag) +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag) { struct i40e_q_vector *q_vector = rx_ring->q_vector; @@ -1804,7 +1786,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. **/ -static inline void i40e_process_skb_fields(struct i40e_ring *rx_ring, union i40e_rx_desc *rx_desc, struct sk_buff *skb, u8 rx_ptype) @@ -2152,7 +2133,6 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, if (i40e_can_reuse_rx_page(rx_buffer)) { /* hand second half of page back to the ring */ i40e_reuse_rx_page(rx_ring, rx_buffer); - rx_ring->rx_stats.page_reuse_count++; } else { /* we are not reusing the buffer so unmap it */ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, @@ -2160,10 +2140,9 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring, DMA_FROM_DEVICE, I40E_RX_DMA_ATTR); __page_frag_cache_drain(rx_buffer->page, rx_buffer->pagecnt_bias); + /* clear contents of buffer_info */ + rx_buffer->page = NULL; } - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; } /** @@ -2199,16 +2178,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring, return true; } -#define I40E_XDP_PASS 0 -#define I40E_XDP_CONSUMED BIT(0) -#define I40E_XDP_TX BIT(1) -#define I40E_XDP_REDIR BIT(2) - static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, struct i40e_ring *xdp_ring); -static int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, - struct i40e_ring *xdp_ring) +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) { struct xdp_frame *xdpf = convert_to_xdp_frame(xdp); @@ -2287,7 +2260,13 @@ static void i40e_rx_buffer_flip(struct i40e_ring *rx_ring, #endif } -static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) +/** + * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register + * @xdp_ring: XDP Tx ring + * + * This function updates the XDP Tx ring tail register. 
+ **/ +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) { /* Force memory writes to complete before letting h/w * know there are new descriptors to fetch. @@ -2297,6 +2276,48 @@ static inline void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring) } /** + * i40e_update_rx_stats - Update Rx ring statistics + * @rx_ring: rx descriptor ring + * @total_rx_bytes: number of bytes received + * @total_rx_packets: number of packets received + * + * This function updates the Rx ring statistics. + **/ +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets) +{ + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->stats.packets += total_rx_packets; + rx_ring->stats.bytes += total_rx_bytes; + u64_stats_update_end(&rx_ring->syncp); + rx_ring->q_vector->rx.total_packets += total_rx_packets; + rx_ring->q_vector->rx.total_bytes += total_rx_bytes; +} + +/** + * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map + * @rx_ring: Rx ring + * @xdp_res: Result of the receive batch + * + * This function bumps XDP Tx tail and/or flush redirect map, and + * should be called when a batch of packets has been processed in the + * napi loop. + **/ +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res) +{ + if (xdp_res & I40E_XDP_REDIR) + xdp_do_flush_map(); + + if (xdp_res & I40E_XDP_TX) { + struct i40e_ring *xdp_ring = + rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + + i40e_xdp_ring_update_tail(xdp_ring); + } +} + +/** * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -2349,11 +2370,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) */ dma_rmb(); - if (unlikely(i40e_rx_is_programming_status(qword))) { - i40e_clean_programming_status(rx_ring, rx_desc, qword); + rx_buffer = i40e_clean_programming_status(rx_ring, rx_desc, + qword); + if (unlikely(rx_buffer)) { + i40e_reuse_rx_page(rx_ring, rx_buffer); cleaned_count++; continue; } + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> I40E_RXD_QW1_LENGTH_PBUF_SHIFT; if (!size) @@ -2432,24 +2456,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) total_rx_packets++; } - if (xdp_xmit & I40E_XDP_REDIR) - xdp_do_flush_map(); - - if (xdp_xmit & I40E_XDP_TX) { - struct i40e_ring *xdp_ring = - rx_ring->vsi->xdp_rings[rx_ring->queue_index]; - - i40e_xdp_ring_update_tail(xdp_ring); - } - + i40e_finalize_xdp_rx(rx_ring, xdp_xmit); rx_ring->skb = skb; - u64_stats_update_begin(&rx_ring->syncp); - rx_ring->stats.packets += total_rx_packets; - rx_ring->stats.bytes += total_rx_bytes; - u64_stats_update_end(&rx_ring->syncp); - rx_ring->q_vector->rx.total_packets += total_rx_packets; - rx_ring->q_vector->rx.total_bytes += total_rx_bytes; + i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; @@ -2587,7 +2597,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - if (!i40e_clean_tx_irq(vsi, ring, budget)) { + bool wd = ring->xsk_umem ? 
+ i40e_clean_xdp_tx_irq(vsi, ring, budget) : + i40e_clean_tx_irq(vsi, ring, budget); + + if (!wd) { clean_complete = false; continue; } @@ -2605,7 +2619,9 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = max(budget/q_vector->num_ringpairs, 1); i40e_for_each_ring(ring, q_vector->rx) { - int cleaned = i40e_clean_rx_irq(ring, budget_per_ring); + int cleaned = ring->xsk_umem ? + i40e_clean_rx_irq_zc(ring, budget_per_ring) : + i40e_clean_rx_irq(ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index bb04f6a731fe..100e92d2982f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -296,13 +296,17 @@ struct i40e_tx_buffer { struct i40e_rx_buffer { dma_addr_t dma; - struct page *page; -#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) - __u32 page_offset; -#else - __u16 page_offset; -#endif - __u16 pagecnt_bias; + union { + struct { + struct page *page; + __u32 page_offset; + __u16 pagecnt_bias; + }; + struct { + void *addr; + u64 handle; + }; + }; }; struct i40e_queue_stats { @@ -414,6 +418,8 @@ struct i40e_ring { struct i40e_channel *ch; struct xdp_rxq_info xdp_rxq; + struct xdp_umem *xsk_umem; + struct zero_copy_allocator zca; /* ZC allocator anchor */ } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h new file mode 100644 index 000000000000..b5afd479a9c5 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. 
*/ + +#ifndef I40E_TXRX_COMMON_ +#define I40E_TXRX_COMMON_ + +void i40e_fd_handle_status(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, u8 prog_id); +int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring); +struct i40e_rx_buffer *i40e_clean_programming_status( + struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, + u64 qw); +void i40e_process_skb_fields(struct i40e_ring *rx_ring, + union i40e_rx_desc *rx_desc, struct sk_buff *skb, + u8 rx_ptype); +void i40e_receive_skb(struct i40e_ring *rx_ring, + struct sk_buff *skb, u16 vlan_tag); +void i40e_xdp_ring_update_tail(struct i40e_ring *xdp_ring); +void i40e_update_rx_stats(struct i40e_ring *rx_ring, + unsigned int total_rx_bytes, + unsigned int total_rx_packets); +void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res); +void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val); + +#define I40E_XDP_PASS 0 +#define I40E_XDP_CONSUMED BIT(0) +#define I40E_XDP_TX BIT(1) +#define I40E_XDP_REDIR BIT(2) + +/** + * build_ctob - Builds the Tx descriptor (cmd, offset and type) qword + **/ +static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, + u32 td_tag) +{ + return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA | + ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) | + ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) | + ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) | + ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); +} + +/** + * i40e_update_tx_stats - Update the egress statistics for the Tx ring + * @tx_ring: Tx ring to update + * @total_packet: total packets sent + * @total_bytes: total bytes sent + **/ +static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring, + unsigned int total_packets, + unsigned int total_bytes) +{ + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->stats.bytes += total_bytes; + tx_ring->stats.packets += total_packets; + u64_stats_update_end(&tx_ring->syncp); + tx_ring->q_vector->tx.total_bytes += total_bytes; + tx_ring->q_vector->tx.total_packets += total_packets; +} + +#define WB_STRIDE 4 + +/** + * i40e_arm_wb - (Possibly) arms Tx write-back + * @tx_ring: Tx ring to update + * @vsi: the VSI + * @budget: the NAPI budget left + **/ +static inline void i40e_arm_wb(struct i40e_ring *tx_ring, + struct i40e_vsi *vsi, + int budget) +{ + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. 
+ */ + unsigned int j = i40e_get_tx_pending(tx_ring, false); + + if (budget && + ((j / WB_STRIDE) == 0) && j > 0 && + !test_bit(__I40E_VSI_DOWN, vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } +} + +#endif /* I40E_TXRX_COMMON_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c6d24eaede18..3e707c7e6782 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2458,7 +2458,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } } @@ -2569,6 +2569,16 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) ret = I40E_ERR_INVALID_MAC_ADDR; goto error_param; } + + if (vf->pf_set_mac && + ether_addr_equal(al->list[i].addr, + vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "MAC addr %pM has been set by PF, cannot delete it for VF %d, reset VF to change MAC addr\n", + vf->default_lan_addr.addr, vf->vf_id); + ret = I40E_ERR_PARAM; + goto error_param; + } } vsi = pf->vsi[vf->lan_vsi_idx]; @@ -3873,9 +3883,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) mac, vf_id); } - /* Force the VF driver stop so it has to reload with new MAC address */ + /* Force the VF interface down so it has to bring up with new MAC + * address + */ i40e_vc_disable_vf(vf); - dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: return ret; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c new file mode 100644 index 000000000000..2ebfc78bbd09 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -0,0 +1,832 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Intel Corporation. 
*/ + +#include <linux/bpf_trace.h> +#include <net/xdp_sock.h> +#include <net/xdp.h> + +#include "i40e.h" +#include "i40e_txrx_common.h" +#include "i40e_xsk.h" + +/** + * i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs + * @vsi: Current VSI + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi) +{ + if (vsi->xsk_umems) + return 0; + + vsi->num_xsk_umems_used = 0; + vsi->num_xsk_umems = vsi->alloc_queue_pairs; + vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems), + GFP_KERNEL); + if (!vsi->xsk_umems) { + vsi->num_xsk_umems = 0; + return -ENOMEM; + } + + return 0; +} + +/** + * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid + * @vsi: Current VSI + * @umem: UMEM to store + * @qid: Ring/qid to associate with the UMEM + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + int err; + + err = i40e_alloc_xsk_umems(vsi); + if (err) + return err; + + vsi->xsk_umems[qid] = umem; + vsi->num_xsk_umems_used++; + + return 0; +} + +/** + * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid + * @vsi: Current VSI + * @qid: Ring/qid associated with the UMEM + **/ +static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid) +{ + vsi->xsk_umems[qid] = NULL; + vsi->num_xsk_umems_used--; + + if (vsi->num_xsk_umems == 0) { + kfree(vsi->xsk_umems); + vsi->xsk_umems = NULL; + vsi->num_xsk_umems = 0; + } +} + +/** + * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev + * @vsi: Current VSI + * @umem: UMEM to DMA map + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev; + unsigned int i, j; + dma_addr_t dma; + + dev = &pf->pdev->dev; + for (i = 0; i < umem->npgs; i++) { + dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + if (dma_mapping_error(dev, dma)) + goto out_unmap; + + umem->pages[i].dma = dma; + } + + return 0; + +out_unmap: + for (j = 0; j < i; j++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + umem->pages[i].dma = 0; + } + + return -1; +} + +/** + * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev + * @vsi: Current VSI + * @umem: UMEM to DMA map + **/ +static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem) +{ + struct i40e_pf *pf = vsi->back; + struct device *dev; + unsigned int i; + + dev = &pf->pdev->dev; + + for (i = 0; i < umem->npgs; i++) { + dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE, + DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR); + + umem->pages[i].dma = 0; + } +} + +/** + * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid + * @vsi: Current VSI + * @umem: UMEM + * @qid: Rx ring to associate UMEM to + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + bool if_running; + int err; + + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + if (qid >= vsi->num_queue_pairs) + return -EINVAL; + + if (vsi->xsk_umems) { + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + if (vsi->xsk_umems[qid]) + return -EBUSY; + } + + err = i40e_xsk_umem_dma_map(vsi, umem); + if (err) + return err; + + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); + + if (if_running) { + err = 
i40e_queue_pair_disable(vsi, qid); + if (err) + return err; + } + + err = i40e_add_xsk_umem(vsi, umem, qid); + if (err) + return err; + + if (if_running) { + err = i40e_queue_pair_enable(vsi, qid); + if (err) + return err; + } + + return 0; +} + +/** + * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid + * @vsi: Current VSI + * @qid: Rx ring to associate UMEM to + * + * Returns 0 on success, <0 on failure + **/ +static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid) +{ + bool if_running; + int err; + + if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems || + !vsi->xsk_umems[qid]) + return -EINVAL; + + if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); + + if (if_running) { + err = i40e_queue_pair_disable(vsi, qid); + if (err) + return err; + } + + i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]); + i40e_remove_xsk_umem(vsi, qid); + + if (if_running) { + err = i40e_queue_pair_enable(vsi, qid); + if (err) + return err; + } + + return 0; +} + +/** + * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM + * @vsi: Current VSI + * @umem: UMEM associated to the ring, if any + * @qid: Rx ring to associate UMEM to + * + * This function will store, if any, the UMEM associated to certain ring. + * + * Returns 0 on success, <0 on failure + **/ +int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, + u16 qid) +{ + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + + if (qid >= vsi->num_queue_pairs) + return -EINVAL; + + if (vsi->xsk_umems) { + if (qid >= vsi->num_xsk_umems) + return -EINVAL; + *umem = vsi->xsk_umems[qid]; + return 0; + } + + *umem = NULL; + return 0; +} + +/** + * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM + * @vsi: Current VSI + * @umem: UMEM to enable/associate to a ring, or NULL to disable + * @qid: Rx ring to (dis)associate UMEM (from)to + * + * This function enables or disables an UMEM to a certain ring. + * + * Returns 0 on success, <0 on failure + **/ +int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid) +{ + return umem ? i40e_xsk_umem_enable(vsi, umem, qid) : + i40e_xsk_umem_disable(vsi, qid); +} + +/** + * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff + * @rx_ring: Rx ring + * @xdp: xdp_buff used as input to the XDP program + * + * This function enables or disables an UMEM to a certain ring. + * + * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} + **/ +static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) +{ + int err, result = I40E_XDP_PASS; + struct i40e_ring *xdp_ring; + struct bpf_prog *xdp_prog; + u32 act; + + rcu_read_lock(); + /* NB! xdp_prog will always be !NULL, due to the fact that + * this path is enabled by setting an XDP program. + */ + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + xdp->handle += xdp->data - xdp->data_hard_start; + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; + result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); + break; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); + result = !err ? 
I40E_XDP_REDIR : I40E_XDP_CONSUMED; + break; + default: + bpf_warn_invalid_xdp_action(act); + case XDP_ABORTED: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + /* fallthrough -- handle aborts by dropping packet */ + case XDP_DROP: + result = I40E_XDP_CONSUMED; + break; + } + rcu_read_unlock(); + return result; +} + +/** + * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer + * @rx_ring: Rx ring + * @bi: Rx buffer to populate + * + * This function allocates an Rx buffer. The buffer can come from fill + * queue, or via the recycle queue (next_to_alloc). + * + * Returns true for a successful allocation, false otherwise + **/ +static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi) +{ + struct xdp_umem *umem = rx_ring->xsk_umem; + void *addr = bi->addr; + u64 handle, hr; + + if (addr) { + rx_ring->rx_stats.page_reuse_count++; + return true; + } + + if (!xsk_umem_peek_addr(umem, &handle)) { + rx_ring->rx_stats.alloc_page_failed++; + return false; + } + + hr = umem->headroom + XDP_PACKET_HEADROOM; + + bi->dma = xdp_umem_get_dma(umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(umem, handle); + bi->addr += hr; + + bi->handle = handle + umem->headroom; + + xsk_umem_discard_addr(umem); + return true; +} + +/** + * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers + * @rx_ring: Rx ring + * @count: The number of buffers to allocate + * + * This function allocates a number of Rx buffers and places them on + * the Rx ring. + * + * Returns true for a successful allocation, false otherwise + **/ +bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count) +{ + u16 ntu = rx_ring->next_to_use; + union i40e_rx_desc *rx_desc; + struct i40e_rx_buffer *bi; + bool ok = true; + + rx_desc = I40E_RX_DESC(rx_ring, ntu); + bi = &rx_ring->rx_bi[ntu]; + do { + if (!i40e_alloc_buffer_zc(rx_ring, bi)) { + ok = false; + goto no_buffers; + } + + dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0, + rx_ring->rx_buf_len, + DMA_BIDIRECTIONAL); + + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); + + rx_desc++; + bi++; + ntu++; + + if (unlikely(ntu == rx_ring->count)) { + rx_desc = I40E_RX_DESC(rx_ring, 0); + bi = rx_ring->rx_bi; + ntu = 0; + } + + rx_desc->wb.qword1.status_error_len = 0; + count--; + } while (count); + +no_buffers: + if (rx_ring->next_to_use != ntu) + i40e_release_rx_desc(rx_ring, ntu); + + return ok; +} + +/** + * i40e_get_rx_buffer_zc - Return the current Rx buffer + * @rx_ring: Rx ring + * @size: The size of the rx buffer (read from descriptor) + * + * This function returns the current, received Rx buffer, and also + * does DMA synchronization. the Rx ring. + * + * Returns the received Rx buffer + **/ +static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring, + const unsigned int size) +{ + struct i40e_rx_buffer *bi; + + bi = &rx_ring->rx_bi[rx_ring->next_to_clean]; + + /* we are reusing so sync this buffer for CPU use */ + dma_sync_single_range_for_cpu(rx_ring->dev, + bi->dma, 0, + size, + DMA_BIDIRECTIONAL); + + return bi; +} + +/** + * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer + * @rx_ring: Rx ring + * @old_bi: The Rx buffer to recycle + * + * This function recycles a finished Rx buffer, and places it on the + * recycle queue (next_to_alloc). 
+ **/ +static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *old_bi) +{ + struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; + unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; + u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + u16 nta = rx_ring->next_to_alloc; + + /* update, and store next to alloc */ + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + /* transfer page from old buffer to new buffer */ + new_bi->dma = old_bi->dma & mask; + new_bi->dma += hr; + + new_bi->addr = (void *)((unsigned long)old_bi->addr & mask); + new_bi->addr += hr; + + new_bi->handle = old_bi->handle & mask; + new_bi->handle += rx_ring->xsk_umem->headroom; + + old_bi->addr = NULL; +} + +/** + * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations + * @alloc: Zero-copy allocator + * @handle: Buffer handle + **/ +void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle) +{ + struct i40e_rx_buffer *bi; + struct i40e_ring *rx_ring; + u64 hr, mask; + u16 nta; + + rx_ring = container_of(alloc, struct i40e_ring, zca); + hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; + mask = rx_ring->xsk_umem->chunk_mask; + + nta = rx_ring->next_to_alloc; + bi = &rx_ring->rx_bi[nta]; + + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + + handle &= mask; + + bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle); + bi->dma += hr; + + bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); + bi->addr += hr; + + bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; +} + +/** + * i40e_construct_skb_zc - Create skbufff from zero-copy Rx buffer + * @rx_ring: Rx ring + * @bi: Rx buffer + * @xdp: xdp_buff + * + * This functions allocates a new skb from a zero-copy Rx buffer. + * + * Returns the skb, or NULL on failure. + **/ +static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, + struct i40e_rx_buffer *bi, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + unsigned int datasize = xdp->data_end - xdp->data; + struct sk_buff *skb; + + /* allocate a skb to store the frags */ + skb = __napi_alloc_skb(&rx_ring->q_vector->napi, + xdp->data_end - xdp->data_hard_start, + GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!skb)) + return NULL; + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + memcpy(__skb_put(skb, datasize), xdp->data, datasize); + if (metasize) + skb_metadata_set(skb, metasize); + + i40e_reuse_rx_buffer_zc(rx_ring, bi); + return skb; +} + +/** + * i40e_inc_ntc: Advance the next_to_clean index + * @rx_ring: Rx ring + **/ +static void i40e_inc_ntc(struct i40e_ring *rx_ring) +{ + u32 ntc = rx_ring->next_to_clean + 1; + + ntc = (ntc < rx_ring->count) ? 
ntc : 0; + rx_ring->next_to_clean = ntc; + prefetch(I40E_RX_DESC(rx_ring, ntc)); +} + +/** + * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring + * @rx_ring: Rx ring + * @budget: NAPI budget + * + * Returns amount of work completed + **/ +int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) +{ + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + unsigned int xdp_res, xdp_xmit = 0; + bool failure = false; + struct sk_buff *skb; + struct xdp_buff xdp; + + xdp.rxq = &rx_ring->xdp_rxq; + + while (likely(total_rx_packets < (unsigned int)budget)) { + struct i40e_rx_buffer *bi; + union i40e_rx_desc *rx_desc; + unsigned int size; + u16 vlan_tag; + u8 rx_ptype; + u64 qword; + + if (cleaned_count >= I40E_RX_BUFFER_WRITE) { + failure = failure || + !i40e_alloc_rx_buffers_zc(rx_ring, + cleaned_count); + cleaned_count = 0; + } + + rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + + /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we have + * verified the descriptor has been written back. + */ + dma_rmb(); + + bi = i40e_clean_programming_status(rx_ring, rx_desc, + qword); + if (unlikely(bi)) { + i40e_reuse_rx_buffer_zc(rx_ring, bi); + cleaned_count++; + continue; + } + + size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> + I40E_RXD_QW1_LENGTH_PBUF_SHIFT; + if (!size) + break; + + bi = i40e_get_rx_buffer_zc(rx_ring, size); + xdp.data = bi->addr; + xdp.data_meta = xdp.data; + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp.data_end = xdp.data + size; + xdp.handle = bi->handle; + + xdp_res = i40e_run_xdp_zc(rx_ring, &xdp); + if (xdp_res) { + if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) { + xdp_xmit |= xdp_res; + bi->addr = NULL; + } else { + i40e_reuse_rx_buffer_zc(rx_ring, bi); + } + + total_rx_bytes += size; + total_rx_packets++; + + cleaned_count++; + i40e_inc_ntc(rx_ring); + continue; + } + + /* XDP_PASS path */ + + /* NB! We are not checking for errors using + * i40e_test_staterr with + * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that + * SBP is *not* set in PRT_SBPVSI (default not set). + */ + skb = i40e_construct_skb_zc(rx_ring, bi, &xdp); + if (!skb) { + rx_ring->rx_stats.alloc_buff_failed++; + break; + } + + cleaned_count++; + i40e_inc_ntc(rx_ring); + + if (eth_skb_pad(skb)) + continue; + + total_rx_bytes += skb->len; + total_rx_packets++; + + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> + I40E_RXD_QW1_PTYPE_SHIFT; + i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); + + vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ? + le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0; + i40e_receive_skb(rx_ring, skb, vlan_tag); + } + + i40e_finalize_xdp_rx(rx_ring, xdp_xmit); + i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + return failure ? budget : (int)total_rx_packets; +} + +/** + * i40e_xmit_zc - Performs zero-copy Tx AF_XDP + * @xdp_ring: XDP Tx ring + * @budget: NAPI budget + * + * Returns true if the work is finished. 
+ **/ +static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) +{ + struct i40e_tx_desc *tx_desc = NULL; + struct i40e_tx_buffer *tx_bi; + bool work_done = true; + dma_addr_t dma; + u32 len; + + while (budget-- > 0) { + if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { + xdp_ring->tx_stats.tx_busy++; + work_done = false; + break; + } + + if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len)) + break; + + dma_sync_single_for_device(xdp_ring->dev, dma, len, + DMA_BIDIRECTIONAL); + + tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use]; + tx_bi->bytecount = len; + + tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use); + tx_desc->buffer_addr = cpu_to_le64(dma); + tx_desc->cmd_type_offset_bsz = + build_ctob(I40E_TX_DESC_CMD_ICRC + | I40E_TX_DESC_CMD_EOP, + 0, len, 0); + + xdp_ring->next_to_use++; + if (xdp_ring->next_to_use == xdp_ring->count) + xdp_ring->next_to_use = 0; + } + + if (tx_desc) { + /* Request an interrupt for the last frame and bump tail ptr. */ + tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << + I40E_TXD_QW1_CMD_SHIFT); + i40e_xdp_ring_update_tail(xdp_ring); + + xsk_umem_consume_tx_done(xdp_ring->xsk_umem); + } + + return !!budget && work_done; +} + +/** + * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry + * @tx_ring: XDP Tx ring + * @tx_bi: Tx buffer info to clean + **/ +static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring, + struct i40e_tx_buffer *tx_bi) +{ + xdp_return_frame(tx_bi->xdpf); + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_bi, dma), + dma_unmap_len(tx_bi, len), DMA_TO_DEVICE); + dma_unmap_len_set(tx_bi, len, 0); +} + +/** + * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries + * @vsi: Current VSI + * @tx_ring: XDP Tx ring + * @napi_budget: NAPI budget + * + * Returns true if cleanup/transmission is done. + **/ +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget) +{ + unsigned int ntc, total_bytes = 0, budget = vsi->work_limit; + u32 i, completed_frames, frames_ready, xsk_frames = 0; + struct xdp_umem *umem = tx_ring->xsk_umem; + u32 head_idx = i40e_get_head(tx_ring); + bool work_done = true, xmit_done; + struct i40e_tx_buffer *tx_bi; + + if (head_idx < tx_ring->next_to_clean) + head_idx += tx_ring->count; + frames_ready = head_idx - tx_ring->next_to_clean; + + if (frames_ready == 0) { + goto out_xmit; + } else if (frames_ready > budget) { + completed_frames = budget; + work_done = false; + } else { + completed_frames = frames_ready; + } + + ntc = tx_ring->next_to_clean; + + for (i = 0; i < completed_frames; i++) { + tx_bi = &tx_ring->tx_bi[ntc]; + + if (tx_bi->xdpf) + i40e_clean_xdp_tx_buffer(tx_ring, tx_bi); + else + xsk_frames++; + + tx_bi->xdpf = NULL; + total_bytes += tx_bi->bytecount; + + if (++ntc >= tx_ring->count) + ntc = 0; + } + + tx_ring->next_to_clean += completed_frames; + if (unlikely(tx_ring->next_to_clean >= tx_ring->count)) + tx_ring->next_to_clean -= tx_ring->count; + + if (xsk_frames) + xsk_umem_complete_tx(umem, xsk_frames); + + i40e_arm_wb(tx_ring, vsi, budget); + i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); + +out_xmit: + xmit_done = i40e_xmit_zc(tx_ring, budget); + + return work_done && xmit_done; +} + +/** + * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit + * @dev: the netdevice + * @queue_id: queue id to wake up + * + * Returns <0 for errors, 0 otherwise.
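+ * + * Illustrative note: user space typically reaches this hook via sendmsg() on an AF_XDP socket after filling the Tx ring, e.g. sendmsg(xsk_fd, &msg, MSG_DONTWAIT), which makes the stack call ndo_xsk_async_xmit for the bound queue (xsk_fd and msg are hypothetical names).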
+ **/ +int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_ring *ring; + + if (test_bit(__I40E_VSI_DOWN, vsi->state)) + return -ENETDOWN; + + if (!i40e_enabled_xdp_vsi(vsi)) + return -ENXIO; + + if (queue_id >= vsi->num_queue_pairs) + return -ENXIO; + + if (!vsi->xdp_rings[queue_id]->xsk_umem) + return -ENXIO; + + ring = vsi->xdp_rings[queue_id]; + + /* The idea here is that if NAPI is running, mark a miss, so + * it will run again. If not, trigger an interrupt and + * schedule the NAPI from interrupt context. If NAPI would be + * scheduled here, the interrupt affinity would not be + * honored. + */ + if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) + i40e_force_wb(vsi, ring->q_vector); + + return 0; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h new file mode 100644 index 000000000000..9038c5d5cf08 --- /dev/null +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Intel Corporation. */ + +#ifndef _I40E_XSK_H_ +#define _I40E_XSK_H_ + +struct i40e_vsi; +struct xdp_umem; +struct zero_copy_allocator; + +int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair); +int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair); +int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem, + u16 qid); +int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem, + u16 qid); +void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle); +bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count); +int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); + +bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, + struct i40e_ring *tx_ring, int napi_budget); +int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id); + +#endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 69efe0aec76a..50f65ab737c9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -6,18 +6,229 @@ #include <linux/uaccess.h> -struct i40evf_stats { +/* ethtool statistics helpers */ + +/** + * struct i40e_stats - definition for an ethtool statistic + * @stat_string: statistic name to display in ethtool -S output + * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64) + * @stat_offset: offsetof() the stat from a base pointer + * + * This structure defines a statistic to be added to the ethtool stats buffer. + * It defines a statistic as offset from a common base pointer. Stats should + * be defined in constant arrays using the I40E_STAT macro, with every element + * of the array using the same _type for calculating the sizeof_stat and + * stat_offset. + * + * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or + * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from + * the i40e_add_ethtool_stat() helper function. + * + * The @stat_string is interpreted as a format string, allowing formatted + * values to be inserted while looping over multiple structures for a given + * statistics array. Thus, every statistic string in an array should have the + * same type and number of format specifiers, to be formatted by variadic + * arguments to the i40e_add_stat_string() helper function. 
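+ * + * For example (illustrative, using the macros defined below), a u64 counter in struct i40evf_adapter is described as I40E_STAT(struct i40evf_adapter, "tx_errors", current_stats.tx_errors), and a formatted per-queue stat as I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes).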
+ **/ +struct i40e_stats { char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; int stat_offset; }; -#define I40EVF_STAT(_name, _stat) { \ +/* Helper macro to define an i40e_stat structure with proper size and type. + * Use this when defining constant statistics arrays. Note that @_type expects + * only a type name and is used multiple times. + */ +#define I40E_STAT(_type, _name, _stat) { \ .stat_string = _name, \ - .stat_offset = offsetof(struct i40evf_adapter, _stat) \ + .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ + .stat_offset = offsetof(_type, _stat) \ +} + +/* Helper macro for defining some statistics directly copied from the netdev + * stats structure. + */ +#define I40E_NETDEV_STAT(_net_stat) \ + I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat) + +/* Helper macro for defining some statistics related to queues */ +#define I40E_QUEUE_STAT(_name, _stat) \ + I40E_STAT(struct i40e_ring, _name, _stat) + +/* Stats associated with a Tx or Rx ring */ +static const struct i40e_stats i40e_gstrings_queue_stats[] = { + I40E_QUEUE_STAT("%s-%u.packets", stats.packets), + I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes), +}; + +/** + * i40evf_add_one_ethtool_stat - copy the stat into the supplied buffer + * @data: location to store the stat value + * @pointer: basis for where to copy from + * @stat: the stat definition + * + * Copies the stat data defined by the pointer and stat structure pair into + * the memory supplied as data. Used to implement i40e_add_ethtool_stats and + * i40evf_add_queue_stats. If the pointer is null, data will be zero'd. + */ +static void +i40evf_add_one_ethtool_stat(u64 *data, void *pointer, + const struct i40e_stats *stat) +{ + char *p; + + if (!pointer) { + /* ensure that the ethtool data buffer is zero'd for any stats + * which don't have a valid pointer. + */ + *data = 0; + return; + } + + p = (char *)pointer + stat->stat_offset; + switch (stat->sizeof_stat) { + case sizeof(u64): + *data = *((u64 *)p); + break; + case sizeof(u32): + *data = *((u32 *)p); + break; + case sizeof(u16): + *data = *((u16 *)p); + break; + case sizeof(u8): + *data = *((u8 *)p); + break; + default: + WARN_ONCE(1, "unexpected stat size for %s", + stat->stat_string); + *data = 0; + } +} + +/** + * __i40evf_add_ethtool_stats - copy stats into the ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location to copy stats from + * @stats: array of stats to copy + * @size: the size of the stats definition + * + * Copy the stats defined by the stats array using the pointer as a base into + * the data buffer supplied by ethtool. Updates the data pointer to point to + * the next empty location for successive calls to __i40evf_add_ethtool_stats. + * If pointer is null, set the data values to zero and update the pointer to + * skip these stats. + **/ +static void +__i40evf_add_ethtool_stats(u64 **data, void *pointer, + const struct i40e_stats stats[], + const unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) + i40evf_add_one_ethtool_stat((*data)++, pointer, &stats[i]); +} + +/** + * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer + * @data: ethtool stats buffer + * @pointer: location where stats are stored + * @stats: static const array of stat definitions + * + * Macro to ease the use of __i40evf_add_ethtool_stats by taking a static + * constant stats array and passing the ARRAY_SIZE(). This avoids typos by + * ensuring that we pass the size associated with the given stats array. 
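+ * + * Typical call (illustrative; this matches the use in i40evf_get_ethtool_stats() below): i40e_add_ethtool_stats(&data, adapter, i40evf_gstrings_stats);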
+ * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. + **/ +#define i40e_add_ethtool_stats(data, pointer, stats) \ + __i40evf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats)) + +/** + * i40evf_add_queue_stats - copy queue statistics into supplied buffer + * @data: ethtool stats buffer + * @ring: the ring to copy + * + * Queue statistics must be copied while protected by + * u64_stats_fetch_begin_irq, so we can't directly use i40e_add_ethtool_stats. + * Assumes that queue stats are defined in i40e_gstrings_queue_stats. If the + * ring pointer is null, zero out the queue stat values and update the data + * pointer. Otherwise safely copy the stats from the ring into the supplied + * buffer and update the data pointer when finished. + * + * This function expects to be called while under rcu_read_lock(). + **/ +static void +i40evf_add_queue_stats(u64 **data, struct i40e_ring *ring) +{ + const unsigned int size = ARRAY_SIZE(i40e_gstrings_queue_stats); + const struct i40e_stats *stats = i40e_gstrings_queue_stats; + unsigned int start; + unsigned int i; + + /* To avoid invalid statistics values, ensure that we keep retrying + * the copy until we get a consistent value according to + * u64_stats_fetch_retry_irq. But first, make sure our ring is + * non-null before attempting to access its syncp. + */ + do { + start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp); + for (i = 0; i < size; i++) { + i40evf_add_one_ethtool_stat(&(*data)[i], ring, + &stats[i]); + } + } while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* Once we successfully copy the stats in, update the data pointer */ + *data += size; +} + +/** + * __i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * @size: size of the stats array + * + * Format and copy the strings described by stats into the buffer pointed at + * by p. + **/ +static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[], + const unsigned int size, ...) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + va_list args; + + va_start(args, size); + vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args); + *p += ETH_GSTRING_LEN; + va_end(args); + } } -/* All stats are u64, so we don't need to track the size of the field. */ -static const struct i40evf_stats i40evf_gstrings_stats[] = { +/** + * i40e_add_stat_strings - copy stat strings into ethtool buffer + * @p: ethtool supplied buffer + * @stats: stat definitions array + * + * Format and copy the strings described by the const static stats value into + * the buffer pointed at by p. + * + * The parameter @stats is evaluated twice, so parameters with side effects + * should be avoided. Additionally, stats must be an array such that + * ARRAY_SIZE can be called on it. + **/ +#define i40e_add_stat_strings(p, stats, ...)
\ + __i40e_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__) + +#define I40EVF_STAT(_name, _stat) \ + I40E_STAT(struct i40evf_adapter, _name, _stat) + +static const struct i40e_stats i40evf_gstrings_stats[] = { I40EVF_STAT("rx_bytes", current_stats.rx_bytes), I40EVF_STAT("rx_unicast", current_stats.rx_unicast), I40EVF_STAT("rx_multicast", current_stats.rx_multicast), @@ -32,13 +243,9 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { I40EVF_STAT("tx_errors", current_stats.tx_errors), }; -#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) -#define I40EVF_QUEUE_STATS_LEN(_dev) \ - (((struct i40evf_adapter *)\ - netdev_priv(_dev))->num_active_queues \ - * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64))) -#define I40EVF_STATS_LEN(_dev) \ - (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) +#define I40EVF_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats) + +#define I40EVF_QUEUE_STATS_LEN ARRAY_SIZE(i40e_gstrings_queue_stats) /* For now we have one and only one private flag and it is only defined * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead @@ -117,13 +324,13 @@ static int i40evf_get_link_ksettings(struct net_device *netdev, * @netdev: network interface device structure * @sset: id of string set * - * Reports size of string table. This driver only supports - * strings for statistics. + * Reports size of various string tables. **/ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) - return I40EVF_STATS_LEN(netdev); + return I40EVF_STATS_LEN + + (I40EVF_QUEUE_STATS_LEN * 2 * I40EVF_MAX_REQ_QUEUES); else if (sset == ETH_SS_PRIV_FLAGS) return I40EVF_PRIV_FLAGS_STR_LEN; else @@ -142,20 +349,66 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40evf_adapter *adapter = netdev_priv(netdev); - unsigned int i, j; - char *p; + unsigned int i; + + i40e_add_ethtool_stats(&data, adapter, i40evf_gstrings_stats); - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { - p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset; - data[i] = *(u64 *)p; + rcu_read_lock(); + for (i = 0; i < I40EVF_MAX_REQ_QUEUES; i++) { + struct i40e_ring *ring; + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? + &adapter->tx_rings[i] : NULL); + i40evf_add_queue_stats(&data, ring); + + /* Avoid accessing un-allocated queues */ + ring = (i < adapter->num_active_queues ? 
+ &adapter->rx_rings[i] : NULL); + i40evf_add_queue_stats(&data, ring); } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->tx_rings[j].stats.packets; - data[i++] = adapter->tx_rings[j].stats.bytes; + rcu_read_unlock(); +} + +/** + * i40evf_get_priv_flag_strings - Get private flag strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the private flags string table + **/ +static void i40evf_get_priv_flag_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + snprintf(data, ETH_GSTRING_LEN, "%s", + i40evf_gstrings_priv_flags[i].flag_string); + data += ETH_GSTRING_LEN; } - for (j = 0; j < adapter->num_active_queues; j++) { - data[i++] = adapter->rx_rings[j].stats.packets; - data[i++] = adapter->rx_rings[j].stats.bytes; +} + +/** + * i40evf_get_stat_strings - Get stat strings + * @netdev: network interface device structure + * @data: buffer for string data + * + * Builds the statistics string table + **/ +static void i40evf_get_stat_strings(struct net_device *netdev, u8 *data) +{ + unsigned int i; + + i40e_add_stat_strings(&data, i40evf_gstrings_stats); + + /* Queues are always allocated in pairs, so we just use num_tx_queues + * for both Tx and Rx queues. + */ + for (i = 0; i < netdev->num_tx_queues; i++) { + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "tx", i); + i40e_add_stat_strings(&data, i40e_gstrings_queue_stats, + "rx", i); } } @@ -165,38 +418,19 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, * @sset: id of string set * @data: buffer for string data * - * Builds stats string table. + * Builds string tables for various string sets **/ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) { - struct i40evf_adapter *adapter = netdev_priv(netdev); - u8 *p = data; - int i; - - if (sset == ETH_SS_STATS) { - for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { - memcpy(p, i40evf_gstrings_stats[i].stat_string, - ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - for (i = 0; i < adapter->num_active_queues; i++) { - snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i); - p += ETH_GSTRING_LEN; - snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); - p += ETH_GSTRING_LEN; - } - } else if (sset == ETH_SS_PRIV_FLAGS) { - for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { - snprintf(p, ETH_GSTRING_LEN, "%s", - i40evf_gstrings_priv_flags[i].flag_string); - p += ETH_GSTRING_LEN; - } + switch (sset) { + case ETH_SS_STATS: + i40evf_get_stat_strings(netdev, data); + break; + case ETH_SS_PRIV_FLAGS: + i40evf_get_priv_flag_strings(netdev, data); + break; + default: + break; } } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5906c1c1d19d..174d1da2857b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -3120,18 +3120,19 @@ static int i40evf_set_features(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - /* Don't allow changing VLAN_RX flag when VLAN is set for VF - * and return an error in this case + /* Don't allow changing VLAN_RX flag when adapter is not capable + * of VLAN offload */ - if (VLAN_ALLOWED(adapter)) { + if (!VLAN_ALLOWED(adapter)) { + 
if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) + return -EINVAL; + } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { if (features & NETIF_F_HW_VLAN_CTAG_RX) adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING; else adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING; - } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) { - return -EINVAL; } return 0; @@ -3358,6 +3359,8 @@ int i40evf_process_config(struct i40evf_adapter *adapter) if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->priv_flags |= IFF_UNICAST_FLT; + /* Do not turn on offloads when they are requested to be turned off. * TSO needs minimum 576 bytes to work correctly. */ @@ -3907,6 +3910,8 @@ static void i40evf_remove(struct pci_dev *pdev) if (adapter->watchdog_timer.function) del_timer_sync(&adapter->watchdog_timer); + cancel_work_sync(&adapter->adminq_task); + i40evf_free_rss(adapter); if (hw->aq.asq.count) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 565677de5ba3..6579dabab78c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -154,6 +154,32 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) } /** + * i40evf_validate_num_queues + * @adapter: adapter structure + * + * Validate that the number of queues the PF has sent in + * VIRTCHNL_OP_GET_VF_RESOURCES is not larger than the VF can handle. + **/ +static void i40evf_validate_num_queues(struct i40evf_adapter *adapter) +{ + if (adapter->vf_res->num_queue_pairs > I40EVF_MAX_REQ_QUEUES) { + struct virtchnl_vsi_resource *vsi_res; + int i; + + dev_info(&adapter->pdev->dev, "Received %d queues, but can only have a max of %d\n", + adapter->vf_res->num_queue_pairs, + I40EVF_MAX_REQ_QUEUES); + dev_info(&adapter->pdev->dev, "Fixing by reducing queues to %d\n", + I40EVF_MAX_REQ_QUEUES); + adapter->vf_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + for (i = 0; i < adapter->vf_res->num_vsis; i++) { + vsi_res = &adapter->vf_res->vsi_res[i]; + vsi_res->num_queue_pairs = I40EVF_MAX_REQ_QUEUES; + } + } +} + +/** * i40evf_get_vf_config * @adapter: private adapter structure * @@ -195,6 +221,11 @@ int i40evf_get_vf_config(struct i40evf_adapter *adapter) err = (i40e_status)le32_to_cpu(event.desc.cookie_low); memcpy(adapter->vf_res, event.msg_buf, min(event.msg_len, len)); + /* some PFs send more queues than we should have so validate that + * we aren't getting too many queues + */ + if (!err) + i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(hw, adapter->vf_res); out_alloc: kfree(event.msg_buf); @@ -1329,9 +1360,17 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource); memcpy(adapter->vf_res, msg, min(msglen, len)); + i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } i40evf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/ice/ice.h 
b/drivers/net/ethernet/intel/ice/ice.h index 868f4a1d0f72..9cf233d085d8 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -62,6 +62,7 @@ extern const char ice_drv_ver[]; #define ICE_RES_VALID_BIT 0x8000 #define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1) #define ICE_INVAL_Q_INDEX 0xffff +#define ICE_INVAL_VFID 256 #define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4) @@ -122,6 +123,7 @@ struct ice_sw { enum ice_state { __ICE_DOWN, __ICE_NEEDS_RESTART, + __ICE_PREPARED_FOR_RESET, /* set by driver when prepared */ __ICE_RESET_RECOVERY_PENDING, /* set by driver when reset starts */ __ICE_PFR_REQ, /* set by driver and peers */ __ICE_CORER_REQ, /* set by driver and peers */ @@ -132,9 +134,11 @@ enum ice_state { __ICE_SUSPENDED, /* set on module remove path */ __ICE_RESET_FAILED, /* set by reset/rebuild */ __ICE_ADMINQ_EVENT_PENDING, + __ICE_MDD_EVENT_PENDING, __ICE_FLTR_OVERFLOW_PROMISC, __ICE_CFG_BUSY, __ICE_SERVICE_SCHED, + __ICE_SERVICE_DIS, __ICE_STATE_NBITS /* must be last */ }; @@ -270,6 +274,9 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded; /* has previous stats been loaded */ + u32 tx_timeout_count; + unsigned long tx_timeout_last_recovery; + u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; }; diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index a0614f472658..f8dfd675486c 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -443,6 +443,8 @@ struct ice_aqc_vsi_props { u8 reserved[24]; }; +#define ICE_MAX_NUM_RECIPES 64 + /* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3) */ struct ice_aqc_sw_rules { @@ -771,9 +773,8 @@ struct ice_aqc_layer_props { u8 chunk_size; __le16 max_device_nodes; __le16 max_pf_nodes; - u8 rsvd0[2]; - __le16 max_shared_rate_lmtr; - __le16 max_children; + u8 rsvd0[4]; + __le16 max_sibl_grp_sz; __le16 max_cir_rl_profiles; __le16 max_eir_rl_profiles; __le16 max_srl_profiles; @@ -919,9 +920,11 @@ struct ice_aqc_set_phy_cfg_data { u8 caps; #define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0) #define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1) -#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) -#define ICE_AQ_PHY_ENA_LINK BIT(3) -#define ICE_AQ_PHY_ENA_ATOMIC_LINK BIT(5) +#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2) +#define ICE_AQ_PHY_ENA_LINK BIT(3) +#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5) +#define ICE_AQ_PHY_ENA_LESM BIT(6) +#define ICE_AQ_PHY_ENA_AUTO_FEC BIT(7) u8 low_power_ctrl; __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */ __le16 eeer_value; @@ -1203,6 +1206,84 @@ struct ice_aqc_dis_txq { struct ice_aqc_dis_txq_item qgrps[1]; }; +/* Configure Firmware Logging Command (indirect 0xFF09) + * Logging Information Read Response (indirect 0xFF10) + * Note: The 0xFF10 command has no input parameters. 
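+ * + * Usage sketch (illustrative, not driver code; see ice_cfg_fw_log() in ice_common.c): enabling logging over the AdminQ means setting both the enable bit and its valid bit, e.g. cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID;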
+ */ +struct ice_aqc_fw_logging { + u8 log_ctrl; +#define ICE_AQC_FW_LOG_AQ_EN BIT(0) +#define ICE_AQC_FW_LOG_UART_EN BIT(1) + u8 rsvd0; + u8 log_ctrl_valid; /* Not used by 0xFF10 Response */ +#define ICE_AQC_FW_LOG_AQ_VALID BIT(0) +#define ICE_AQC_FW_LOG_UART_VALID BIT(1) + u8 rsvd1[5]; + __le32 addr_high; + __le32 addr_low; +}; + +enum ice_aqc_fw_logging_mod { + ICE_AQC_FW_LOG_ID_GENERAL = 0, + ICE_AQC_FW_LOG_ID_CTRL, + ICE_AQC_FW_LOG_ID_LINK, + ICE_AQC_FW_LOG_ID_LINK_TOPO, + ICE_AQC_FW_LOG_ID_DNL, + ICE_AQC_FW_LOG_ID_I2C, + ICE_AQC_FW_LOG_ID_SDP, + ICE_AQC_FW_LOG_ID_MDIO, + ICE_AQC_FW_LOG_ID_ADMINQ, + ICE_AQC_FW_LOG_ID_HDMA, + ICE_AQC_FW_LOG_ID_LLDP, + ICE_AQC_FW_LOG_ID_DCBX, + ICE_AQC_FW_LOG_ID_DCB, + ICE_AQC_FW_LOG_ID_NETPROXY, + ICE_AQC_FW_LOG_ID_NVM, + ICE_AQC_FW_LOG_ID_AUTH, + ICE_AQC_FW_LOG_ID_VPD, + ICE_AQC_FW_LOG_ID_IOSF, + ICE_AQC_FW_LOG_ID_PARSER, + ICE_AQC_FW_LOG_ID_SW, + ICE_AQC_FW_LOG_ID_SCHEDULER, + ICE_AQC_FW_LOG_ID_TXQ, + ICE_AQC_FW_LOG_ID_RSVD, + ICE_AQC_FW_LOG_ID_POST, + ICE_AQC_FW_LOG_ID_WATCHDOG, + ICE_AQC_FW_LOG_ID_TASK_DISPATCH, + ICE_AQC_FW_LOG_ID_MNG, + ICE_AQC_FW_LOG_ID_MAX, +}; + +/* This is the buffer for both of the logging commands. + * The entry array size depends on the datalen parameter in the descriptor. + * There will be a total of datalen / 2 entries. + */ +struct ice_aqc_fw_logging_data { + __le16 entry[1]; +#define ICE_AQC_FW_LOG_ID_S 0 +#define ICE_AQC_FW_LOG_ID_M (0xFFF << ICE_AQC_FW_LOG_ID_S) + +#define ICE_AQC_FW_LOG_CONF_SUCCESS 0 /* Used by response */ +#define ICE_AQC_FW_LOG_CONF_BAD_INDX BIT(12) /* Used by response */ + +#define ICE_AQC_FW_LOG_EN_S 12 +#define ICE_AQC_FW_LOG_EN_M (0xF << ICE_AQC_FW_LOG_EN_S) +#define ICE_AQC_FW_LOG_INFO_EN BIT(12) /* Used by command */ +#define ICE_AQC_FW_LOG_INIT_EN BIT(13) /* Used by command */ +#define ICE_AQC_FW_LOG_FLOW_EN BIT(14) /* Used by command */ +#define ICE_AQC_FW_LOG_ERR_EN BIT(15) /* Used by command */ +}; + +/* Get/Clear FW Log (indirect 0xFF11) */ +struct ice_aqc_get_clear_fw_log { + u8 flags; +#define ICE_AQC_FW_LOG_CLEAR BIT(0) +#define ICE_AQC_FW_LOG_MORE_DATA_AVAIL BIT(1) + u8 rsvd1[7]; + __le32 addr_high; + __le32 addr_low; +}; + /** * struct ice_aq_desc - Admin Queue (AQ) descriptor * @flags: ICE_AQ_FLAG_* flags @@ -1252,6 +1333,9 @@ struct ice_aq_desc { struct ice_aqc_add_txqs add_txqs; struct ice_aqc_dis_txqs dis_txqs; struct ice_aqc_add_get_update_free_vsi vsi_cmd; + struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res; + struct ice_aqc_fw_logging fw_logging; + struct ice_aqc_get_clear_fw_log get_clear_fw_log; struct ice_aqc_alloc_free_res_cmd sw_res_ctrl; struct ice_aqc_set_event_mask set_event_mask; struct ice_aqc_get_link_status get_link_status; @@ -1349,6 +1433,9 @@ enum ice_adminq_opc { /* TX queue handling commands/events */ ice_aqc_opc_add_txqs = 0x0C30, ice_aqc_opc_dis_txqs = 0x0C31, + + /* debug commands */ + ice_aqc_opc_fw_logging = 0xFF09, }; #endif /* _ICE_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 661beea6af79..0847dbf9d42f 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -7,16 +7,16 @@ #define ICE_PF_RESET_WAIT_COUNT 200 -#define ICE_NIC_FLX_ENTRY(hw, mdid, idx) \ - wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(ICE_RXDID_FLEX_NIC), \ +#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \ + wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \ ((ICE_RX_OPC_MDID << \ GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \ 
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \ (((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \ GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M)) -#define ICE_NIC_FLX_FLG_ENTRY(hw, flg_0, flg_1, flg_2, flg_3, idx) \ - wr32((hw), GLFLXP_RXDID_FLAGS(ICE_RXDID_FLEX_NIC, idx), \ +#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \ + wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \ (((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \ GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \ (((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \ @@ -125,7 +125,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, * * Returns the various PHY capabilities supported on the Port (0x0600) */ -static enum ice_status +enum ice_status ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *pcaps, struct ice_sq_cd *cd) @@ -290,30 +290,85 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, } /** - * ice_init_flex_parser - initialize rx flex parser + * ice_init_flex_flags * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID * - * Function to initialize flex descriptors + * Function to initialize Rx flex flags */ -static void ice_init_flex_parser(struct ice_hw *hw) +static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id) { u8 idx = 0; - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_LOW, 0); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_HASH_HIGH, 1); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_LOWER, 2); - ICE_NIC_FLX_ENTRY(hw, ICE_RX_MDID_FLOW_ID_HIGH, 3); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_FRG, ICE_RXFLG_UDP_GRE, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_FIN, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_SYN, ICE_RXFLG_RST, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, - ICE_RXFLG_EVLAN_x8100, ICE_RXFLG_EVLAN_x9100, - idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_VLAN_x8100, ICE_RXFLG_TNL_VLAN, - ICE_RXFLG_TNL_MAC, ICE_RXFLG_TNL0, idx++); - ICE_NIC_FLX_FLG_ENTRY(hw, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, - ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + /* Flex-flag fields (0-2) are programmed with FLG64 bits with layout: + * flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE + * flexiflags1[3:0] - Not used for flag programming + * flexiflags2[7:0] - Tunnel and VLAN types + * 2 invalid fields in last index + */ + switch (prof_id) { + /* Rx flex flags are currently programmed for the NIC profiles only. + * Different flag bit programming configurations can be added per + * profile as needed. + */ + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG, + ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_FIN, idx++); + /* flex flag 1 is not used for flexi-flag programming, skipping + * these four FLG64 bits. 
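+ * (Added note: each ICE_PROG_FLG_ENTRY() call that follows packs four flag selections into one GLFLXP_RXDID_FLAGS register word at index "idx".)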
+ */ + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100, + ICE_RXFLG_EVLAN_x9100, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100, + ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC, + ICE_RXFLG_TNL0, idx++); + ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2, + ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Flag programming for profile ID %d not supported\n", + prof_id); + } +} + +/** + * ice_init_flex_flds + * @hw: pointer to the hardware structure + * @prof_id: Rx Descriptor Builder profile ID + * + * Function to initialize flex descriptors + */ +static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id) +{ + enum ice_flex_rx_mdid mdid; + + switch (prof_id) { + case ICE_RXDID_FLEX_NIC: + case ICE_RXDID_FLEX_NIC_2: + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1); + ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2); + + mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ? + ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH; + + ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3); + + ice_init_flex_flags(hw, prof_id); + break; + + default: + ice_debug(hw, ICE_DBG_INIT, + "Field init for profile ID %d not supported\n", + prof_id); + } } /** @@ -333,20 +388,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - mutex_init(&sw->mac_list_lock); - INIT_LIST_HEAD(&sw->mac_list_head); - - mutex_init(&sw->vlan_list_lock); - INIT_LIST_HEAD(&sw->vlan_list_head); - - mutex_init(&sw->eth_m_list_lock); - INIT_LIST_HEAD(&sw->eth_m_list_head); - - mutex_init(&sw->promisc_list_lock); - INIT_LIST_HEAD(&sw->promisc_list_head); - - mutex_init(&sw->mac_vlan_list_lock); - INIT_LIST_HEAD(&sw->mac_vlan_list_head); + ice_init_def_sw_recp(hw); return 0; } @@ -360,22 +402,201 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) struct ice_switch_info *sw = hw->switch_info; struct ice_vsi_list_map_info *v_pos_map; struct ice_vsi_list_map_info *v_tmp_map; + struct ice_sw_recipe *recps; + u8 i; list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head, list_entry) { list_del(&v_pos_map->list_entry); devm_kfree(ice_hw_to_dev(hw), v_pos_map); } + recps = hw->switch_info->recp_list; + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry; + + recps[i].root_rid = i; + mutex_destroy(&recps[i].filt_rule_lock); + list_for_each_entry_safe(lst_itr, tmp_entry, + &recps[i].filt_rules, list_entry) { + list_del(&lst_itr->list_entry); + devm_kfree(ice_hw_to_dev(hw), lst_itr); + } + } - mutex_destroy(&sw->mac_list_lock); - mutex_destroy(&sw->vlan_list_lock); - mutex_destroy(&sw->eth_m_list_lock); - mutex_destroy(&sw->promisc_list_lock); - mutex_destroy(&sw->mac_vlan_list_lock); - + devm_kfree(ice_hw_to_dev(hw), sw->recp_list); devm_kfree(ice_hw_to_dev(hw), sw); } +#define ICE_FW_LOG_DESC_SIZE(n) (sizeof(struct ice_aqc_fw_logging_data) + \ + (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry))) +#define ICE_FW_LOG_DESC_SIZE_MAX \ + ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX) + +/** + * ice_cfg_fw_log - configure FW logging + * @hw: pointer to the hw struct + * @enable: enable certain FW logging events if true, disable all if false + * + * This function enables/disables the FW logging via Rx CQ 
events and a UART + * port based on predetermined configurations. FW logging via the Rx CQ can be + * enabled/disabled for individual PF's. However, FW logging via the UART can + * only be enabled/disabled for all PFs on the same device. + * + * To enable overall FW logging, the "cq_en" and "uart_en" enable bits in + * hw->fw_log need to be set accordingly, e.g. based on user-provided input, + * before initializing the device. + * + * When re/configuring FW logging, callers need to update the "cfg" elements of + * the hw->fw_log.evnts array with the desired logging event configurations for + * modules of interest. When disabling FW logging completely, the callers can + * just pass false in the "enable" parameter. On completion, the function will + * update the "cur" element of the hw->fw_log.evnts array with the resulting + * logging event configurations of the modules that are being re/configured. FW + * logging modules that are not part of a reconfiguration operation retain their + * previous states. + * + * Before resetting the device, it is recommended that the driver disables FW + * logging before shutting down the control queue. When disabling FW logging + * ("enable" = false), the latest configurations of FW logging events stored in + * hw->fw_log.evnts[] are not overridden to allow them to be reconfigured after + * a device reset. + * + * When enabling FW logging to emit log messages via the Rx CQ during the + * device's initialization phase, a mechanism alternative to interrupt handlers + * needs to be used to extract FW log messages from the Rx CQ periodically and + * to prevent the Rx CQ from being full and stalling other types of control + * messages from FW to SW. Interrupts are typically disabled during the device's + * initialization phase. + */ +static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) +{ + struct ice_aqc_fw_logging_data *data = NULL; + struct ice_aqc_fw_logging *cmd; + enum ice_status status = 0; + u16 i, chgs = 0, len = 0; + struct ice_aq_desc desc; + u8 actv_evnts = 0; + void *buf = NULL; + + if (!hw->fw_log.cq_en && !hw->fw_log.uart_en) + return 0; + + /* Disable FW logging only when the control queue is still responsive */ + if (!enable && + (!hw->fw_log.actv_evnts || !ice_check_sq_alive(hw, &hw->adminq))) + return 0; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging); + cmd = &desc.params.fw_logging; + + /* Indicate which controls are valid */ + if (hw->fw_log.cq_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_AQ_VALID; + + if (hw->fw_log.uart_en) + cmd->log_ctrl_valid |= ICE_AQC_FW_LOG_UART_VALID; + + if (enable) { + /* Fill in an array of entries with FW logging modules and + * logging events being reconfigured. + */ + for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { + u16 val; + + /* Keep track of enabled event types */ + actv_evnts |= hw->fw_log.evnts[i].cfg; + + if (hw->fw_log.evnts[i].cfg == hw->fw_log.evnts[i].cur) + continue; + + if (!data) { + data = devm_kzalloc(ice_hw_to_dev(hw), + ICE_FW_LOG_DESC_SIZE_MAX, + GFP_KERNEL); + if (!data) + return ICE_ERR_NO_MEMORY; + } + + val = i << ICE_AQC_FW_LOG_ID_S; + val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S; + data->entry[chgs++] = cpu_to_le16(val); + } + + /* Only enable FW logging if at least one module is specified. + * If FW logging is currently enabled but all modules are not + * enabled to emit log messages, disable FW logging altogether. 
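+ * In short (clarifying note): actv_evnts == 0 sends the command with no _EN bits set, which turns FW logging off, while chgs == 0 with events still active means nothing changed, so the function exits without sending a command.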
+ */ + if (actv_evnts) { + /* Leave if there is effectively no change */ + if (!chgs) + goto out; + + if (hw->fw_log.cq_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_AQ_EN; + + if (hw->fw_log.uart_en) + cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN; + + buf = data; + len = ICE_FW_LOG_DESC_SIZE(chgs); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } + } + + status = ice_aq_send_cmd(hw, &desc, buf, len, NULL); + if (!status) { + /* Update the current configuration to reflect events enabled. + * hw->fw_log.cq_en and hw->fw_log.uart_en indicate if the FW + * logging mode is enabled for the device. They do not reflect + * actual modules being enabled to emit log messages. So, their + * values remain unchanged even when all modules are disabled. + */ + u16 cnt = enable ? chgs : (u16)ICE_AQC_FW_LOG_ID_MAX; + + hw->fw_log.actv_evnts = actv_evnts; + for (i = 0; i < cnt; i++) { + u16 v, m; + + if (!enable) { + /* When disabling all FW logging events as part + * of device's de-initialization, the original + * configurations are retained, and can be used + * to reconfigure FW logging later if the device + * is re-initialized. + */ + hw->fw_log.evnts[i].cur = 0; + continue; + } + + v = le16_to_cpu(data->entry[i]); + m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S; + hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg; + } + } + +out: + if (data) + devm_kfree(ice_hw_to_dev(hw), data); + + return status; +} + +/** + * ice_output_fw_log + * @hw: pointer to the hw struct + * @desc: pointer to the AQ message descriptor + * @buf: pointer to the buffer accompanying the AQ message + * + * Formats a FW Log message and outputs it via the standard driver logs. + */ +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) +{ + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); + ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, + le16_to_cpu(desc->datalen)); + ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); +} + /** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure @@ -410,6 +631,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_cqinit; + /* Enable FW logging. Not fatal if this fails. 
*/ + status = ice_cfg_fw_log(hw, true); + if (status) + ice_debug(hw, ICE_DBG_INIT, "Failed to enable FW logging.\n"); + status = ice_clear_pf_cfg(hw); if (status) goto err_unroll_cqinit; @@ -472,6 +698,13 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; + /* need a valid SW entry point to build a Tx tree */ + if (!hw->sw_entry_point_layer) { + ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n"); + status = ICE_ERR_CFG; + goto err_unroll_sched; + } + status = ice_init_fltr_mgmt_struct(hw); if (status) goto err_unroll_sched; @@ -494,7 +727,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; - ice_init_flex_parser(hw); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC); + ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2); return 0; @@ -515,15 +749,18 @@ err_unroll_cqinit: */ void ice_deinit_hw(struct ice_hw *hw) { + ice_cleanup_fltr_mgmt_struct(hw); + ice_sched_cleanup_all(hw); - ice_shutdown_all_ctrlq(hw); if (hw->port_info) { devm_kfree(ice_hw_to_dev(hw), hw->port_info); hw->port_info = NULL; } - ice_cleanup_fltr_mgmt_struct(hw); + /* Attempt to disable FW logging before shutting down control queues */ + ice_cfg_fw_log(hw, false); + ice_shutdown_all_ctrlq(hw); } /** @@ -652,6 +889,8 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n"); val = GLGEN_RTRIG_GLOBR_M; break; + default: + return ICE_ERR_PARAM; } val |= rd32(hw, GLGEN_RTRIG); @@ -904,7 +1143,22 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading) * @timeout: the maximum time in ms that the driver may hold the resource * @cd: pointer to command details structure or NULL * - * requests common resource using the admin queue commands (0x0008) + * Requests common resource using the admin queue commands (0x0008). + * When attempting to acquire the Global Config Lock, the driver can + * learn of three states: + * 1) ICE_SUCCESS - acquired lock, and can perform download package + * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load + * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has + * successfully downloaded the package; the driver does + * not have to download the package and can continue + * loading + * + * Note that if the caller is in an acquire lock, perform action, release lock + * phase of operation, it is possible that the FW may detect a timeout and issue + * a CORER. In this case, the driver will receive a CORER interrupt and will + * have to determine its cause. The calling thread that is handling this flow + * will likely get an error propagated back to it indicating the Download + * Package, Update Package or the Release Resource AQ commands timed out. */ static enum ice_status ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, @@ -922,13 +1176,43 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res, cmd_resp->res_id = cpu_to_le16(res); cmd_resp->access_type = cpu_to_le16(access); cmd_resp->res_number = cpu_to_le32(sdp_number); + cmd_resp->timeout = cpu_to_le32(*timeout); + *timeout = 0; status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + /* The completion specifies the maximum time in ms that the driver * may hold the resource in the Timeout field. - * If the resource is held by someone else, the command completes with - * busy return value and the timeout field indicates the maximum time - * the current owner of the resource has to free it. 
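+ * + * Clarifying note: *timeout is used in both directions here -- on entry it carries the requested hold time sent to FW, and on completion it is overwritten with the time reported back, which ice_acquire_res() below uses to bound its retry loop.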
+ */ + + /* Global config lock response utilizes an additional status field. + * + * If the Global config lock resource is held by some other driver, the + * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field + * and the timeout field indicates the maximum time the current owner + * of the resource has to free it. + */ + if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) { + if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return 0; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_IN_PROG) { + *timeout = le32_to_cpu(cmd_resp->timeout); + return ICE_ERR_AQ_ERROR; + } else if (le16_to_cpu(cmd_resp->status) == + ICE_AQ_RES_GLBL_DONE) { + return ICE_ERR_AQ_NO_WORK; + } + + /* invalid FW response, force a timeout immediately */ + *timeout = 0; + return ICE_ERR_AQ_ERROR; + } + + /* If the resource is held by some other driver, the command completes + * with a busy return value and the timeout field indicates the maximum + * time the current owner of the resource has to free it. */ if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY) *timeout = le32_to_cpu(cmd_resp->timeout); @@ -967,30 +1251,28 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number, * @hw: pointer to the HW structure * @res: resource id * @access: access type (read or write) + * @timeout: timeout in milliseconds * * This function will attempt to acquire the ownership of a resource. */ enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access) + enum ice_aq_res_access_type access, u32 timeout) { #define ICE_RES_POLLING_DELAY_MS 10 u32 delay = ICE_RES_POLLING_DELAY_MS; + u32 time_left = timeout; enum ice_status status; - u32 time_left = 0; - u32 timeout; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - /* An admin queue return code of ICE_AQ_RC_EEXIST means that another - * driver has previously acquired the resource and performed any - * necessary updates; in this case the caller does not obtain the - * resource and has no further work to do. + /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has + * previously acquired the resource and performed any necessary updates; + * in this case the caller does not obtain the resource and has no + * further work to do. */ - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { - status = ICE_ERR_AQ_NO_WORK; + if (status == ICE_ERR_AQ_NO_WORK) goto ice_acquire_res_exit; - } if (status) ice_debug(hw, ICE_DBG_RES, @@ -1003,11 +1285,9 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, timeout = (timeout > delay) ? timeout - delay : 0; status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL); - if (hw->adminq.sq_last_status == ICE_AQ_RC_EEXIST) { + if (status == ICE_ERR_AQ_NO_WORK) /* lock free, but no work to do */ - status = ICE_ERR_AQ_NO_WORK; break; - } if (!status) /* lock acquired */ @@ -1307,6 +1587,110 @@ void ice_clear_pxe_mode(struct ice_hw *hw) } /** + * ice_get_link_speed_based_on_phy_type - returns link speed + * @phy_type_low: lower part of phy_type + * + * This helper function will convert a phy_type_low to its corresponding link + * speed. + * Note: In the structure of phy_type_low, there should be one bit set, as + * this function will convert one phy type to its speed. 
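+ * For example (illustrative): ICE_PHY_TYPE_LOW_10GBASE_T maps to ICE_AQ_LINK_SPEED_10GB.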
+ * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + */ +static u16 +ice_get_link_speed_based_on_phy_type(u64 phy_type_low) +{ + u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + + switch (phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + case ICE_PHY_TYPE_LOW_100M_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB; + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + case ICE_PHY_TYPE_LOW_1000BASE_KX: + case ICE_PHY_TYPE_LOW_1G_SGMII: + speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB; + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + case ICE_PHY_TYPE_LOW_2500BASE_X: + case ICE_PHY_TYPE_LOW_2500BASE_KX: + speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB; + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB; + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10GBASE_SR: + case ICE_PHY_TYPE_LOW_10GBASE_LR: + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB; + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_25G_AUI_C2C: + speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB; + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB; + break; + default: + speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; + break; + } + + return speed_phy_type_low; +} + +/** + * ice_update_phy_type + * @phy_type_low: pointer to the lower part of phy_type + * @link_speeds_bitmap: targeted link speeds bitmap + * + * Note: For the link_speeds_bitmap format, see the link_speed field of + * struct ice_aqc_get_link_status. The caller can pass in a + * link_speeds_bitmap that includes multiple speeds. + * + * Each set bit in phy_type_low represents a link speed. This helper + * function will turn on bits in the phy_type_low based on the value of + * the link_speeds_bitmap input parameter. + */ +void ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap) +{ + u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN; + u64 pt_low; + int index; + + /* We first check with low part of phy_type */ + for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) { + pt_low = BIT_ULL(index); + speed = ice_get_link_speed_based_on_phy_type(pt_low); + + if (link_speeds_bitmap & speed) + *phy_type_low |= BIT_ULL(index); + } +} + +/** * ice_aq_set_phy_cfg * @hw: pointer to the hw struct * @lport: logical port number @@ -1318,19 +1702,18 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * mode as the PF may not have the privilege to set some of the PHY Config * parameters. This status will be indicated by the command response (0x0601).
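 * Typical flow (a sketch; ice_set_fc() below is a concrete caller): read the current capabilities with ice_aq_get_phy_caps(), copy the fields of interest into a struct ice_aqc_set_phy_cfg_data, adjust the caps bits (e.g. ICE_AQ_PHY_ENA_AUTO_LINK_UPDT), then issue ice_aq_set_phy_cfg().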
*/ -static enum ice_status +enum ice_status ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd) { - struct ice_aqc_set_phy_cfg *cmd; struct ice_aq_desc desc; if (!cfg) return ICE_ERR_PARAM; - cmd = &desc.params.set_phy; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg); - cmd->lport_num = lport; + desc.params.set_phy.lport_num = lport; + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd); } @@ -1379,12 +1762,12 @@ out: * ice_set_fc * @pi: port information structure * @aq_failures: pointer to status code, specific to ice_set_fc routine - * @atomic_restart: enable automatic link update + * @ena_auto_link_update: enable automatic link update * * Set the requested flow control mode. */ enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { struct ice_aqc_set_phy_cfg_data cfg = { 0 }; struct ice_aqc_get_phy_caps_data *pcaps; @@ -1434,8 +1817,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart) int retry_count, retry_max = 10; /* Auto restart link so settings take effect */ - if (atomic_restart) - cfg.caps |= ICE_AQ_PHY_ENA_ATOMIC_LINK; + if (ena_auto_link_update) + cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; /* Copy over all the old settings */ cfg.phy_type_low = pcaps->phy_type_low; cfg.low_power_ctrl = pcaps->low_power_ctrl; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 9a5519130af1..aac2d6cadaaf 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -23,7 +23,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up); enum ice_status ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res, - enum ice_aq_res_access_type access); + enum ice_aq_res_access_type access, u32 timeout); void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res); enum ice_status ice_init_nvm(struct ice_hw *hw); enum ice_status @@ -58,12 +58,24 @@ enum ice_status ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, u16 buf_size, struct ice_sq_cd *cd); enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd); + +enum ice_status +ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, + struct ice_aqc_get_phy_caps_data *caps, + struct ice_sq_cd *cd); +void +ice_update_phy_type(u64 *phy_type_low, u16 link_speeds_bitmap); enum ice_status ice_aq_manage_mac_write(struct ice_hw *hw, u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); enum ice_status ice_clear_pf_cfg(struct ice_hw *hw); enum ice_status -ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool atomic_restart); +ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, + struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd); +enum ice_status +ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, + bool ena_auto_link_update); + enum ice_status ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, struct ice_sq_cd *cd); @@ -83,4 +95,5 @@ enum ice_status ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size, struct ice_sq_cd *cd); +void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c 
b/drivers/net/ethernet/intel/ice/ice_controlq.c index 62be72fdc8f3..1fe026a65d75 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -806,6 +806,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, u16 retval = 0; u32 val = 0; + /* if reset is in progress return a soft error */ + if (hw->reset_ongoing) + return ICE_ERR_RESET_ONGOING; mutex_lock(&cq->sq_lock); cq->sq_last_status = ICE_AQ_RC_OK; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index c71a9b528d6d..db2c502ae932 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -332,58 +332,473 @@ ice_get_ethtool_stats(struct net_device *netdev, } } -static int -ice_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *ks) +/** + * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes + * @netdev: network interface device structure + * @ks: ethtool link ksettings struct to fill out + */ +static void ice_phy_type_to_ethtool(struct net_device *netdev, + struct ethtool_link_ksettings *ks) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; - bool link_up; + u64 phy_types_low; hw_link_info = &vsi->port_info->phy.link_info; - link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; + phy_types_low = vsi->port_info->phy.phy_type_low; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 
5000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC || + phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseSR4_Full); + } + if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) { + 
ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + if (hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); + } - ethtool_link_ksettings_add_link_mode(ks, supported, - 10000baseT_Full); - ethtool_link_ksettings_add_link_mode(ks, advertising, - 10000baseT_Full); + /* Autoneg PHY types */ + if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || + phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || + phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); + } +} - /* set speed and duplex */ - if (link_up) { - switch (hw_link_info->link_speed) { - case ICE_AQ_LINK_SPEED_100MB: - ks->base.speed = SPEED_100; - break; - case ICE_AQ_LINK_SPEED_2500MB: - ks->base.speed = SPEED_2500; - break; - case ICE_AQ_LINK_SPEED_5GB: - ks->base.speed = SPEED_5000; - break; - case ICE_AQ_LINK_SPEED_10GB: - ks->base.speed = SPEED_10000; - break; - case ICE_AQ_LINK_SPEED_25GB: - ks->base.speed = SPEED_25000; - break; - case ICE_AQ_LINK_SPEED_40GB: - ks->base.speed = SPEED_40000; - break; - default: - ks->base.speed = SPEED_UNKNOWN; - break; - } +#define TEST_SET_BITS_TIMEOUT 50 +#define TEST_SET_BITS_SLEEP_MAX 2000 +#define TEST_SET_BITS_SLEEP_MIN 1000 - ks->base.duplex = DUPLEX_FULL; - } else { - ks->base.speed = SPEED_UNKNOWN; - ks->base.duplex = DUPLEX_UNKNOWN; +/** + * ice_get_settings_link_up - Get Link settings for when link is up + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + */ +static void ice_get_settings_link_up(struct ethtool_link_ksettings *ks, + struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings cap_ksettings; + struct ice_link_status *link_info; + struct ice_vsi *vsi = np->vsi; + bool unrecog_phy_low = false; + + link_info = &vsi->port_info->phy.link_info; + + /* Initialize supported and advertised settings based on phy settings */ + switch (link_info->phy_type_low) { + case ICE_PHY_TYPE_LOW_100BASE_TX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_100M_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + 
ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1G_SGMII: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_SX: + case ICE_PHY_TYPE_LOW_1000BASE_LX: + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + break; + case ICE_PHY_TYPE_LOW_1000BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseT_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_X: + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_2500BASE_KX: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 2500baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 2500baseX_Full); + break; + case ICE_PHY_TYPE_LOW_5GBASE_T: + case ICE_PHY_TYPE_LOW_5GBASE_KR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 5000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 5000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_T: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10G_SFI_DA: + case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC: + case ICE_PHY_TYPE_LOW_10G_SFI_C2C: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_SR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + break; + case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_T: + case ICE_PHY_TYPE_LOW_25GBASE_CR: + case ICE_PHY_TYPE_LOW_25GBASE_CR_S: + case ICE_PHY_TYPE_LOW_25GBASE_CR1: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC: + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_SR: + case ICE_PHY_TYPE_LOW_25GBASE_LR: + 
ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + break; + case ICE_PHY_TYPE_LOW_25GBASE_KR: + case ICE_PHY_TYPE_LOW_25GBASE_KR1: + case ICE_PHY_TYPE_LOW_25GBASE_KR_S: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_CR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC: + case ICE_PHY_TYPE_LOW_40G_XLAUI: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_SR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_LR4: + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + break; + case ICE_PHY_TYPE_LOW_40GBASE_KR4: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + break; + default: + unrecog_phy_low = true; + } + + if (unrecog_phy_low) { + /* if we got here and link is up something bad is afoot */ + netdev_info(netdev, "WARNING: Unrecognized PHY_Low (0x%llx).\n", + (u64)link_info->phy_type_low); } + /* Now that we've worked out everything that could be supported by the + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported + */ + memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); + ice_phy_type_to_ethtool(netdev, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); + + switch (link_info->link_speed) { + case ICE_AQ_LINK_SPEED_40GB: + ks->base.speed = SPEED_40000; + break; + case ICE_AQ_LINK_SPEED_25GB: + ks->base.speed = SPEED_25000; + break; + case ICE_AQ_LINK_SPEED_20GB: + ks->base.speed = SPEED_20000; + break; + case ICE_AQ_LINK_SPEED_10GB: + ks->base.speed = SPEED_10000; + break; + case ICE_AQ_LINK_SPEED_5GB: + ks->base.speed = SPEED_5000; + break; + case ICE_AQ_LINK_SPEED_2500MB: + ks->base.speed = SPEED_2500; + break; + case ICE_AQ_LINK_SPEED_1000MB: + ks->base.speed = SPEED_1000; + break; + case ICE_AQ_LINK_SPEED_100MB: + ks->base.speed = SPEED_100; + break; + default: + netdev_info(netdev, + "WARNING: Unrecognized link_speed (0x%x).\n", + link_info->link_speed); + break; + } + ks->base.duplex = DUPLEX_FULL; +} + +/** + * ice_get_settings_link_down - Get the Link settings when link is down + * @ks: ethtool ksettings to fill in + * @netdev: network interface device structure + * + * Reports link settings that can be determined when link is down + */ +static void +ice_get_settings_link_down(struct ethtool_link_ksettings *ks, + struct net_device __always_unused *netdev) +{ + /* link is down and the driver needs to fall back on + * supported phy types to figure out what info to display + */ + ice_phy_type_to_ethtool(netdev, ks); + + /* With no link, speed and duplex are unknown */ + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; 
+} + +/** + * ice_get_link_ksettings - Get Link Speed and Duplex settings + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Reports speed/duplex settings based on media_type + */ +static int ice_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ks) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_link_status *hw_link_info; + struct ice_vsi *vsi = np->vsi; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); + hw_link_info = &vsi->port_info->phy.link_info; + + /* set speed and duplex */ + if (hw_link_info->link_info & ICE_AQ_LINK_UP) + ice_get_settings_link_up(ks, netdev); + else + ice_get_settings_link_down(ks, netdev); + /* set autoneg settings */ - ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ks->base.autoneg = (hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE; /* set media type settings */ switch (vsi->port_info->phy.media_type) { @@ -442,6 +857,311 @@ ice_get_link_ksettings(struct net_device *netdev, } /** + * ice_ksettings_find_adv_link_speed - Find advertising link speed + * @ks: ethtool ksettings + */ +static u16 +ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks) +{ + u16 adv_link_speed = 0; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_100MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 2500baseX_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 5000baseT_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_5GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseLR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_10GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseSR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseKR_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_25GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full)) + adv_link_speed |= ICE_AQ_LINK_SPEED_40GB; + + return adv_link_speed; +} + +/** + * ice_setup_autoneg + * @p: port info + * @ks: ethtool_link_ksettings + * @config: configuration that will be sent down to FW + * @autoneg_enabled: autonegotiation is enabled or not + * @autoneg_changed: will there a change in 
autonegotiation + * @netdev: network interface device structure + * + * Setup PHY autonegotiation feature + */ +static int +ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, + struct ice_aqc_set_phy_cfg_data *config, + u8 autoneg_enabled, u8 *autoneg_changed, + struct net_device *netdev) +{ + int err = 0; + + *autoneg_changed = 0; + + /* Check autoneg */ + if (autoneg_enabled == AUTONEG_ENABLE) { + /* If autoneg was not already enabled */ + if (!(p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)) { + /* If autoneg is not supported, return error */ + if (!ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg not supported on this phy.\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } else { + /* If autoneg is currently enabled */ + if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) { + /* If autoneg is supported 10GBASE_T is the only phy + * that can disable it, so otherwise return error + */ + if (ethtool_link_ksettings_test_link_mode(ks, + supported, + Autoneg)) { + netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); + err = -EINVAL; + } else { + /* Autoneg is allowed to change */ + config->caps &= ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + *autoneg_changed = 1; + } + } + } + + return err; +} + +/** + * ice_set_link_ksettings - Set Speed and Duplex + * @netdev: network interface device structure + * @ks: ethtool ksettings + * + * Set speed/duplex per media_types advertised/forced + */ +static int ice_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ks) +{ + u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0; + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ethtool_link_ksettings safe_ks, copy_ks; + struct ice_aqc_get_phy_caps_data *abilities; + u16 adv_link_speed, curr_link_speed, idx; + struct ice_aqc_set_phy_cfg_data config; + struct ice_pf *pf = np->vsi->back; + struct ice_port_info *p; + u8 autoneg_changed = 0; + enum ice_status status; + u64 phy_type_low; + int err = 0; + bool linkup; + + p = np->vsi->port_info; + + if (!p) + return -EOPNOTSUPP; + + /* Check if this is lan vsi */ + for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) { + if (pf->vsi[idx]->type == ICE_VSI_PF) { + if (np->vsi != pf->vsi[idx]) + return -EOPNOTSUPP; + break; + } + } + + if (p->phy.media_type != ICE_MEDIA_BASET && + p->phy.media_type != ICE_MEDIA_FIBER && + p->phy.media_type != ICE_MEDIA_BACKPLANE && + p->phy.media_type != ICE_MEDIA_DA && + p->phy.link_info.link_info & ICE_AQ_LINK_UP) + return -EOPNOTSUPP; + + /* copy the ksettings to copy_ks to avoid modifying the original */ + memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); + + /* save autoneg out of ksettings */ + autoneg = copy_ks.base.autoneg; + + memset(&safe_ks, 0, sizeof(safe_ks)); + + /* Get link modes supported by hardware.*/ + ice_phy_type_to_ethtool(netdev, &safe_ks); + + /* and check against modes requested by user. + * Return an error if unsupported mode was set. 
+ */ + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + /* get our own copy of the bits to check against */ + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; + ice_get_link_ksettings(netdev, &safe_ks); + + /* set autoneg back to what it currently is */ + copy_ks.base.autoneg = safe_ks.base.autoneg; + /* we don't compare the speed */ + copy_ks.base.speed = safe_ks.base.speed; + + /* If copy_ks.base and safe_ks.base are not the same now, then they are + * trying to set something that we do not support. + */ + if (memcmp(©_ks.base, &safe_ks.base, + sizeof(struct ethtool_link_settings))) + return -EOPNOTSUPP; + + while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) { + timeout--; + if (!timeout) + return -EBUSY; + usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX); + } + + abilities = devm_kzalloc(&pf->pdev->dev, sizeof(*abilities), + GFP_KERNEL); + if (!abilities) + return -ENOMEM; + + /* Get the current phy config */ + status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities, + NULL); + if (status) { + err = -EAGAIN; + goto done; + } + + /* Copy abilities to config in case autoneg is not set below */ + memset(&config, 0, sizeof(struct ice_aqc_set_phy_cfg_data)); + config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE; + if (abilities->caps & ICE_AQC_PHY_AN_MODE) + config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT; + + /* Check autoneg */ + err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed, + netdev); + + if (err) + goto done; + + /* Call to get the current link speed */ + p->phy.get_link_info = true; + status = ice_get_link_status(p, &linkup); + if (status) { + err = -EAGAIN; + goto done; + } + + curr_link_speed = p->phy.link_info.link_speed; + adv_link_speed = ice_ksettings_find_adv_link_speed(ks); + + /* If speed didn't get set, set it to what it currently is. + * This is needed because if advertise is 0 (as it is when autoneg + * is disabled) then speed won't get set. + */ + if (!adv_link_speed) + adv_link_speed = curr_link_speed; + + /* Convert the advertise link speeds to their corresponded PHY_TYPE */ + ice_update_phy_type(&phy_type_low, adv_link_speed); + + if (!autoneg_changed && adv_link_speed == curr_link_speed) { + netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); + goto done; + } + + /* copy over the rest of the abilities */ + config.low_power_ctrl = abilities->low_power_ctrl; + config.eee_cap = abilities->eee_cap; + config.eeer_value = abilities->eeer_value; + config.link_fec_opt = abilities->link_fec_options; + + /* save the requested speeds */ + p->phy.link_info.req_speeds = adv_link_speed; + + /* set link and auto negotiation so changes take effect */ + config.caps |= ICE_AQ_PHY_ENA_LINK; + + if (phy_type_low) { + config.phy_type_low = cpu_to_le64(phy_type_low) & + abilities->phy_type_low; + } else { + err = -EAGAIN; + netdev_info(netdev, "Nothing changed. 
No PHY_TYPE is corresponded to advertised link speed.\n"); + goto done; + } + + /* If link is up put link down */ + if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) { + /* Tell the OS link is going down, the link will go + * back up when fw says it is ready asynchronously + */ + ice_print_link_msg(np->vsi, false); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } + + /* make the aq call */ + status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL); + if (status) { + netdev_info(netdev, "Set phy config failed,\n"); + err = -EAGAIN; + } + +done: + devm_kfree(&pf->pdev->dev, abilities); + clear_bit(__ICE_CFG_BUSY, pf->state); + + return err; +} + +/** * ice_get_rxnfc - command to get RX flow classification rules * @netdev: network interface device structure * @cmd: ethtool rxnfc command @@ -933,6 +1653,7 @@ static int ice_set_rxfh(struct net_device *netdev, const u32 *indir, static const struct ethtool_ops ice_ethtool_ops = { .get_link_ksettings = ice_get_link_ksettings, + .set_link_ksettings = ice_set_link_ksettings, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 6076fc87df9d..88f11498804b 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,251 +6,249 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ -#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) -#define PF_FW_ARQBAH 0x00080180 -#define PF_FW_ARQBAL 0x00080080 -#define PF_FW_ARQH 0x00080380 -#define PF_FW_ARQH_ARQH_S 0 -#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, PF_FW_ARQH_ARQH_S) -#define PF_FW_ARQLEN 0x00080280 -#define PF_FW_ARQLEN_ARQLEN_S 0 -#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, PF_FW_ARQLEN_ARQLEN_S) -#define PF_FW_ARQLEN_ARQVFE_S 28 -#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S) -#define PF_FW_ARQLEN_ARQOVFL_S 29 -#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S) -#define PF_FW_ARQLEN_ARQCRIT_S 30 -#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S) -#define PF_FW_ARQLEN_ARQENABLE_S 31 -#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S) -#define PF_FW_ARQT 0x00080480 -#define PF_FW_ATQBAH 0x00080100 -#define PF_FW_ATQBAL 0x00080000 -#define PF_FW_ATQH 0x00080300 -#define PF_FW_ATQH_ATQH_S 0 -#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, PF_FW_ATQH_ATQH_S) -#define PF_FW_ATQLEN 0x00080200 -#define PF_FW_ATQLEN_ATQLEN_S 0 -#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, PF_FW_ATQLEN_ATQLEN_S) -#define PF_FW_ATQLEN_ATQVFE_S 28 -#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S) -#define PF_FW_ATQLEN_ATQOVFL_S 29 -#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S) -#define PF_FW_ATQLEN_ATQCRIT_S 30 -#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S) -#define PF_FW_ATQLEN_ATQENABLE_S 31 -#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S) -#define PF_FW_ATQT 0x00080400 - +#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) +#define PF_FW_ARQBAH 0x00080180 +#define PF_FW_ARQBAL 0x00080080 +#define PF_FW_ARQH 0x00080380 +#define PF_FW_ARQH_ARQH_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN 0x00080280 +#define PF_FW_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ARQLEN_ARQVFE_M BIT(28) +#define PF_FW_ARQLEN_ARQOVFL_M BIT(29) +#define PF_FW_ARQLEN_ARQCRIT_M BIT(30) +#define PF_FW_ARQLEN_ARQENABLE_M BIT(31) +#define PF_FW_ARQT 0x00080480 +#define PF_FW_ATQBAH 0x00080100 +#define PF_FW_ATQBAL 0x00080000 +#define 
PF_FW_ATQH 0x00080300 +#define PF_FW_ATQH_ATQH_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN 0x00080200 +#define PF_FW_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0) +#define PF_FW_ATQLEN_ATQVFE_M BIT(28) +#define PF_FW_ATQLEN_ATQOVFL_M BIT(29) +#define PF_FW_ATQLEN_ATQCRIT_M BIT(30) +#define PF_FW_ATQLEN_ATQENABLE_M BIT(31) +#define PF_FW_ATQT 0x00080400 #define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256)) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16) #define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24 -#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) +#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24) #define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_0_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_1(_i) (0x0045c900 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_1_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_1_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_2(_i) (0x0045ca00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_2_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_S) +#define GLFLXP_RXDID_FLX_WRD_2_RXDID_OPCODE_M ICE_M(0x3, 30) #define GLFLXP_RXDID_FLX_WRD_3(_i) (0x0045cb00 + ((_i) * 4)) #define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S 0 -#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_S) +#define GLFLXP_RXDID_FLX_WRD_3_PROT_MDID_M ICE_M(0xFF, 0) #define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S 30 -#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_S) - -#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) -#define QRXFLXP_CNTXT_RXDID_IDX_S 0 -#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, QRXFLXP_CNTXT_RXDID_IDX_S) -#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 -#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, QRXFLXP_CNTXT_RXDID_PRIO_S) -#define QRXFLXP_CNTXT_TS_S 11 -#define QRXFLXP_CNTXT_TS_M BIT(QRXFLXP_CNTXT_TS_S) -#define GLGEN_RSTAT 0x000B8188 -#define GLGEN_RSTAT_DEVSTATE_S 0 -#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, GLGEN_RSTAT_DEVSTATE_S) -#define GLGEN_RSTCTL 0x000B8180 -#define 
GLGEN_RSTCTL_GRSTDEL_S 0 -#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) -#define GLGEN_RSTAT_RESET_TYPE_S 2 -#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, GLGEN_RSTAT_RESET_TYPE_S) -#define GLGEN_RTRIG 0x000B8190 -#define GLGEN_RTRIG_CORER_S 0 -#define GLGEN_RTRIG_CORER_M BIT(GLGEN_RTRIG_CORER_S) -#define GLGEN_RTRIG_GLOBR_S 1 -#define GLGEN_RTRIG_GLOBR_M BIT(GLGEN_RTRIG_GLOBR_S) -#define GLGEN_STAT 0x000B612C -#define PFGEN_CTRL 0x00091000 -#define PFGEN_CTRL_PFSWR_S 0 -#define PFGEN_CTRL_PFSWR_M BIT(PFGEN_CTRL_PFSWR_S) -#define PFGEN_STATE 0x00088000 -#define PRTGEN_STATUS 0x000B8100 -#define PFHMC_ERRORDATA 0x00520500 -#define PFHMC_ERRORINFO 0x00520400 -#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) -#define GLINT_DYN_CTL_INTENA_S 0 -#define GLINT_DYN_CTL_INTENA_M BIT(GLINT_DYN_CTL_INTENA_S) -#define GLINT_DYN_CTL_CLEARPBA_S 1 -#define GLINT_DYN_CTL_CLEARPBA_M BIT(GLINT_DYN_CTL_CLEARPBA_S) -#define GLINT_DYN_CTL_SWINT_TRIG_S 2 -#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(GLINT_DYN_CTL_SWINT_TRIG_S) -#define GLINT_DYN_CTL_ITR_INDX_S 3 -#define GLINT_DYN_CTL_SW_ITR_INDX_S 25 -#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, GLINT_DYN_CTL_SW_ITR_INDX_S) -#define GLINT_DYN_CTL_INTENA_MSK_S 31 -#define GLINT_DYN_CTL_INTENA_MSK_M BIT(GLINT_DYN_CTL_INTENA_MSK_S) -#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) -#define PFINT_FW_CTL 0x0016C800 -#define PFINT_FW_CTL_MSIX_INDX_S 0 -#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_FW_CTL_MSIX_INDX_S) -#define PFINT_FW_CTL_ITR_INDX_S 11 -#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, PFINT_FW_CTL_ITR_INDX_S) -#define PFINT_FW_CTL_CAUSE_ENA_S 30 -#define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) -#define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_ECC_ERR_S 16 -#define PFINT_OICR_ECC_ERR_M BIT(PFINT_OICR_ECC_ERR_S) -#define PFINT_OICR_MAL_DETECT_S 19 -#define PFINT_OICR_MAL_DETECT_M BIT(PFINT_OICR_MAL_DETECT_S) -#define PFINT_OICR_GRST_S 20 -#define PFINT_OICR_GRST_M BIT(PFINT_OICR_GRST_S) -#define PFINT_OICR_PCI_EXCEPTION_S 21 -#define PFINT_OICR_PCI_EXCEPTION_M BIT(PFINT_OICR_PCI_EXCEPTION_S) -#define PFINT_OICR_HMC_ERR_S 26 -#define PFINT_OICR_HMC_ERR_M BIT(PFINT_OICR_HMC_ERR_S) -#define PFINT_OICR_PE_CRITERR_S 28 -#define PFINT_OICR_PE_CRITERR_M BIT(PFINT_OICR_PE_CRITERR_S) -#define PFINT_OICR_CTL 0x0016CA80 -#define PFINT_OICR_CTL_MSIX_INDX_S 0 -#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, PFINT_OICR_CTL_MSIX_INDX_S) -#define PFINT_OICR_CTL_ITR_INDX_S 11 -#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, PFINT_OICR_CTL_ITR_INDX_S) -#define PFINT_OICR_CTL_CAUSE_ENA_S 30 -#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(PFINT_OICR_CTL_CAUSE_ENA_S) -#define PFINT_OICR_ENA 0x0016C900 -#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) -#define QINT_RQCTL_MSIX_INDX_S 0 -#define QINT_RQCTL_ITR_INDX_S 11 -#define QINT_RQCTL_CAUSE_ENA_S 30 -#define QINT_RQCTL_CAUSE_ENA_M BIT(QINT_RQCTL_CAUSE_ENA_S) -#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) -#define QINT_TQCTL_MSIX_INDX_S 0 -#define QINT_TQCTL_ITR_INDX_S 11 -#define QINT_TQCTL_CAUSE_ENA_S 30 -#define QINT_TQCTL_CAUSE_ENA_M BIT(QINT_TQCTL_CAUSE_ENA_S) -#define GLLAN_RCTL_0 0x002941F8 -#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) -#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) -#define QRX_CTRL_MAX_INDEX 2047 -#define QRX_CTRL_QENA_REQ_S 0 -#define QRX_CTRL_QENA_REQ_M BIT(QRX_CTRL_QENA_REQ_S) -#define QRX_CTRL_QENA_STAT_S 2 -#define QRX_CTRL_QENA_STAT_M BIT(QRX_CTRL_QENA_STAT_S) -#define QRX_ITR(_QRX) (0x00292000 + 
((_QRX) * 4)) -#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) -#define GLNVM_FLA 0x000B6108 -#define GLNVM_FLA_LOCKED_S 6 -#define GLNVM_FLA_LOCKED_M BIT(GLNVM_FLA_LOCKED_S) -#define GLNVM_GENS 0x000B6100 -#define GLNVM_GENS_SR_SIZE_S 5 -#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, GLNVM_GENS_SR_SIZE_S) -#define GLNVM_ULD 0x000B6008 -#define GLNVM_ULD_CORER_DONE_S 3 -#define GLNVM_ULD_CORER_DONE_M BIT(GLNVM_ULD_CORER_DONE_S) -#define GLNVM_ULD_GLOBR_DONE_S 4 -#define GLNVM_ULD_GLOBR_DONE_M BIT(GLNVM_ULD_GLOBR_DONE_S) -#define PF_FUNC_RID 0x0009E880 -#define PF_FUNC_RID_FUNC_NUM_S 0 -#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, PF_FUNC_RID_FUNC_NUM_S) -#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) -#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) -#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) -#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) -#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) -#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) -#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) -#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) -#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) -#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) -#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) -#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) -#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) -#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) -#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) -#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) -#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) -#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) -#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) -#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) -#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) -#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) -#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) -#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) -#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) -#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) -#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) -#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) -#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) -#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) -#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) -#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) -#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) -#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) -#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) -#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) -#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) -#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) -#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) -#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) -#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) -#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) -#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) -#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) -#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) -#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) -#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) -#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) -#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) -#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) -#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) -#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) -#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) -#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) -#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) -#define GLPRT_UPRCL(_i) 
(0x00381300 + ((_i) * 8)) -#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) -#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) -#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) -#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) -#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) -#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) -#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) -#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) -#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) -#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) -#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) -#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) -#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) -#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) -#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) -#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) -#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) -#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) -#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) -#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) -#define VSIQF_HKEY_MAX_INDEX 12 +#define GLFLXP_RXDID_FLX_WRD_3_RXDID_OPCODE_M ICE_M(0x3, 30) +#define QRXFLXP_CNTXT(_QRX) (0x00480000 + ((_QRX) * 4)) +#define QRXFLXP_CNTXT_RXDID_IDX_S 0 +#define QRXFLXP_CNTXT_RXDID_IDX_M ICE_M(0x3F, 0) +#define QRXFLXP_CNTXT_RXDID_PRIO_S 8 +#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8) +#define GLGEN_RSTAT 0x000B8188 +#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0) +#define GLGEN_RSTCTL 0x000B8180 +#define GLGEN_RSTCTL_GRSTDEL_S 0 +#define GLGEN_RSTCTL_GRSTDEL_M ICE_M(0x3F, GLGEN_RSTCTL_GRSTDEL_S) +#define GLGEN_RSTAT_RESET_TYPE_S 2 +#define GLGEN_RSTAT_RESET_TYPE_M ICE_M(0x3, 2) +#define GLGEN_RTRIG 0x000B8190 +#define GLGEN_RTRIG_CORER_M BIT(0) +#define GLGEN_RTRIG_GLOBR_M BIT(1) +#define GLGEN_STAT 0x000B612C +#define PFGEN_CTRL 0x00091000 +#define PFGEN_CTRL_PFSWR_M BIT(0) +#define PFGEN_STATE 0x00088000 +#define PRTGEN_STATUS 0x000B8100 +#define PFHMC_ERRORDATA 0x00520500 +#define PFHMC_ERRORINFO 0x00520400 +#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) +#define GLINT_DYN_CTL_INTENA_M BIT(0) +#define GLINT_DYN_CTL_CLEARPBA_M BIT(1) +#define GLINT_DYN_CTL_SWINT_TRIG_M BIT(2) +#define GLINT_DYN_CTL_ITR_INDX_S 3 +#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25) +#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31) +#define GLINT_ITR(_i, _INT) (0x00154000 + ((_i) * 8192 + (_INT) * 4)) +#define PFINT_FW_CTL 0x0016C800 +#define PFINT_FW_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_FW_CTL_ITR_INDX_S 11 +#define PFINT_FW_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_FW_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR 0x0016CA00 +#define PFINT_OICR_ECC_ERR_M BIT(16) +#define PFINT_OICR_MAL_DETECT_M BIT(19) +#define PFINT_OICR_GRST_M BIT(20) +#define PFINT_OICR_PCI_EXCEPTION_M BIT(21) +#define PFINT_OICR_HMC_ERR_M BIT(26) +#define PFINT_OICR_PE_CRITERR_M BIT(28) +#define PFINT_OICR_CTL 0x0016CA80 +#define PFINT_OICR_CTL_MSIX_INDX_M ICE_M(0x7FF, 0) +#define PFINT_OICR_CTL_ITR_INDX_S 11 +#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11) +#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30) +#define PFINT_OICR_ENA 0x0016C900 +#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) +#define QINT_RQCTL_MSIX_INDX_S 0 +#define QINT_RQCTL_ITR_INDX_S 11 +#define QINT_RQCTL_CAUSE_ENA_M BIT(30) +#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4)) +#define QINT_TQCTL_MSIX_INDX_S 0 +#define QINT_TQCTL_ITR_INDX_S 11 +#define QINT_TQCTL_CAUSE_ENA_M BIT(30) +#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4)) +#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4)) +#define 
QRX_CTRL_MAX_INDEX 2047 +#define QRX_CTRL_QENA_REQ_S 0 +#define QRX_CTRL_QENA_REQ_M BIT(0) +#define QRX_CTRL_QENA_STAT_S 2 +#define QRX_CTRL_QENA_STAT_M BIT(2) +#define QRX_ITR(_QRX) (0x00292000 + ((_QRX) * 4)) +#define QRX_TAIL(_QRX) (0x00290000 + ((_QRX) * 4)) +#define QRX_TAIL_MAX_INDEX 2047 +#define QRX_TAIL_TAIL_S 0 +#define QRX_TAIL_TAIL_M ICE_M(0x1FFF, 0) +#define GL_MDET_RX 0x00294C00 +#define GL_MDET_RX_QNUM_S 0 +#define GL_MDET_RX_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_RX_VF_NUM_S 15 +#define GL_MDET_RX_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_RX_PF_NUM_S 23 +#define GL_MDET_RX_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_RX_MAL_TYPE_S 26 +#define GL_MDET_RX_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_RX_VALID_M BIT(31) +#define GL_MDET_TX_PQM 0x002D2E00 +#define GL_MDET_TX_PQM_PF_NUM_S 0 +#define GL_MDET_TX_PQM_PF_NUM_M ICE_M(0x7, 0) +#define GL_MDET_TX_PQM_VF_NUM_S 4 +#define GL_MDET_TX_PQM_VF_NUM_M ICE_M(0xFF, 4) +#define GL_MDET_TX_PQM_QNUM_S 12 +#define GL_MDET_TX_PQM_QNUM_M ICE_M(0x3FFF, 12) +#define GL_MDET_TX_PQM_MAL_TYPE_S 26 +#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_PQM_VALID_M BIT(31) +#define GL_MDET_TX_TCLAN 0x000FC068 +#define GL_MDET_TX_TCLAN_QNUM_S 0 +#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0) +#define GL_MDET_TX_TCLAN_VF_NUM_S 15 +#define GL_MDET_TX_TCLAN_VF_NUM_M ICE_M(0xFF, 15) +#define GL_MDET_TX_TCLAN_PF_NUM_S 23 +#define GL_MDET_TX_TCLAN_PF_NUM_M ICE_M(0x7, 23) +#define GL_MDET_TX_TCLAN_MAL_TYPE_S 26 +#define GL_MDET_TX_TCLAN_MAL_TYPE_M ICE_M(0x1F, 26) +#define GL_MDET_TX_TCLAN_VALID_M BIT(31) +#define PF_MDET_RX 0x00294280 +#define PF_MDET_RX_VALID_M BIT(0) +#define PF_MDET_TX_PQM 0x002D2C80 +#define PF_MDET_TX_PQM_VALID_M BIT(0) +#define PF_MDET_TX_TCLAN 0x000FC000 +#define PF_MDET_TX_TCLAN_VALID_M BIT(0) +#define GLNVM_FLA 0x000B6108 +#define GLNVM_FLA_LOCKED_M BIT(6) +#define GLNVM_GENS 0x000B6100 +#define GLNVM_GENS_SR_SIZE_S 5 +#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5) +#define GLNVM_ULD 0x000B6008 +#define GLNVM_ULD_CORER_DONE_M BIT(3) +#define GLNVM_ULD_GLOBR_DONE_M BIT(4) +#define PF_FUNC_RID 0x0009E880 +#define PF_FUNC_RID_FUNC_NUM_S 0 +#define PF_FUNC_RID_FUNC_NUM_M ICE_M(0x7, 0) +#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) +#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) +#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) +#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) +#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) +#define GLPRT_GORCH(_i) (0x00380004 + ((_i) * 8)) +#define GLPRT_GORCL(_i) (0x00380000 + ((_i) * 8)) +#define GLPRT_GOTCH(_i) (0x00380B44 + ((_i) * 8)) +#define GLPRT_GOTCL(_i) (0x00380B40 + ((_i) * 8)) +#define GLPRT_ILLERRC(_i) (0x003801C0 + ((_i) * 8)) +#define GLPRT_LXOFFRXC(_i) (0x003802C0 + ((_i) * 8)) +#define GLPRT_LXOFFTXC(_i) (0x00381180 + ((_i) * 8)) +#define GLPRT_LXONRXC(_i) (0x00380280 + ((_i) * 8)) +#define GLPRT_LXONTXC(_i) (0x00381140 + ((_i) * 8)) +#define GLPRT_MLFC(_i) (0x00380040 + ((_i) * 8)) +#define GLPRT_MPRCH(_i) (0x00381344 + ((_i) * 8)) +#define GLPRT_MPRCL(_i) (0x00381340 + ((_i) * 8)) +#define GLPRT_MPTCH(_i) (0x00381204 + ((_i) * 8)) +#define GLPRT_MPTCL(_i) (0x00381200 + ((_i) * 8)) +#define GLPRT_MRFC(_i) (0x00380080 + ((_i) * 8)) +#define GLPRT_PRC1023H(_i) (0x00380A04 + ((_i) * 8)) +#define GLPRT_PRC1023L(_i) (0x00380A00 + ((_i) * 8)) +#define GLPRT_PRC127H(_i) (0x00380944 + ((_i) * 8)) +#define GLPRT_PRC127L(_i) (0x00380940 + ((_i) * 8)) +#define GLPRT_PRC1522H(_i) (0x00380A44 + ((_i) * 8)) +#define GLPRT_PRC1522L(_i) (0x00380A40 + ((_i) * 8)) 
+#define GLPRT_PRC255H(_i) (0x00380984 + ((_i) * 8)) +#define GLPRT_PRC255L(_i) (0x00380980 + ((_i) * 8)) +#define GLPRT_PRC511H(_i) (0x003809C4 + ((_i) * 8)) +#define GLPRT_PRC511L(_i) (0x003809C0 + ((_i) * 8)) +#define GLPRT_PRC64H(_i) (0x00380904 + ((_i) * 8)) +#define GLPRT_PRC64L(_i) (0x00380900 + ((_i) * 8)) +#define GLPRT_PRC9522H(_i) (0x00380A84 + ((_i) * 8)) +#define GLPRT_PRC9522L(_i) (0x00380A80 + ((_i) * 8)) +#define GLPRT_PTC1023H(_i) (0x00380C84 + ((_i) * 8)) +#define GLPRT_PTC1023L(_i) (0x00380C80 + ((_i) * 8)) +#define GLPRT_PTC127H(_i) (0x00380BC4 + ((_i) * 8)) +#define GLPRT_PTC127L(_i) (0x00380BC0 + ((_i) * 8)) +#define GLPRT_PTC1522H(_i) (0x00380CC4 + ((_i) * 8)) +#define GLPRT_PTC1522L(_i) (0x00380CC0 + ((_i) * 8)) +#define GLPRT_PTC255H(_i) (0x00380C04 + ((_i) * 8)) +#define GLPRT_PTC255L(_i) (0x00380C00 + ((_i) * 8)) +#define GLPRT_PTC511H(_i) (0x00380C44 + ((_i) * 8)) +#define GLPRT_PTC511L(_i) (0x00380C40 + ((_i) * 8)) +#define GLPRT_PTC64H(_i) (0x00380B84 + ((_i) * 8)) +#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8)) +#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8)) +#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8)) +#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8)) +#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8)) +#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8)) +#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8)) +#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8)) +#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8)) +#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8)) +#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8)) +#define GLPRT_UPTCH(_i) (0x003811C4 + ((_i) * 8)) +#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) +#define GLV_BPRCH(_i) (0x003B6004 + ((_i) * 8)) +#define GLV_BPRCL(_i) (0x003B6000 + ((_i) * 8)) +#define GLV_BPTCH(_i) (0x0030E004 + ((_i) * 8)) +#define GLV_BPTCL(_i) (0x0030E000 + ((_i) * 8)) +#define GLV_GORCH(_i) (0x003B0004 + ((_i) * 8)) +#define GLV_GORCL(_i) (0x003B0000 + ((_i) * 8)) +#define GLV_GOTCH(_i) (0x00300004 + ((_i) * 8)) +#define GLV_GOTCL(_i) (0x00300000 + ((_i) * 8)) +#define GLV_MPRCH(_i) (0x003B4004 + ((_i) * 8)) +#define GLV_MPRCL(_i) (0x003B4000 + ((_i) * 8)) +#define GLV_MPTCH(_i) (0x0030C004 + ((_i) * 8)) +#define GLV_MPTCL(_i) (0x0030C000 + ((_i) * 8)) +#define GLV_RDPC(_i) (0x00294C04 + ((_i) * 4)) +#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4)) +#define GLV_UPRCH(_i) (0x003B2004 + ((_i) * 8)) +#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8)) +#define GLV_UPTCH(_i) (0x0030A004 + ((_i) * 8)) +#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8)) +#define VSIQF_HKEY_MAX_INDEX 12 #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 068dbc740b76..94504023d86e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -188,23 +188,25 @@ struct ice_32b_rx_flex_desc_nic { * with a specific metadata (profile 7 reserved for HW) */ enum ice_rxdid { - ICE_RXDID_START = 0, - ICE_RXDID_LEGACY_0 = ICE_RXDID_START, - ICE_RXDID_LEGACY_1, - ICE_RXDID_FLX_START, - ICE_RXDID_FLEX_NIC = ICE_RXDID_FLX_START, - ICE_RXDID_FLX_LAST = 63, - ICE_RXDID_LAST = ICE_RXDID_FLX_LAST + ICE_RXDID_LEGACY_0 = 0, + ICE_RXDID_LEGACY_1 = 1, + ICE_RXDID_FLEX_NIC = 2, + ICE_RXDID_FLEX_NIC_2 = 6, + ICE_RXDID_HW = 7, + ICE_RXDID_LAST = 63, }; /* Receive Flex Descriptor Rx opcode values */ #define ICE_RX_OPC_MDID 0x01 /* Receive Descriptor MDID values */ -#define ICE_RX_MDID_FLOW_ID_LOWER 5 -#define 
ICE_RX_MDID_FLOW_ID_HIGH 6 -#define ICE_RX_MDID_HASH_LOW 56 -#define ICE_RX_MDID_HASH_HIGH 57 +enum ice_flex_rx_mdid { + ICE_RX_MDID_FLOW_ID_LOWER = 5, + ICE_RX_MDID_FLOW_ID_HIGH, + ICE_RX_MDID_SRC_VSI = 19, + ICE_RX_MDID_HASH_LOW = 56, + ICE_RX_MDID_HASH_HIGH, +}; /* Rx Flag64 packet flag bits */ enum ice_rx_flg64_bits { diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f1e80eed2fd6..1b49a605d094 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -7,7 +7,7 @@ #include "ice.h" -#define DRV_VERSION "ice-0.7.0-k" +#define DRV_VERSION "0.7.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver" const char ice_drv_ver[] = DRV_VERSION; static const char ice_driver_string[] = DRV_SUMMARY; @@ -32,10 +32,86 @@ static const struct net_device_ops ice_netdev_ops; static void ice_pf_dis_all_vsi(struct ice_pf *pf); static void ice_rebuild(struct ice_pf *pf); static int ice_vsi_release(struct ice_vsi *vsi); +static void ice_vsi_release_all(struct ice_pf *pf); static void ice_update_vsi_stats(struct ice_vsi *vsi); static void ice_update_pf_stats(struct ice_pf *pf); /** + * ice_get_tx_pending - returns number of Tx descriptors not processed + * @ring: the ring of descriptors + */ +static u32 ice_get_tx_pending(struct ice_ring *ring) +{ + u32 head, tail; + + head = ring->next_to_clean; + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + return 0; +} + +/** + * ice_check_for_hang_subtask - check for and recover hung queues + * @pf: pointer to PF struct + */ +static void ice_check_for_hang_subtask(struct ice_pf *pf) +{ + struct ice_vsi *vsi = NULL; + unsigned int i; + u32 v, v_idx; + int packets; + + ice_for_each_vsi(pf, v) + if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) { + vsi = pf->vsi[v]; + break; + } + + if (!vsi || test_bit(__ICE_DOWN, vsi->state)) + return; + + if (!(vsi->netdev && netif_carrier_ok(vsi->netdev))) + return; + + for (i = 0; i < vsi->num_txq; i++) { + struct ice_ring *tx_ring = vsi->tx_rings[i]; + + if (tx_ring && tx_ring->desc) { + int itr = ICE_ITR_NONE; + + /* If packet counter has not changed the queue is + * likely stalled, so force an interrupt for this + * queue. + * + * prev_pkt would be negative if there was no + * pending work. + */ + packets = tx_ring->stats.pkts & INT_MAX; + if (tx_ring->tx_stats.prev_pkt == packets) { + /* Trigger sw interrupt to revive the queue */ + v_idx = tx_ring->q_vector->v_idx; + wr32(&vsi->back->hw, + GLINT_DYN_CTL(vsi->base_vector + v_idx), + (itr << GLINT_DYN_CTL_ITR_INDX_S) | + GLINT_DYN_CTL_SWINT_TRIG_M | + GLINT_DYN_CTL_INTENA_MSK_M); + continue; + } + + /* Memory barrier between read of packet count and call + * to ice_get_tx_pending() + */ + smp_rmb(); + tx_ring->tx_stats.prev_pkt = + ice_get_tx_pending(tx_ring) ? 
packets : -1; + } + } +} + +/** * ice_get_free_slot - get the next non-NULL location index in array * @array: array to search * @size: size of the array @@ -274,6 +350,63 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi) } /** + * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI + * @vsi: VSI to enable or disable VLAN pruning on + * @ena: set to true to enable VLAN pruning and false to disable it + * + * returns 0 if VSI is updated, negative otherwise + */ +static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena) +{ + struct ice_vsi_ctx *ctxt; + struct device *dev; + int status; + + if (!vsi) + return -EINVAL; + + dev = &vsi->back->pdev->dev; + ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + if (ena) { + ctxt->info.sec_flags |= + ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S; + ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } else { + ctxt->info.sec_flags &= + ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << + ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S); + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + } + + ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + ctxt->vsi_num = vsi->vsi_num; + status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL); + if (status) { + netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n", + ena ? "Ena" : "Dis", vsi->vsi_num, status, + vsi->back->hw.adminq.sq_last_status); + goto err_out; + } + + vsi->info.sec_flags = ctxt->info.sec_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + + devm_kfree(dev, ctxt); + return 0; + +err_out: + devm_kfree(dev, ctxt); + return -EIO; +} + +/** * ice_vsi_sync_fltr - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@ -456,23 +589,13 @@ static void ice_prepare_for_reset(struct ice_pf *pf) { struct ice_hw *hw = &pf->hw; - u32 v; - - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - ice_remove_vsi_fltr(hw, pf->vsi[v]->vsi_num); - - dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); /* disable the VSIs and their queues that are not already DOWN */ - /* pf_dis_all_vsi modifies netdev structures -rtnl_lock needed */ ice_pf_dis_all_vsi(pf); - ice_for_each_vsi(pf, v) - if (pf->vsi[v]) - pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + + set_bit(__ICE_PREPARED_FOR_RESET, pf->state); } /** @@ -490,26 +613,32 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) WARN_ON(in_interrupt()); /* PFR is a bit of a special case because it doesn't result in an OICR - * interrupt. So for PFR, we prepare for reset, issue the reset and - * rebuild sequentially. + * interrupt. Set pending bit here which otherwise gets set in the + * OICR handler. */ - if (reset_type == ICE_RESET_PFR) { + if (reset_type == ICE_RESET_PFR) set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - ice_prepare_for_reset(pf); - } + + ice_prepare_for_reset(pf); /* trigger the reset */ if (ice_reset(hw, reset_type)) { dev_err(dev, "reset %d failed\n", reset_type); set_bit(__ICE_RESET_FAILED, pf->state); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); return; } + /* PFR is a bit of a special case because it doesn't result in an OICR + * interrupt. So for PFR, rebuild after the reset and clear the reset- + * associated state bits. 
+ */ if (reset_type == ICE_RESET_PFR) { pf->pfr_count++; ice_rebuild(pf); clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); } } @@ -519,48 +648,57 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type) */ static void ice_reset_subtask(struct ice_pf *pf) { - enum ice_reset_req reset_type; - - rtnl_lock(); + enum ice_reset_req reset_type = ICE_RESET_INVAL; /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an - * OICR interrupt. The OICR handler (ice_misc_intr) determines what - * type of reset happened and sets __ICE_RESET_RECOVERY_PENDING bit in - * pf->state. So if reset/recovery is pending (as indicated by this bit) - * we do a rebuild and return. + * OICR interrupt. The OICR handler (ice_misc_intr) determines what type + * of reset is pending and sets bits in pf->state indicating the reset + * type and __ICE_RESET_RECOVERY_PENDING. So, if the latter bit is set + * prepare for pending reset if not already (for PF software-initiated + * global resets the software should already be prepared for it as + * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated + * by firmware or software on other PFs, that bit is not set so prepare + * for the reset now), poll for reset done, rebuild and return. */ if (ice_is_reset_recovery_pending(pf->state)) { clear_bit(__ICE_GLOBR_RECV, pf->state); clear_bit(__ICE_CORER_RECV, pf->state); - ice_prepare_for_reset(pf); + if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) + ice_prepare_for_reset(pf); /* make sure we are ready to rebuild */ - if (ice_check_reset(&pf->hw)) + if (ice_check_reset(&pf->hw)) { set_bit(__ICE_RESET_FAILED, pf->state); - else + } else { + /* done with reset. start rebuild */ + pf->hw.reset_ongoing = false; ice_rebuild(pf); - clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); - goto unlock; + /* clear bit to resume normal operations, but + * ICE_NEEDS_RESTART bit is set incase rebuild failed + */ + clear_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__ICE_PREPARED_FOR_RESET, pf->state); + } + + return; } /* No pending resets to finish processing. 
/* No pending resets to finish processing. Check for new resets */ + if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) + reset_type = ICE_RESET_PFR; + if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) + reset_type = ICE_RESET_CORER; if (test_and_clear_bit(__ICE_GLOBR_REQ, pf->state)) reset_type = ICE_RESET_GLOBR; - else if (test_and_clear_bit(__ICE_CORER_REQ, pf->state)) - reset_type = ICE_RESET_CORER; - else if (test_and_clear_bit(__ICE_PFR_REQ, pf->state)) - reset_type = ICE_RESET_PFR; - else - goto unlock; + /* If no valid reset type was requested, just return */ + if (reset_type == ICE_RESET_INVAL) + return; - /* reset if not already down or resetting */ + /* reset if not already down or busy */ if (!test_bit(__ICE_DOWN, pf->state) && !test_bit(__ICE_CFG_BUSY, pf->state)) { ice_do_reset(pf, reset_type); } - -unlock: - rtnl_unlock(); } /** @@ -903,6 +1041,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) dev_err(&pf->pdev->dev, "Could not handle link event\n"); break; + case ice_aqc_opc_fw_logging: + ice_output_fw_log(hw, &event.desc, event.msg_buf); + break; default: dev_dbg(&pf->pdev->dev, "%s Receive Queue unknown event 0x%04x ignored\n", @@ -966,8 +1107,9 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf) */ static void ice_service_task_schedule(struct ice_pf *pf) { - if (!test_bit(__ICE_DOWN, pf->state) && - !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state)) + if (!test_bit(__ICE_SERVICE_DIS, pf->state) && + !test_and_set_bit(__ICE_SERVICE_SCHED, pf->state) && + !test_bit(__ICE_NEEDS_RESTART, pf->state)) queue_work(ice_wq, &pf->serv_task); } @@ -985,6 +1127,22 @@ static void ice_service_task_complete(struct ice_pf *pf) } /** + * ice_service_task_stop - stop service task and cancel pending work + * @pf: board private structure + */ +static void ice_service_task_stop(struct ice_pf *pf) +{ + set_bit(__ICE_SERVICE_DIS, pf->state); + + if (pf->serv_tmr.function) + del_timer_sync(&pf->serv_tmr); + if (pf->serv_task.func) + cancel_work_sync(&pf->serv_task); + + clear_bit(__ICE_SERVICE_SCHED, pf->state); +} + +/** * ice_service_timer - timer callback to schedule service task * @t: pointer to timer_list */ @@ -997,6 +1155,114 @@ static void ice_service_timer(struct timer_list *t) }
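The MDD machinery that follows keeps the hard-IRQ path cheap: ice_misc_intr() only latches __ICE_MDD_EVENT_PENDING, and the slow register decode runs later in the service task. A simplified standalone model of that handoff, with a C11 atomic standing in for the set_bit()/clear_bit() protocol (names invented):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool mdd_event_pending;

	/* interrupt context: latch the event and return immediately */
	static void fake_oicr_handler(void)
	{
		atomic_store(&mdd_event_pending, true);
	}

	/* service task, process context: consume the latched event */
	static void fake_service_task(void)
	{
		if (!atomic_exchange(&mdd_event_pending, false))
			return;
		printf("decoding MDD cause registers, scheduling reset if needed\n");
	}

	int main(void)
	{
		fake_oicr_handler();	/* pretend the OICR interrupt fired */
		fake_service_task();	/* deferred handling */
		return 0;
	}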
/** + * ice_handle_mdd_event - handle malicious driver detect event + * @pf: pointer to the PF structure + * + * Called from service task. OICR interrupt handler indicates MDD event + */ +static void ice_handle_mdd_event(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + bool mdd_detected = false; + u32 reg; + + if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state)) + return; + + /* find what triggered the MDD event */ + reg = rd32(hw, GL_MDET_TX_PQM); + if (reg & GL_MDET_TX_PQM_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_PQM_PF_NUM_M) >> + GL_MDET_TX_PQM_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_PQM_VF_NUM_M) >> + GL_MDET_TX_PQM_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_PQM_MAL_TYPE_M) >> + GL_MDET_TX_PQM_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_PQM_QNUM_M) >> + GL_MDET_TX_PQM_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_PQM, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_TX_TCLAN); + if (reg & GL_MDET_TX_TCLAN_VALID_M) { + u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >> + GL_MDET_TX_TCLAN_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_TX_TCLAN_VF_NUM_M) >> + GL_MDET_TX_TCLAN_VF_NUM_S; + u8 event = (reg & GL_MDET_TX_TCLAN_MAL_TYPE_M) >> + GL_MDET_TX_TCLAN_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >> + GL_MDET_TX_TCLAN_QNUM_S); + + if (netif_msg_tx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff); + mdd_detected = true; + } + + reg = rd32(hw, GL_MDET_RX); + if (reg & GL_MDET_RX_VALID_M) { + u8 pf_num = (reg & GL_MDET_RX_PF_NUM_M) >> + GL_MDET_RX_PF_NUM_S; + u16 vf_num = (reg & GL_MDET_RX_VF_NUM_M) >> + GL_MDET_RX_VF_NUM_S; + u8 event = (reg & GL_MDET_RX_MAL_TYPE_M) >> + GL_MDET_RX_MAL_TYPE_S; + u16 queue = ((reg & GL_MDET_RX_QNUM_M) >> + GL_MDET_RX_QNUM_S); + + if (netif_msg_rx_err(pf)) + dev_info(&pf->pdev->dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", + event, queue, pf_num, vf_num); + wr32(hw, GL_MDET_RX, 0xffffffff); + mdd_detected = true; + } + + if (mdd_detected) { + bool pf_mdd_detected = false; + + reg = rd32(hw, PF_MDET_TX_PQM); + if (reg & PF_MDET_TX_PQM_VALID_M) { + wr32(hw, PF_MDET_TX_PQM, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_TX_TCLAN); + if (reg & PF_MDET_TX_TCLAN_VALID_M) { + wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF); + dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + + reg = rd32(hw, PF_MDET_RX); + if (reg & PF_MDET_RX_VALID_M) { + wr32(hw, PF_MDET_RX, 0xFFFF); + dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); + pf_mdd_detected = true; + } + /* Queue belongs to the PF, initiate a reset */ + if (pf_mdd_detected) { + set_bit(__ICE_NEEDS_RESTART, pf->state); + ice_service_task_schedule(pf); + } + } + + /* re-enable MDD interrupt cause */ + clear_bit(__ICE_MDD_EVENT_PENDING, pf->state); + reg = rd32(hw, PFINT_OICR_ENA); + reg |= PFINT_OICR_MAL_DETECT_M; + wr32(hw, PFINT_OICR_ENA, reg); + ice_flush(hw); +} + +/** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct */ @@ -1010,14 +1276,17 @@ static void ice_service_task(struct work_struct *work) /* process reset requests first */ ice_reset_subtask(pf); - /* bail if a reset/recovery cycle is pending */ + /* bail if a reset/recovery cycle is pending or rebuild failed */
if (ice_is_reset_recovery_pending(pf->state) || - test_bit(__ICE_SUSPENDED, pf->state)) { + test_bit(__ICE_SUSPENDED, pf->state) || + test_bit(__ICE_NEEDS_RESTART, pf->state)) { ice_service_task_complete(pf); return; } + ice_check_for_hang_subtask(pf); ice_sync_fltr_subtask(pf); + ice_handle_mdd_event(pf); ice_watchdog_subtask(pf); ice_clean_adminq_subtask(pf); @@ -1029,6 +1298,7 @@ static void ice_service_task(struct work_struct *work) * schedule the service task now. */ if (time_after(jiffies, (start_time + pf->serv_tmr_period)) || + test_bit(__ICE_MDD_EVENT_PENDING, pf->state) || test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state)) mod_timer(&pf->serv_tmr, jiffies); } @@ -1157,7 +1427,7 @@ static void ice_vsi_delete(struct ice_vsi *vsi) memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props)); - status = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); + status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL); if (status) dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n", vsi->vsi_num); @@ -1420,13 +1690,13 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) } /** - * ice_vsi_add - Create a new VSI or fetch preallocated VSI + * ice_vsi_init - Create and initialize a VSI * @vsi: the VSI being configured * * This initializes a VSI context depending on the VSI type to be added and * passes it down to the add_vsi aq command to create a new VSI. */ -static int ice_vsi_add(struct ice_vsi *vsi) +static int ice_vsi_init(struct ice_vsi *vsi) { struct ice_vsi_ctx ctxt = { 0 }; struct ice_pf *pf = vsi->back; @@ -1453,13 +1723,17 @@ static int ice_vsi_add(struct ice_vsi *vsi) ctxt.info.sw_id = vsi->port_info->sw_id; ice_vsi_setup_q_map(vsi, &ctxt); - ret = ice_aq_add_vsi(hw, &ctxt, NULL); + ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL); if (ret) { - dev_err(&vsi->back->pdev->dev, - "Add VSI AQ call failed, err %d\n", ret); + dev_err(&pf->pdev->dev, + "Add VSI failed, err %d\n", ret); return -EIO; } + + /* keep context for update VSI operations */ vsi->info = ctxt.info; + + /* record VSI number returned */ vsi->vsi_num = ctxt.vsi_num; return ret; @@ -1735,8 +2009,14 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); + if (oicr & PFINT_OICR_MAL_DETECT_M) { + ena_mask &= ~PFINT_OICR_MAL_DETECT_M; + set_bit(__ICE_MDD_EVENT_PENDING, pf->state); + } + if (oicr & PFINT_OICR_GRST_M) { u32 reset; + /* we have a reset warning */ ena_mask &= ~PFINT_OICR_GRST_M; reset = (rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >> @@ -1754,7 +2034,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) * We also make note of which reset happened so that peer * devices/drivers can be informed. */ - if (!test_bit(__ICE_RESET_RECOVERY_PENDING, pf->state)) { + if (!test_and_set_bit(__ICE_RESET_RECOVERY_PENDING, + pf->state)) { if (reset == ICE_RESET_CORER) set_bit(__ICE_CORER_RECV, pf->state); else if (reset == ICE_RESET_GLOBR) @@ -1762,7 +2043,20 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) else set_bit(__ICE_EMPR_RECV, pf->state); - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state);
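The comment that follows distinguishes two lifetimes, which is worth pinning down. A compact model under the semantics the comment states, namely that the hardware flag drops first and the rebuild flag last (struct and function names invented):

	#include <stdbool.h>
	#include <stdio.h>

	struct reset_flags {
		bool reset_ongoing;	/* hardware still in reset */
		bool recovery_pending;	/* driver rebuild still required */
	};

	static void reset_irq(struct reset_flags *f)
	{
		f->reset_ongoing = true;	/* both set at the start */
		f->recovery_pending = true;
	}

	static void hw_out_of_reset(struct reset_flags *f)
	{
		f->reset_ongoing = false;	/* rebuild may now begin */
	}

	static void rebuild_done(struct reset_flags *f)
	{
		f->recovery_pending = false;	/* normal operation resumes */
	}

	int main(void)
	{
		struct reset_flags f = { false, false };

		reset_irq(&f);
		hw_out_of_reset(&f);
		rebuild_done(&f);
		printf("ongoing=%d pending=%d\n", f.reset_ongoing, f.recovery_pending);
		return 0;
	}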
+ /* There are a couple of different bits at play here. + * hw->reset_ongoing indicates whether the hardware is + * in reset. This is set to true when a reset interrupt + * is received and set back to false after the driver + * has determined that the hardware is out of reset. + * + * __ICE_RESET_RECOVERY_PENDING in pf->state indicates + * that a post reset rebuild is required before the + * driver is operational again. This is set above. + * + * As this is the start of the reset/rebuild cycle, set + * both to indicate that. + */ + hw->reset_ongoing = true; } } @@ -2635,14 +2929,12 @@ ice_vsi_cfg_rss_exit: } /** - * ice_vsi_reinit_setup - return resource and reallocate resource for a VSI - * @vsi: pointer to the ice_vsi - * - * This reallocates the VSIs queue resources + * ice_vsi_rebuild - Rebuild VSI after reset + * @vsi: VSI to be rebuilt * * Returns 0 on success and negative value on failure */ -static int ice_vsi_reinit_setup(struct ice_vsi *vsi) +static int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; int ret, i; @@ -2658,7 +2950,7 @@ static int ice_vsi_reinit_setup(struct ice_vsi *vsi) ice_vsi_set_num_qs(vsi); /* Initialize VSI struct elements and create VSI in FW */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret < 0) goto err_vsi; @@ -2668,19 +2960,7 @@ static int ice_vsi_reinit_setup(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: - if (!vsi->netdev) { - ret = ice_cfg_netdev(vsi); - if (ret) - goto err_rings; - - ret = register_netdev(vsi->netdev); - if (ret) - goto err_rings; - - netif_carrier_off(vsi->netdev); - netif_tx_stop_all_queues(vsi->netdev); - } - + /* fall through */ ret = ice_vsi_alloc_q_vectors(vsi); if (ret) goto err_rings; @@ -2732,21 +3012,23 @@ err_vsi: /** * ice_vsi_setup - Set up a VSI by a given type * @pf: board private structure - * @type: VSI type * @pi: pointer to the port_info instance + * @type: VSI type + * @vf_id: defines VF id to which this VSI connects. This field is meant to be + * used only for the ICE_VSI_VF VSI type. For other VSI types, callers + * should fill in ICE_INVAL_VFID as input. * * This allocates the sw VSI structure and its queue resources. * - * Returns pointer to the successfully allocated and configure VSI sw struct on - * success, otherwise returns NULL on failure. + * Returns pointer to the successfully allocated and configured VSI sw struct on + * success, NULL on failure. */ static struct ice_vsi * -ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, - struct ice_port_info *pi) +ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, + enum ice_vsi_type type, u16 __always_unused vf_id) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct device *dev = &pf->pdev->dev; - struct ice_vsi_ctx ctxt = { 0 }; struct ice_vsi *vsi; int ret, i; @@ -2769,12 +3051,10 @@ ice_vsi_setup(struct ice_pf *pf, enum ice_vsi_type type, ice_vsi_set_rss_params(vsi); /* create the VSI */ - ret = ice_vsi_add(vsi); + ret = ice_vsi_init(vsi); if (ret) goto err_vsi; - ctxt.vsi_num = vsi->vsi_num; - switch (vsi->type) { case ICE_VSI_PF: ret = ice_cfg_netdev(vsi); @@ -2843,10 +3123,7 @@ err_register_netdev: vsi->netdev = NULL; } err_cfg_netdev: - ret = ice_aq_free_vsi(&pf->hw, &ctxt, false, NULL); - if (ret) - dev_err(&vsi->back->pdev->dev, - "Free VSI AQ call failed, err %d\n", ret); + ice_vsi_delete(vsi); err_vsi: ice_vsi_put_qs(vsi); err_get_qs: @@ -2858,6 +3135,20 @@ err_get_qs: } /** + * ice_pf_vsi_setup - Set up a PF VSI + * @pf: board private structure + * @pi: pointer to the port_info instance + * + * Returns pointer to the successfully allocated VSI sw struct on success, + * otherwise returns NULL on failure.
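ice_pf_vsi_setup(), whose kernel-doc appears just above and whose body follows, wraps ice_vsi_setup() so PF call sites never pass a stray VF id. A hypothetical VF-side counterpart, shown only to illustrate the wrapper pattern (it is not part of this patch; ICE_VSI_VF is taken from the kernel-doc above):

	/* hypothetical sketch only -- not in this patch */
	static struct ice_vsi *
	ice_vf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, u16 vf_id)
	{
		/* type-specific wrapper: here the vf_id argument is meaningful */
		return ice_vsi_setup(pf, pi, ICE_VSI_VF, vf_id);
	}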
+ */ +static struct ice_vsi * +ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) +{ + return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID); +} + +/** * ice_vsi_add_vlan - Add vsi membership for given vlan * @vsi: the vsi being configured * @vid: vlan id to be added @@ -2908,7 +3199,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; - int ret = 0; + int ret; if (vid >= VLAN_N_VID) { netdev_err(netdev, "VLAN id requested %d is out of range %d\n", @@ -2919,6 +3210,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, if (vsi->info.pvid) return -EINVAL; + /* Enable VLAN pruning when VLAN 0 is added */ + if (unlikely(!vid)) { + ret = ice_cfg_vlan_pruning(vsi, true); + if (ret) + return ret; + } + /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is * needed to continue allowing all untagged packets since VLAN prune * list is applied to all packets by the switch @@ -2935,16 +3233,19 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev, * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN * @vsi: the VSI being configured * @vid: VLAN id to be removed + * + * Returns 0 on success and negative on failure */ -static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) +static int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) { struct ice_fltr_list_entry *list; struct ice_pf *pf = vsi->back; LIST_HEAD(tmp_add_list); + int status = 0; list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL); if (!list) - return; + return -ENOMEM; list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN; list->fltr_info.fwd_id.vsi_id = vsi->vsi_num; @@ -2956,11 +3257,14 @@ static void ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid) INIT_LIST_HEAD(&list->list_entry); list_add(&list->list_entry, &tmp_add_list); - if (ice_remove_vlan(&pf->hw, &tmp_add_list)) + if (ice_remove_vlan(&pf->hw, &tmp_add_list)) { dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n", vid, vsi->vsi_num); + status = -EIO; + } ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); + return status; } /** @@ -2976,19 +3280,25 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev, { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; + int status; if (vsi->info.pvid) return -EINVAL; - /* return code is ignored as there is nothing a user - * can do about failure to remove and a log message was - * already printed from the other function + /* Make sure ice_vsi_kill_vlan is successful before updating VLAN + * information */ - ice_vsi_kill_vlan(vsi, vid); + status = ice_vsi_kill_vlan(vsi, vid); + if (status) + return status; clear_bit(vid, vsi->active_vlans); - return 0; + /* Disable VLAN pruning when VLAN 0 is removed */ + if (unlikely(!vid)) + status = ice_cfg_vlan_pruning(vsi, false); + + return status; } /** @@ -3004,50 +3314,48 @@ static int ice_setup_pf_sw(struct ice_pf *pf) struct ice_vsi *vsi; int status = 0; - if (!ice_is_reset_recovery_pending(pf->state)) { - vsi = ice_vsi_setup(pf, ICE_VSI_PF, pf->hw.port_info); - if (!vsi) { - status = -ENOMEM; - goto error_exit; - } - } else { - vsi = pf->vsi[0]; - status = ice_vsi_reinit_setup(vsi); - if (status < 0) - return -EIO; + if (ice_is_reset_recovery_pending(pf->state)) + return -EBUSY; + + vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); + if (!vsi) { + status = -ENOMEM; + goto unroll_vsi_setup; } - /* tmp_add_list contains a list of MAC addresses for which MAC - * filters need to be programmed. 
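The add/kill handlers above tie VLAN pruning to the lifetime of the VID 0 filter: pruning switches on when VLAN 0 (the filter that keeps untagged traffic flowing) is installed and off when it is removed. A standalone model of that toggle (names invented):

	#include <stdbool.h>
	#include <stdio.h>

	static bool vlan_pruning;	/* models the VSI's pruning state */

	static int rx_add_vid(unsigned int vid)
	{
		/* VLAN 0 arriving means VLAN filtering is starting: pruning on */
		if (vid == 0)
			vlan_pruning = true;
		printf("add vid %u, pruning=%d\n", vid, vlan_pruning);
		return 0;
	}

	static int rx_kill_vid(unsigned int vid)
	{
		/* VLAN 0 leaving means VLAN filtering is done: pruning off */
		if (vid == 0)
			vlan_pruning = false;
		printf("kill vid %u, pruning=%d\n", vid, vlan_pruning);
		return 0;
	}

	int main(void)
	{
		rx_add_vid(0);
		rx_add_vid(100);
		rx_kill_vid(100);
		rx_kill_vid(0);
		return 0;
	}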
Add the VSI's unicast MAC to - * this list + /* To add a MAC filter, first add the MAC to a list and then + * pass the list to ice_add_mac. */ + + /* Add a unicast MAC filter so the VSI can get its packets */ status = ice_add_mac_to_list(vsi, &tmp_add_list, vsi->port_info->mac.perm_addr); if (status) - goto error_exit; + goto unroll_vsi_setup; /* VSI needs to receive broadcast traffic, so add the broadcast - * MAC address to the list. + * MAC address to the list as well. */ eth_broadcast_addr(broadcast); status = ice_add_mac_to_list(vsi, &tmp_add_list, broadcast); if (status) - goto error_exit; + goto free_mac_list; /* program MAC filters for entries in tmp_add_list */ status = ice_add_mac(&pf->hw, &tmp_add_list); if (status) { dev_err(&pf->pdev->dev, "Could not add MAC filters\n"); status = -ENOMEM; - goto error_exit; + goto free_mac_list; } ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); return status; -error_exit: +free_mac_list: ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); +unroll_vsi_setup: if (vsi) { ice_vsi_free_q_vectors(vsi); if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED) @@ -3097,10 +3405,7 @@ static void ice_determine_q_usage(struct ice_pf *pf) */ static void ice_deinit_pf(struct ice_pf *pf) { - if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); - if (pf->serv_task.func) - cancel_work_sync(&pf->serv_task); + ice_service_task_stop(pf); mutex_destroy(&pf->sw_mutex); mutex_destroy(&pf->avail_q_mutex); } @@ -3307,6 +3612,8 @@ static int ice_probe(struct pci_dev *pdev, pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(__ICE_DOWN, pf->state); + /* Disable service task until DOWN bit is cleared */ + set_bit(__ICE_SERVICE_DIS, pf->state); hw = &pf->hw; hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; @@ -3364,6 +3671,9 @@ static int ice_probe(struct pci_dev *pdev, goto err_init_interrupt_unroll; } + /* Driver is mostly up */ + clear_bit(__ICE_DOWN, pf->state); + /* In case of MSIX we are going to setup the misc vector right here * to handle admin queue events etc. 
In case of legacy and MSI * the misc functionality and queue processing is combined in @@ -3386,7 +3696,11 @@ static int ice_probe(struct pci_dev *pdev, goto err_msix_misc_unroll; } - pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + if (hw->evb_veb) + pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; + else + pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; + pf->first_sw->pf = pf; /* record the sw_id available for later use */ @@ -3399,8 +3713,7 @@ static int ice_probe(struct pci_dev *pdev, goto err_alloc_sw_unroll; } - /* Driver is mostly up */ - clear_bit(__ICE_DOWN, pf->state); + clear_bit(__ICE_SERVICE_DIS, pf->state); /* since everything is good, start the service timer */ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); @@ -3414,6 +3727,7 @@ static int ice_probe(struct pci_dev *pdev, return 0; err_alloc_sw_unroll: + set_bit(__ICE_SERVICE_DIS, pf->state); set_bit(__ICE_DOWN, pf->state); devm_kfree(&pf->pdev->dev, pf->first_sw); err_msix_misc_unroll: @@ -3436,24 +3750,14 @@ err_exit_unroll: static void ice_remove(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - int i = 0; - int err; if (!pf) return; set_bit(__ICE_DOWN, pf->state); + ice_service_task_stop(pf); - for (i = 0; i < pf->num_alloc_vsi; i++) { - if (!pf->vsi[i]) - continue; - - err = ice_vsi_release(pf->vsi[i]); - if (err) - dev_dbg(&pf->pdev->dev, "Failed to release VSI index %d (err %d)\n", - i, err); - } - + ice_vsi_release_all(pf); ice_free_irq_msix_misc(pf); ice_clear_interrupt_scheme(pf); ice_deinit_pf(pf); @@ -3500,7 +3804,7 @@ static int __init ice_module_init(void) pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver); pr_info("%s\n", ice_copyright); - ice_wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; @@ -4185,7 +4489,14 @@ static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) } status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, NULL); - if (status) { + /* if the disable queue command was exercised during an active reset + * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as + * the reset operation disables queues at the hardware level anyway. + */ + if (status == ICE_ERR_RESET_ONGOING) { + dev_dbg(&pf->pdev->dev, + "Reset in progress. LAN Tx queues already disabled\n"); + } else if (status) { dev_err(&pf->pdev->dev, "Failed to disable LAN Tx queues, error: %d\n", status); @@ -5080,8 +5391,14 @@ static int ice_vsi_release(struct ice_vsi *vsi) if (!vsi->back) return -ENODEV; pf = vsi->back; - - if (vsi->netdev) { + /* do not unregister and free netdevs while the driver is in the reset + * recovery pending state. Since reset/rebuild happens through the PF + * service task workqueue, it's not a good idea to unregister a netdev + * that is associated with the PF that is running the work queue items + * currently. This is done to avoid a check_flush_dependency() warning + * on this wq + */ + if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) { unregister_netdev(vsi->netdev); free_netdev(vsi->netdev); vsi->netdev = NULL; @@ -5107,12 +5424,40 @@ static int ice_vsi_release(struct ice_vsi *vsi) pf->q_left_tx += vsi->alloc_txq; pf->q_left_rx += vsi->alloc_rxq; - ice_vsi_clear(vsi);
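The ICE_ERR_RESET_ONGOING handling in ice_vsi_stop_tx_rings() above is a benign-error pattern: during an active reset the hardware has already disabled the queues, so that particular status is informational rather than fatal. The pattern in isolation (the -18 value mirrors the ice_status.h hunk later in this diff; everything else is invented):

	#include <stdio.h>

	#define ERR_RESET_ONGOING (-18)	/* mirrors ICE_ERR_RESET_ONGOING */

	static int stop_tx_queues(int fw_status)
	{
		if (fw_status == ERR_RESET_ONGOING) {
			/* benign: the reset already disabled the queues */
			printf("reset in progress, Tx queues already disabled\n");
			return 0;
		}
		if (fw_status) {
			printf("failed to disable Tx queues, error %d\n", fw_status);
			return fw_status;
		}
		return 0;
	}

	int main(void)
	{
		return stop_tx_queues(ERR_RESET_ONGOING);	/* exits 0 */
	}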
+ /* retain the SW VSI data structure since it is needed to unregister + * and free the VSI netdev when the PF is not in the reset recovery + * pending state, e.g. during rmmod. + */ + if (!ice_is_reset_recovery_pending(pf->state)) + ice_vsi_clear(vsi); return 0; } /** + * ice_vsi_release_all - Delete all VSIs + * @pf: PF from which all VSIs are being removed + */ +static void ice_vsi_release_all(struct ice_pf *pf) +{ + int err, i; + + if (!pf->vsi) + return; + + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (!pf->vsi[i]) + continue; + + err = ice_vsi_release(pf->vsi[i]); + if (err) + dev_dbg(&pf->pdev->dev, + "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", + i, err, pf->vsi[i]->vsi_num); + } +} + +/** * ice_dis_vsi - pause a VSI * @vsi: the VSI being paused */ @@ -5124,27 +5469,31 @@ static void ice_dis_vsi(struct ice_vsi *vsi) set_bit(__ICE_NEEDS_RESTART, vsi->state); if (vsi->netdev && netif_running(vsi->netdev) && - vsi->type == ICE_VSI_PF) + vsi->type == ICE_VSI_PF) { + rtnl_lock(); vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - - ice_vsi_close(vsi); + rtnl_unlock(); + } else { + ice_vsi_close(vsi); + } } /** * ice_ena_vsi - resume a VSI * @vsi: the VSI being resumed */ -static void ice_ena_vsi(struct ice_vsi *vsi) +static int ice_ena_vsi(struct ice_vsi *vsi) { - if (!test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) - return; + int err = 0; + + if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state)) + if (vsi->netdev && netif_running(vsi->netdev)) { + rtnl_lock(); + err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev); + rtnl_unlock(); + } - if (vsi->netdev && netif_running(vsi->netdev)) - vsi->netdev->netdev_ops->ndo_open(vsi->netdev); - else if (ice_vsi_open(vsi)) - /* this clears the DOWN bit */ - dev_dbg(&vsi->back->pdev->dev, "Failed open VSI 0x%04X on switch 0x%04X\n", - vsi->vsi_num, vsi->vsw->sw_id); + return err; } /** @@ -5164,13 +5513,47 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf) * ice_pf_ena_all_vsi - Resume all VSIs on a PF * @pf: the PF */ -static void ice_pf_ena_all_vsi(struct ice_pf *pf) +static int ice_pf_ena_all_vsi(struct ice_pf *pf) { int v; ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_ena_vsi(pf->vsi[v]); + if (ice_ena_vsi(pf->vsi[v])) + return -EIO; + + return 0; +} + +/** + * ice_vsi_rebuild_all - rebuild all VSIs in pf + * @pf: the PF + */ +static int ice_vsi_rebuild_all(struct ice_pf *pf) +{ + int i; + + /* loop through pf->vsi array and reinit the VSI if found */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + int err; + + if (!pf->vsi[i]) + continue; + + err = ice_vsi_rebuild(pf->vsi[i]); + if (err) { + dev_err(&pf->pdev->dev, + "VSI at index %d rebuild failed\n", + pf->vsi[i]->idx); + return err; + } + + dev_info(&pf->pdev->dev, + "VSI at index %d rebuilt.
vsi_num = 0x%x\n", + pf->vsi[i]->idx, pf->vsi[i]->vsi_num); + } + + return 0; } /** @@ -5192,13 +5575,13 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_init_all_ctrlq(hw); if (ret) { dev_err(dev, "control queues init failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ret = ice_clear_pf_cfg(hw); if (ret) { dev_err(dev, "clear PF configuration failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } ice_clear_pxe_mode(hw); @@ -5206,14 +5589,24 @@ static void ice_rebuild(struct ice_pf *pf) ret = ice_get_caps(hw); if (ret) { dev_err(dev, "ice_get_caps failed %d\n", ret); - goto fail_reset; + goto err_init_ctrlq; } - /* basic nic switch setup */ - err = ice_setup_pf_sw(pf); + err = ice_sched_init_port(hw->port_info); + if (err) + goto err_sched_init_port; + + err = ice_vsi_rebuild_all(pf); if (err) { - dev_err(dev, "ice_setup_pf_sw failed\n"); - goto fail_reset; + dev_err(dev, "ice_vsi_rebuild_all failed\n"); + goto err_vsi_rebuild; + } + + ret = ice_replay_all_fltr(&pf->hw); + if (ret) { + dev_err(&pf->pdev->dev, + "error replaying switch filter rules\n"); + goto err_vsi_rebuild; } /* start misc vector */ @@ -5221,20 +5614,35 @@ static void ice_rebuild(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "misc vector setup failed: %d\n", err); - goto fail_reset; + goto err_vsi_rebuild; } } /* restart the VSIs that were rebuilt and running before the reset */ - ice_pf_ena_all_vsi(pf); + err = ice_pf_ena_all_vsi(pf); + if (err) { + dev_err(&pf->pdev->dev, "error enabling VSIs\n"); + /* no need to disable VSIs in tear down path in ice_rebuild() + * since its already taken care in ice_vsi_open() + */ + goto err_vsi_rebuild; + } + /* if we get here, reset flow is successful */ + clear_bit(__ICE_RESET_FAILED, pf->state); return; -fail_reset: +err_vsi_rebuild: + ice_vsi_release_all(pf); +err_sched_init_port: + ice_sched_cleanup_all(hw); +err_init_ctrlq: ice_shutdown_all_ctrlq(hw); set_bit(__ICE_RESET_FAILED, pf->state); clear_recovery: - set_bit(__ICE_RESET_RECOVERY_PENDING, pf->state); + /* set this bit in PF state to control service task scheduling */ + set_bit(__ICE_NEEDS_RESTART, pf->state); + dev_err(dev, "Rebuild failed, unload and reload driver\n"); } /** @@ -5390,6 +5798,232 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) } /** + * ice_bridge_getlink - Get the hardware bridge mode + * @skb: skb buff + * @pid: process id + * @seq: RTNL message seq + * @dev: the netdev being configured + * @filter_mask: filter mask passed in + * @nlflags: netlink flags passed in + * + * Return the bridge mode (VEB/VEPA) + */ +static int +ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, int nlflags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u16 bmode; + + bmode = pf->first_sw->bridge_mode; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bmode, 0, 0, nlflags, + filter_mask, NULL); +} + +/** + * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA) + * @vsi: Pointer to VSI structure + * @bmode: Hardware bridge mode (VEB/VEPA) + * + * Returns 0 on success, negative on failure + */ +static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) +{ + struct device *dev = &vsi->back->pdev->dev; + struct ice_aqc_vsi_props *vsi_props; + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx ctxt = { 0 }; + enum ice_status status; + + vsi_props = &vsi->info; + ctxt.info = 
vsi->info; + + if (bmode == BRIDGE_MODE_VEB) + /* change from VEPA to VEB mode */ + ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + else + /* change from VEB to VEPA mode */ + ctxt.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; + ctxt.vsi_num = vsi->vsi_num; + ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); + status = ice_aq_update_vsi(hw, &ctxt, NULL); + if (status) { + dev_err(dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n", + bmode, status, hw->adminq.sq_last_status); + return -EIO; + } + /* Update sw flags for bookkeeping */ + vsi_props->sw_flags = ctxt.info.sw_flags; + + return 0; +} + +/** + * ice_bridge_setlink - Set the hardware bridge mode + * @dev: the netdev being configured + * @nlh: RTNL message + * @flags: bridge setlink flags + * + * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is + * hooked up. Iterates through the PF VSI list and sets the loopback mode (if + * not already set) for all VSIs connected to this switch, and also updates the + * unicast switch filter rules for the corresponding switch of the netdev. + */ +static int +ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 __always_unused flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + struct nlattr *attr, *br_spec; + struct ice_hw *hw = &pf->hw; + enum ice_status status; + struct ice_sw *pf_sw; + int rem, v, err = 0; + + pf_sw = pf->first_sw; + /* find the attribute in the netlink message */ + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + mode = nla_get_u16(attr); + if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) + return -EINVAL; + /* Continue if bridge mode is not being flipped */ + if (mode == pf_sw->bridge_mode) + continue; + /* Iterate through the PF VSI list and update the loopback + * mode of the VSI + */ + ice_for_each_vsi(pf, v) { + if (!pf->vsi[v]) + continue; + err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); + if (err) + return err; + } + + hw->evb_veb = (mode == BRIDGE_MODE_VEB); + /* Update the unicast switch filter rules for the corresponding + * switch of the netdev + */ + status = ice_update_sw_rule_bridge_mode(hw); + if (status) { + netdev_err(dev, "update SW_RULE for bridge mode failed, mode = %d err %d aq_err %d\n", + mode, status, hw->adminq.sq_last_status); + /* revert hw->evb_veb */ + hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); + return -EIO; + } + + pf_sw->bridge_mode = mode; + } + + return 0; +} + +/** + * ice_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void ice_tx_timeout(struct net_device *netdev) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_ring *tx_ring = NULL; + struct ice_vsi *vsi = np->vsi; + struct ice_pf *pf = vsi->back; + u32 head, val = 0, i; + int hung_queue = -1; + + pf->tx_timeout_count++; + + /* find the stopped queue the same way the stack does */ + for (i = 0; i < netdev->num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(netdev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + netdev->watchdog_timeo))) { + hung_queue = i; + break; + } + } + + if (i == netdev->num_tx_queues) { + netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); + } else { + /* now that we have an index, find
the tx_ring struct */ + for (i = 0; i < vsi->num_txq; i++) { + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { + if (hung_queue == + vsi->tx_rings[i]->q_index) { + tx_ring = vsi->tx_rings[i]; + break; + } + } + } + } + + /* Reset recovery level if enough time has elapsed after last timeout. + * Also ensure no new reset action happens before next timeout period. + */ + if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) + pf->tx_timeout_recovery_level = 1; + else if (time_before(jiffies, (pf->tx_timeout_last_recovery + + netdev->watchdog_timeo))) + return; + + if (tx_ring) { + head = tx_ring->next_to_clean; + /* Read interrupt register */ + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + val = rd32(&pf->hw, + GLINT_DYN_CTL(tx_ring->q_vector->v_idx + + tx_ring->vsi->base_vector - 1)); + + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + vsi->vsi_num, hung_queue, tx_ring->next_to_clean, + head, tx_ring->next_to_use, + readl(tx_ring->tail), val); + } + + pf->tx_timeout_last_recovery = jiffies; + netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n", + pf->tx_timeout_recovery_level, hung_queue); + + switch (pf->tx_timeout_recovery_level) { + case 1: + set_bit(__ICE_PFR_REQ, pf->state); + break; + case 2: + set_bit(__ICE_CORER_REQ, pf->state); + break; + case 3: + set_bit(__ICE_GLOBR_REQ, pf->state); + break; + default: + netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in unrecoverable state.\n"); + set_bit(__ICE_DOWN, pf->state); + set_bit(__ICE_NEEDS_RESTART, vsi->state); + set_bit(__ICE_SERVICE_DIS, pf->state); + break; + } + + ice_service_task_schedule(pf); + pf->tx_timeout_recovery_level++; +} + +/** * ice_open - Called when a network interface becomes active * @netdev: network interface device structure * @@ -5407,6 +6041,11 @@ static int ice_open(struct net_device *netdev) struct ice_vsi *vsi = np->vsi; int err; + if (test_bit(__ICE_NEEDS_RESTART, vsi->back->state)) { + netdev_err(netdev, "driver needs to be unloaded and reloaded\n"); + return -EIO; + } + netif_carrier_off(netdev); err = ice_vsi_open(vsi); @@ -5503,6 +6142,9 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, .ndo_set_features = ice_set_features, + .ndo_bridge_getlink = ice_bridge_getlink, + .ndo_bridge_setlink = ice_bridge_setlink, .ndo_fdb_add = ice_fdb_add, .ndo_fdb_del = ice_fdb_del, + .ndo_tx_timeout = ice_tx_timeout, }; diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index 295a8cd87fc1..3274c543283c 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -137,7 +137,7 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access) if (hw->nvm.blank_nvm_mode) return 0; - return ice_acquire_res(hw, ICE_NVM_RES_ID, access); + return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index eeae199469b6..9b7b50554952 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -17,7 +17,6 @@ ice_sched_add_root_node(struct ice_port_info *pi, { struct ice_sched_node *root; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -28,8 +27,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, if (!root) return ICE_ERR_NO_MEMORY; - 
max_children = le16_to_cpu(hw->layer_info[0].max_children); - root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + /* coverity[suspicious_sizeof] */ + root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], sizeof(*root), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); @@ -100,7 +99,6 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, struct ice_sched_node *parent; struct ice_sched_node *node; struct ice_hw *hw; - u16 max_children; if (!pi) return ICE_ERR_PARAM; @@ -120,9 +118,10 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); if (!node) return ICE_ERR_NO_MEMORY; - max_children = le16_to_cpu(hw->layer_info[layer].max_children); - if (max_children) { - node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, + if (hw->max_children[layer]) { + /* coverity[suspicious_sizeof] */ + node->children = devm_kcalloc(ice_hw_to_dev(hw), + hw->max_children[layer], sizeof(*node), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); @@ -192,14 +191,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; + buf->hdr.parent_teid = parent->info.node_teid; buf->hdr.num_elems = cpu_to_le16(num_nodes); for (i = 0; i < num_nodes; i++) buf->teid[i] = cpu_to_le32(node_teids[i]); + status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, &num_groups_removed, NULL); if (status || num_groups_removed != 1) ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); + devm_kfree(ice_hw_to_dev(hw), buf); return status; } @@ -592,13 +594,16 @@ static void ice_sched_clear_port(struct ice_port_info *pi) */ void ice_sched_cleanup_all(struct ice_hw *hw) { - if (!hw || !hw->port_info) + if (!hw) return; - if (hw->layer_info) + if (hw->layer_info) { devm_kfree(ice_hw_to_dev(hw), hw->layer_info); + hw->layer_info = NULL; + } - ice_sched_clear_port(hw->port_info); + if (hw->port_info) + ice_sched_clear_port(hw->port_info); hw->num_tx_sched_layers = 0; hw->num_tx_sched_phys_layers = 0; @@ -671,9 +676,13 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ICE_AQC_ELEM_VALID_EIR; buf->generic[i].data.generic = 0; buf->generic[i].data.cir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.cir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); buf->generic[i].data.eir_bw.bw_profile_idx = - ICE_SCHED_DFLT_RL_PROF_ID; + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); + buf->generic[i].data.eir_bw.bw_alloc = + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); } status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, @@ -697,7 +706,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, teid = le32_to_cpu(buf->generic[i].node_teid); new_node = ice_sched_find_node_by_teid(parent, teid); - if (!new_node) { ice_debug(hw, ICE_DBG_SCHED, "Node is missing for teid =%d\n", teid); @@ -710,7 +718,6 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, /* add it to previous node sibling pointer */ /* Note: siblings are not linked across branches */ prev = ice_sched_get_first_node(hw, tc_node, layer); - if (prev && prev != new_node) { while (prev->sibling) prev = prev->sibling; @@ -760,8 +767,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, return ICE_ERR_PARAM; /* max children per node per layer */ - max_child_nodes = - 
le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); + max_child_nodes = hw->max_children[parent->tx_sched_layer]; /* current number of children + required nodes exceed max children ? */ if ((parent->num_children + num_nodes) > max_child_nodes) { @@ -851,78 +857,6 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) } /** - * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer - * @pi: pointer to the port info struct - * @layer: layer number - * - * This function calculates the number of nodes present in the scheduler tree - * including all the branches for a given layer - */ -static u16 -ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) -{ - struct ice_hw *hw; - u16 num_nodes = 0; - u8 i; - - if (!pi) - return num_nodes; - - hw = pi->hw; - - /* Calculate the number of nodes for all TCs */ - for (i = 0; i < pi->root->num_children; i++) { - struct ice_sched_node *tc_node, *node; - - tc_node = pi->root->children[i]; - - /* Get the first node */ - node = ice_sched_get_first_node(hw, tc_node, layer); - if (!node) - continue; - - /* count the siblings */ - while (node) { - num_nodes++; - node = node->sibling; - } - } - - return num_nodes; -} - -/** - * ice_sched_val_max_nodes - check max number of nodes reached or not - * @pi: port information structure - * @new_num_nodes_per_layer: pointer to the new number of nodes array - * - * This function checks whether the scheduler tree layers have enough space to - * add new nodes - */ -static enum ice_status -ice_sched_validate_for_max_nodes(struct ice_port_info *pi, - u16 *new_num_nodes_per_layer) -{ - struct ice_hw *hw = pi->hw; - u8 i, qg_layer; - u16 num_nodes; - - qg_layer = ice_sched_get_qgrp_layer(hw); - - /* walk through all the layers from SW entry point to qgroup layer */ - for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { - num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); - if (num_nodes + new_num_nodes_per_layer[i] > - le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { - ice_debug(hw, ICE_DBG_SCHED, - "max nodes reached for layer = %d\n", i); - return ICE_ERR_CFG; - } - } - return 0; -} - -/** * ice_rm_dflt_leaf_node - remove the default leaf node in the tree * @pi: port information structure * @@ -1003,14 +937,12 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi) hw = pi->hw; /* Query the Default Topology from FW */ - buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, - sizeof(*buf), GFP_KERNEL); + buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); if (!buf) return ICE_ERR_NO_MEMORY; /* Query default scheduling tree topology */ - status = ice_aq_get_dflt_topo(hw, pi->lport, buf, - sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, &num_branches, NULL); if (status) goto err_init_port; @@ -1097,6 +1029,8 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) { struct ice_aqc_query_txsched_res_resp *buf; enum ice_status status = 0; + __le16 max_sibl; + u8 i; if (hw->layer_info) return status; @@ -1115,7 +1049,20 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw) hw->flattened_layers = buf->sched_props.flattening_bitmap; hw->max_cgds = buf->sched_props.max_pf_cgds; - hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, + /* max sibling group size of current layer refers to the max children + * of the below layer node. 
+ * layer 1 node max children will be layer 2 max sibling group size + * layer 2 node max children will be layer 3 max sibling group size + * and so on. This array will be populated from root (index 0) to + * qgroup layer 7. Leaf node has no children. + */ + for (i = 0; i < hw->num_tx_sched_layers; i++) { + max_sibl = buf->layer_props[i].max_sibl_grp_sz; + hw->max_children[i] = le16_to_cpu(max_sibl); + } + + hw->layer_info = (struct ice_aqc_layer_props *) + devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, (hw->num_tx_sched_layers * sizeof(*hw->layer_info)), GFP_KERNEL); @@ -1202,7 +1149,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); - max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); + max_children = pi->hw->max_children[qgrp_layer]; list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id); if (!list_elem) @@ -1278,10 +1225,8 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes) /* calculate num nodes from q group to VSI layer */ for (i = qgl; i > vsil; i--) { - u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); - /* round to the next integer if there is a remainder */ - num = DIV_ROUND_UP(num, max_children); + num = DIV_ROUND_UP(num, hw->max_children[i]); /* need at least one node */ num_nodes[i] = num ? num : 1; @@ -1311,16 +1256,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u16 num_added = 0; u8 i, qgl, vsil; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - qgl = ice_sched_get_qgrp_layer(hw); vsil = ice_sched_get_vsi_layer(hw); parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); for (i = vsil + 1; i <= qgl; i++) { if (!parent) return ICE_ERR_CFG; + status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, num_nodes[i], &first_node_teid, @@ -1398,8 +1340,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, struct ice_sched_node *tc_node, u16 *num_nodes) { struct ice_sched_node *node; - u16 max_child; - u8 i, vsil; + u8 vsil; + int i; vsil = ice_sched_get_vsi_layer(hw); for (i = vsil; i >= hw->sw_entry_point_layer; i--) @@ -1412,12 +1354,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, /* If intermediate nodes are reached max children * then add a new one. 
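The DIV_ROUND_UP loop above sizes each intermediate scheduler layer by repeatedly dividing the queue count by that layer's fan-out, with a floor of one node per layer. A standalone rendering with invented fan-out values and layer indices:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* invented fan-outs; the driver reads hw->max_children[] from fw */
		unsigned int max_children[8] = { 2, 4, 8, 16, 32, 64, 128, 64 };
		unsigned int num = 200;		/* queues requested */
		int qgl = 7, vsil = 5;		/* assumed layer indices */

		for (int i = qgl; i > vsil; i--) {
			num = DIV_ROUND_UP(num, max_children[i]);
			if (!num)
				num = 1;	/* need at least one node */
			printf("layer %d: %u node(s)\n", i, num);
		}
		return 0;
	}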
*/ - node = ice_sched_get_first_node(hw, tc_node, i); - max_child = le16_to_cpu(hw->layer_info[i].max_children); - + node = ice_sched_get_first_node(hw, tc_node, (u8)i); /* scan all the siblings */ while (node) { - if (node->num_children < max_child) + if (node->num_children < hw->max_children[i]) break; node = node->sibling; } @@ -1451,10 +1391,6 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, if (!pi) return ICE_ERR_PARAM; - status = ice_sched_validate_for_max_nodes(pi, num_nodes); - if (status) - return status; - vsil = ice_sched_get_vsi_layer(pi->hw); for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, @@ -1479,6 +1415,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, if (i == vsil) parent->vsi_id = vsi_id; } + return 0; } @@ -1633,9 +1570,11 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); if (status) return status; + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); if (!vsi_node) return ICE_ERR_CFG; + vsi->vsi_node[tc] = vsi_node; vsi_node->in_use = true; } diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h index 9a95c4ffd7d7..d2dae913d81e 100644 --- a/drivers/net/ethernet/intel/ice/ice_status.h +++ b/drivers/net/ethernet/intel/ice/ice_status.h @@ -20,6 +20,7 @@ enum ice_status { ICE_ERR_ALREADY_EXISTS = -14, ICE_ERR_DOES_NOT_EXIST = -15, ICE_ERR_MAX_LIMIT = -17, + ICE_ERR_RESET_ONGOING = -18, ICE_ERR_BUF_TOO_SHORT = -52, ICE_ERR_NVM_BLANK_MODE = -53, ICE_ERR_AQ_ERROR = -100, diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 6b7ec2ae5ad6..65b4e1cca6be 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -86,6 +86,35 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, } /** + * ice_init_def_sw_recp - initialize the recipe book keeping tables + * @hw: pointer to the hw struct + * + * Allocate memory for the entire recipe table and initialize the structures/ + * entries corresponding to basic recipes. 
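ice_init_def_sw_recp(), whose body follows, boils down to one filter-rule list plus one lock per lookup type. A userspace skeleton of the same bookkeeping, with pthreads standing in for mutex_init() and invented sizes:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define NUM_LKUP_TYPES 8	/* stands in for ICE_SW_LKUP_LAST */

	struct recipe {
		unsigned int root_rid;		/* recipe id == lookup type */
		pthread_mutex_t rule_lock;	/* protects the rule list */
		/* a list head of filter rules would hang off here */
	};

	static struct recipe *init_recipes(void)
	{
		struct recipe *recps = calloc(NUM_LKUP_TYPES, sizeof(*recps));

		if (!recps)
			return NULL;
		for (unsigned int i = 0; i < NUM_LKUP_TYPES; i++) {
			recps[i].root_rid = i;
			pthread_mutex_init(&recps[i].rule_lock, NULL);
		}
		return recps;
	}

	int main(void)
	{
		struct recipe *r = init_recipes();

		printf("recipe 3 rid = %u\n", r ? r[3].root_rid : 0);
		free(r);
		return 0;
	}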
+ */ +enum ice_status +ice_init_def_sw_recp(struct ice_hw *hw) +{ + struct ice_sw_recipe *recps; + u8 i; + + recps = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_NUM_RECIPES, + sizeof(struct ice_sw_recipe), GFP_KERNEL); + if (!recps) + return ICE_ERR_NO_MEMORY; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + recps[i].root_rid = i; + INIT_LIST_HEAD(&recps[i].filt_rules); + mutex_init(&recps[i].filt_rule_lock); + } + + hw->switch_info->recp_list = recps; + + return 0; +} + +/** * ice_aq_get_sw_cfg - get switch configuration * @hw: pointer to the hardware structure * @buf: pointer to the result buffer @@ -140,17 +169,17 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf, * * Add a VSI context to the hardware (0x0210) */ -enum ice_status +static enum ice_status ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd) { struct ice_aqc_add_update_free_vsi_resp *res; struct ice_aqc_add_get_update_free_vsi *cmd; - enum ice_status status; struct ice_aq_desc desc; + enum ice_status status; cmd = &desc.params.vsi_cmd; - res = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + res = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi); @@ -175,6 +204,42 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** + * ice_aq_free_vsi + * @hw: pointer to the hw struct + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware (0x0213) + */ +static enum ice_status +ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + struct ice_aqc_add_update_free_vsi_resp *resp; + struct ice_aqc_add_get_update_free_vsi *cmd; + struct ice_aq_desc desc; + enum ice_status status; + + cmd = &desc.params.vsi_cmd; + resp = &desc.params.add_update_free_vsi_res; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + + cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); + if (keep_vsi_alloc) + cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + + status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); + if (!status) { + vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); + vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); + } + + return status; +} + +/** * ice_aq_update_vsi * @hw: pointer to the hw struct * @vsi_ctx: pointer to a VSI context struct @@ -192,7 +257,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, enum ice_status status; cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; + resp = &desc.params.add_update_free_vsi_res; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi); @@ -212,38 +277,202 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, } /** - * ice_aq_free_vsi + * ice_update_fltr_vsi_map - update given filter VSI map + * @list_head: list for which filters needs to be updated + * @list_lock: filter lock which needs to be updated + * @old_vsi_num: old VSI HW id + * @new_vsi_num: new VSI HW id + * + * update the VSI map for a given filter list + */ +static void +ice_update_fltr_vsi_map(struct list_head *list_head, + struct mutex *list_lock, u16 old_vsi_num, + u16 new_vsi_num) +{ + struct ice_fltr_mgmt_list_entry *itr; + + mutex_lock(list_lock); + if (list_empty(list_head)) + goto exit_update_map; + + list_for_each_entry(itr, list_head, list_entry) { + if 
(itr->vsi_list_info && + test_bit(old_vsi_num, itr->vsi_list_info->vsi_map)) { + clear_bit(old_vsi_num, itr->vsi_list_info->vsi_map); + set_bit(new_vsi_num, itr->vsi_list_info->vsi_map); + } else if (itr->fltr_info.fltr_act == ICE_FWD_TO_VSI && + itr->fltr_info.fwd_id.vsi_id == old_vsi_num) { + itr->fltr_info.fwd_id.vsi_id = new_vsi_num; + itr->fltr_info.src = new_vsi_num; + } + } +exit_update_map: + mutex_unlock(list_lock); +} + +/** + * ice_update_all_fltr_vsi_map - update all filters VSI map + * @hw: pointer to the hardware structure + * @old_vsi_num: old VSI HW id + * @new_vsi_num: new VSI HW id + * + * update all filters VSI map + */ +static void +ice_update_all_fltr_vsi_map(struct ice_hw *hw, u16 old_vsi_num, u16 new_vsi_num) +{ + struct ice_switch_info *sw = hw->switch_info; + u8 i; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct list_head *head = &sw->recp_list[i].filt_rules; + struct mutex *lock; /* Lock to protect filter rule list */ + + lock = &sw->recp_list[i].filt_rule_lock; + ice_update_fltr_vsi_map(head, lock, old_vsi_num, + new_vsi_num); + } +} + +/** + * ice_is_vsi_valid - check whether the VSI is valid or not * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * check whether the VSI is valid or not + */ +static bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle) +{ + return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_get_hw_vsi_num - return the hw VSI number + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the hw VSI number + * Caution: call this function only if VSI is valid (ice_is_vsi_valid) + */ +static u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle) +{ + return hw->vsi_ctx[vsi_handle]->vsi_num; +} + +/** + * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * return the VSI context entry for a given VSI handle + */ +static struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle]; +} + +/** + * ice_save_vsi_ctx - save the VSI context for a given VSI handle + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * @vsi: VSI context pointer + * + * save the VSI context entry for a given VSI handle + */ +static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, + struct ice_vsi_ctx *vsi) +{ + hw->vsi_ctx[vsi_handle] = vsi; +} + +/** + * ice_clear_vsi_ctx - clear the VSI context entry + * @hw: pointer to the hw struct + * @vsi_handle: VSI handle + * + * clear the VSI context entry + */ +static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle) +{ + struct ice_vsi_ctx *vsi; + + vsi = ice_get_vsi_ctx(hw, vsi_handle); + if (vsi) { + devm_kfree(ice_hw_to_dev(hw), vsi); + hw->vsi_ctx[vsi_handle] = NULL; + } +} + +/** + * ice_add_vsi - add VSI context to the hardware and VSI handle list + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle provided by drivers * @vsi_ctx: pointer to a VSI context struct - * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources * @cd: pointer to command details structure or NULL * - * Get VSI context info from hardware (0x0213) + * Add a VSI context to the hardware also add it into the VSI handle list. + * If this function gets called after reset for existing VSIs then update + * with the new HW VSI number in the corresponding VSI handle list entry. 
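*/

The vsi_handle helpers above add one level of indirection: software holds a stable handle while the firmware-assigned vsi_num may change across a reset, and ice_add_vsi() patches the stored number (and, via ice_update_all_fltr_vsi_map(), the filter lists) in place. Reduced to its essentials (sizes and values invented):

	#include <stdio.h>

	#define MAX_VSI 16

	/* handle -> firmware VSI number; the handle stays stable across resets */
	static int hw_vsi_num[MAX_VSI];

	static void save_vsi(int handle, int fw_num)
	{
		hw_vsi_num[handle] = fw_num;
	}

	int main(void)
	{
		save_vsi(0, 5);		/* initial add: firmware returned VSI 5 */
		save_vsi(0, 9);		/* re-add after reset: same handle, new number */
		printf("handle 0 -> hw VSI %d\n", hw_vsi_num[0]);
		return 0;
	}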
enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd) +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd) { - struct ice_aqc_add_update_free_vsi_resp *resp; - struct ice_aqc_add_get_update_free_vsi *cmd; - struct ice_aq_desc desc; + struct ice_vsi_ctx *tmp_vsi_ctx; enum ice_status status; - cmd = &desc.params.vsi_cmd; - resp = (struct ice_aqc_add_update_free_vsi_resp *)&desc.params.raw; - - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi); + if (vsi_handle >= ICE_MAX_VSI) + return ICE_ERR_PARAM; + status = ice_aq_add_vsi(hw, vsi_ctx, cd); + if (status) + return status; + tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); + if (!tmp_vsi_ctx) { + /* Create a new vsi context */ + tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw), + sizeof(*tmp_vsi_ctx), GFP_KERNEL); + if (!tmp_vsi_ctx) { + ice_aq_free_vsi(hw, vsi_ctx, false, cd); + return ICE_ERR_NO_MEMORY; + } + *tmp_vsi_ctx = *vsi_ctx; + ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx); + } else { + /* update with new HW VSI num */ + if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num) { + /* update all filter lists with new HW VSI num */ + ice_update_all_fltr_vsi_map(hw, tmp_vsi_ctx->vsi_num, + vsi_ctx->vsi_num); + tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num; + } + } - cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID); - if (keep_vsi_alloc) - cmd->cmd_flags = cpu_to_le16(ICE_AQ_VSI_KEEP_ALLOC); + return status; +} - status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd); - if (!status) { - vsi_ctx->vsis_allocd = le16_to_cpu(resp->vsi_used); - vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free); - } +/** + * ice_free_vsi - free VSI context from hardware and VSI handle list + * @hw: pointer to the hw struct + * @vsi_handle: unique VSI handle + * @vsi_ctx: pointer to a VSI context struct + * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources + * @cd: pointer to command details structure or NULL + * + * Free VSI context info from hardware as well as from VSI handle list + */ +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd) +{ + enum ice_status status; + if (!ice_is_vsi_valid(hw, vsi_handle)) + return ICE_ERR_PARAM; + vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle); + status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd); + if (!status) + ice_clear_vsi_ctx(hw, vsi_handle); return status; } @@ -464,8 +693,9 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc) { u16 vlan_id = ICE_MAX_VLAN_ID + 1; - u8 eth_hdr[DUMMY_ETH_HDR_LEN]; void *daddr = NULL; + u16 eth_hdr_sz; + u8 *eth_hdr; u32 act = 0; __be16 *off; @@ -477,8 +707,11 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, return; } + eth_hdr_sz = sizeof(dummy_eth_header); + eth_hdr = s_rule->pdata.lkup_tx_rx.hdr; + /* initialize the ether header with a dummy header */ - memcpy(eth_hdr, dummy_eth_header, sizeof(dummy_eth_header)); + memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz); ice_fill_sw_info(hw, f_info); switch (f_info->fltr_act) { @@ -536,7 +769,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, daddr = f_info->l_data.ethertype_mac.mac_addr; /* fall-through */ case ICE_SW_LKUP_ETHERTYPE: - off = (__be16 *)&eth_hdr[ICE_ETH_ETHTYPE_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET); *off =
cpu_to_be16(f_info->l_data.ethertype_mac.ethertype); break; case ICE_SW_LKUP_MAC_VLAN: @@ -563,18 +796,16 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info, s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act); if (daddr) - ether_addr_copy(&eth_hdr[ICE_ETH_DA_OFFSET], daddr); + ether_addr_copy(eth_hdr + ICE_ETH_DA_OFFSET, daddr); if (!(vlan_id > ICE_MAX_VLAN_ID)) { - off = (__be16 *)&eth_hdr[ICE_ETH_VLAN_TCI_OFFSET]; + off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET); *off = cpu_to_be16(vlan_id); } /* Create the switch rule with the final dummy Ethernet header */ if (opc != ice_aqc_opc_update_sw_rules) - s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(sizeof(eth_hdr)); - - memcpy(s_rule->pdata.lkup_tx_rx.hdr, eth_hdr, sizeof(eth_hdr)); + s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(eth_hdr_sz); } /** @@ -816,10 +1047,10 @@ static enum ice_status ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { - struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_mgmt_list_entry *fm_entry; struct ice_aqc_sw_rules_elem *s_rule; enum ice_sw_lkup_type l_type; + struct ice_sw_recipe *recp; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -860,31 +1091,9 @@ ice_create_pkt_fwd_rule(struct ice_hw *hw, * calls remove filter AQ command */ l_type = fm_entry->fltr_info.lkup_type; - if (l_type == ICE_SW_LKUP_MAC) { - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - mutex_unlock(&sw->mac_list_lock); - } else if (l_type == ICE_SW_LKUP_VLAN) { - mutex_lock(&sw->vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->vlan_list_head); - mutex_unlock(&sw->vlan_list_lock); - } else if (l_type == ICE_SW_LKUP_ETHERTYPE || - l_type == ICE_SW_LKUP_ETHERTYPE_MAC) { - mutex_lock(&sw->eth_m_list_lock); - list_add(&fm_entry->list_entry, &sw->eth_m_list_head); - mutex_unlock(&sw->eth_m_list_lock); - } else if (l_type == ICE_SW_LKUP_PROMISC || - l_type == ICE_SW_LKUP_PROMISC_VLAN) { - mutex_lock(&sw->promisc_list_lock); - list_add(&fm_entry->list_entry, &sw->promisc_list_head); - mutex_unlock(&sw->promisc_list_lock); - } else if (fm_entry->fltr_info.lkup_type == ICE_SW_LKUP_MAC_VLAN) { - mutex_lock(&sw->mac_vlan_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_vlan_list_head); - mutex_unlock(&sw->mac_vlan_list_lock); - } else { - status = ICE_ERR_NOT_IMPL; - } + recp = &hw->switch_info->recp_list[l_type]; + list_add(&fm_entry->list_entry, &recp->filt_rules); + ice_create_pkt_fwd_rule_exit: devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } @@ -893,19 +1102,15 @@ ice_create_pkt_fwd_rule_exit: /** * ice_update_pkt_fwd_rule * @hw: pointer to the hardware structure - * @rule_id: rule of previously created switch rule to update - * @vsi_list_id: VSI list id to be updated with - * @f_info: ice_fltr_info to pull other information for switch rule + * @f_info: filter information for switch rule * * Call AQ command to update a previously created switch rule with a * VSI list id */ static enum ice_status -ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, - struct ice_fltr_info f_info) +ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info) { struct ice_aqc_sw_rules_elem *s_rule; - struct ice_fltr_info tmp_fltr; enum ice_status status; s_rule = devm_kzalloc(ice_hw_to_dev(hw), @@ -913,14 +1118,9 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, if (!s_rule) return ICE_ERR_NO_MEMORY; - tmp_fltr = f_info; - tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; -
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; + ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules); - ice_fill_sw_rule(hw, &tmp_fltr, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id); + s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(f_info->fltr_rule_id); /* Update switch rule with new rule set to forward VSI list */ status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, @@ -931,7 +1131,48 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, } /** - * ice_handle_vsi_list_mgmt + * ice_update_sw_rule_bridge_mode + * @hw: pointer to the hw struct + * + * Updates unicast switch filter rules based on VEB/VEPA mode + */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; + struct list_head *rule_head; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; + + mutex_lock(rule_lock); + list_for_each_entry(fm_entry, rule_head, list_entry) { + struct ice_fltr_info *fi = &fm_entry->fltr_info; + u8 *addr = fi->l_data.mac.mac_addr; + + /* Update unicast Tx rules to reflect the selected + * VEB/VEPA mode + */ + if ((fi->flag & ICE_FLTR_TX) && is_unicast_ether_addr(addr) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + status = ice_update_pkt_fwd_rule(hw, fi); + if (status) + break; + } + } + + mutex_unlock(rule_lock); + + return status; +} + +/** + * ice_add_update_vsi_list * @hw: pointer to the hardware structure * @m_entry: pointer to current filter management list entry * @cur_fltr: filter information from the book keeping entry @@ -952,10 +1193,10 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, u16 rule_id, u16 vsi_list_id, * using the update switch rule command */ static enum ice_status -ice_handle_vsi_list_mgmt(struct ice_hw *hw, - struct ice_fltr_mgmt_list_entry *m_entry, - struct ice_fltr_info *cur_fltr, - struct ice_fltr_info *new_fltr) +ice_add_update_vsi_list(struct ice_hw *hw, + struct ice_fltr_mgmt_list_entry *m_entry, + struct ice_fltr_info *cur_fltr, + struct ice_fltr_info *new_fltr) { enum ice_status status = 0; u16 vsi_list_id = 0; @@ -975,8 +1216,8 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, * a part of a VSI list. So, create a VSI list with the old and * new VSIs. 
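The interesting case described here is the promotion: when a second VSI shows up for a rule that currently forwards to a single VSI, the rule becomes a VSI-list rule seeded with both members. A stand-alone model of that state change, with illustrative types and a one-word bitmap in place of the driver's vsi_map:

#include <stdio.h>

enum act { FWD_TO_VSI, FWD_TO_VSI_LIST };

struct rule {
        enum act act;
        unsigned long vsi_map;  /* members, valid for FWD_TO_VSI_LIST */
        unsigned int vsi_id;    /* target, valid for FWD_TO_VSI */
};

static void add_vsi_to_rule(struct rule *r, unsigned int vsi)
{
        if (r->act == FWD_TO_VSI) {
                if (r->vsi_id == vsi)
                        return;                         /* already exists */
                r->vsi_map = 1UL << r->vsi_id;          /* seed with the old VSI */
                r->act = FWD_TO_VSI_LIST;               /* rule must be rewritten */
        }
        r->vsi_map |= 1UL << vsi;
}

int main(void)
{
        struct rule r = { FWD_TO_VSI, 0, 3 };

        add_vsi_to_rule(&r, 7);
        printf("act=%d map=0x%lx\n", r.act, r.vsi_map); /* act=1 map=0x88 */
        return 0;
}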
*/ + struct ice_fltr_info tmp_fltr; u16 vsi_id_arr[2]; - u16 fltr_rule; /* A rule already exists with the new VSI being added */ if (cur_fltr->fwd_id.vsi_id == new_fltr->fwd_id.vsi_id) @@ -990,12 +1231,14 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, if (status) return status; - fltr_rule = cur_fltr->fltr_rule_id; + tmp_fltr = *new_fltr; + tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id; + tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; + tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; /* Update the previous switch rule of "MAC forward to VSI" to * "MAC fwd to VSI list" */ - status = ice_update_pkt_fwd_rule(hw, fltr_rule, vsi_list_id, - *new_fltr); + status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) return status; @@ -1040,54 +1283,245 @@ ice_handle_vsi_list_mgmt(struct ice_hw *hw, } /** - * ice_find_mac_entry + * ice_find_rule_entry - Search a rule entry * @hw: pointer to the hardware structure - * @mac_addr: MAC address to search for + * @recp_id: lookup type for which the specified rule needs to be searched + * @f_info: rule information * - * Helper function to search for a MAC entry using a given MAC address - * Returns pointer to the entry if found. + * Helper function to search for a given rule entry + * Returns pointer to entry storing the rule if found */ static struct ice_fltr_mgmt_list_entry * -ice_find_mac_entry(struct ice_hw *hw, u8 *mac_addr) +ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info) { - struct ice_fltr_mgmt_list_entry *m_list_itr, *mac_ret = NULL; + struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL; struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->mac_list_lock); - list_for_each_entry(m_list_itr, &sw->mac_list_head, list_entry) { - u8 *buf = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; - - if (ether_addr_equal(buf, mac_addr)) { - mac_ret = m_list_itr; + struct list_head *list_head; + + list_head = &sw->recp_list[recp_id].filt_rules; + list_for_each_entry(list_itr, list_head, list_entry) { + if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data, + sizeof(f_info->l_data)) && + f_info->flag == list_itr->fltr_info.flag) { + ret = list_itr; break; } } - mutex_unlock(&sw->mac_list_lock); - return mac_ret; + return ret; } /** - * ice_add_shared_mac - Add one MAC shared filter rule + * ice_add_rule_internal - add rule for a given lookup type * @hw: pointer to the hardware structure + * @recp_id: lookup type (recipe id) for which rule has to be added * @f_entry: structure containing MAC forwarding information * - * Adds or updates the book keeping list for the MAC addresses + * Adds or updates the rule lists for a given recipe */ static enum ice_status -ice_add_shared_mac(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) +ice_add_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) { + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *m_entry; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; - new_fltr = &f_entry->fltr_info; + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; - m_entry = ice_find_mac_entry(hw, &new_fltr->l_data.mac.mac_addr[0]); - if (!m_entry) + mutex_lock(rule_lock); + new_fltr = &f_entry->fltr_info; + if (new_fltr->flag & ICE_FLTR_RX) + new_fltr->src = hw->port_info->lport; + else if (new_fltr->flag & ICE_FLTR_TX) + new_fltr->src = f_entry->fltr_info.fwd_id.vsi_id; + + m_entry = ice_find_rule_entry(hw, recp_id, new_fltr); + if (!m_entry) { + 
mutex_unlock(rule_lock); return ice_create_pkt_fwd_rule(hw, f_entry); + } cur_fltr = &m_entry->fltr_info; + status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr); + mutex_unlock(rule_lock); - return ice_handle_vsi_list_mgmt(hw, m_entry, cur_fltr, new_fltr); + return status; +} + +/** + * ice_remove_vsi_list_rule + * @hw: pointer to the hardware structure + * @vsi_list_id: VSI list id generated as part of allocate resource + * @lkup_type: switch rule filter lookup type + * + * The VSI list should be emptied before this function is called to remove the + * VSI list. + */ +static enum ice_status +ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id, + enum ice_sw_lkup_type lkup_type) +{ + struct ice_aqc_sw_rules_elem *s_rule; + enum ice_status status; + u16 s_rule_size; + + s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); + s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); + if (!s_rule) + return ICE_ERR_NO_MEMORY; + + s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); + s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); + + /* Free the vsi_list resource that we allocated. It is assumed that the + * list is empty at this point. + */ + status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, + ice_aqc_opc_free_res); + + devm_kfree(ice_hw_to_dev(hw), s_rule); + return status; +} + +/** + * ice_rem_update_vsi_list + * @hw: pointer to the hardware structure + * @vsi_id: ID of the VSI to remove + * @fm_list: filter management entry for which the VSI list management needs to + * be done + */ +static enum ice_status +ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_id, + struct ice_fltr_mgmt_list_entry *fm_list) +{ + enum ice_sw_lkup_type lkup_type; + enum ice_status status = 0; + u16 vsi_list_id; + + if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST || + fm_list->vsi_count == 0) + return ICE_ERR_PARAM; + + /* A rule with the VSI being removed does not exist */ + if (!test_bit(vsi_id, fm_list->vsi_list_info->vsi_map)) + return ICE_ERR_DOES_NOT_EXIST; + + lkup_type = fm_list->fltr_info.lkup_type; + vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id; + + status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + fm_list->vsi_count--; + clear_bit(vsi_id, fm_list->vsi_list_info->vsi_map); + + if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) || + (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) { + struct ice_vsi_list_map_info *vsi_list_info = + fm_list->vsi_list_info; + u16 rem_vsi_id; + + rem_vsi_id = find_first_bit(vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (rem_vsi_id == ICE_MAX_VSI) + return ICE_ERR_OUT_OF_RANGE; + + status = ice_update_vsi_list_rule(hw, &rem_vsi_id, 1, + vsi_list_id, true, + ice_aqc_opc_update_sw_rules, + lkup_type); + if (status) + return status; + + /* Remove the VSI list since it is no longer used */ + status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); + if (status) + return status; + + /* Change the list entry action from VSI_LIST to VSI */ + fm_list->fltr_info.fltr_act = ICE_FWD_TO_VSI; + fm_list->fltr_info.fwd_id.vsi_id = rem_vsi_id; + + list_del(&vsi_list_info->list_entry); + devm_kfree(ice_hw_to_dev(hw), vsi_list_info); + fm_list->vsi_list_info = NULL; + } + + return status; +} + +/** + * ice_remove_rule_internal - Remove a filter rule of a given type + * @hw: pointer to the hardware structure + * @recp_id: recipe id for which the rule needs to be removed + * @f_entry: rule entry
containing filter information + */ +static enum ice_status +ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id, + struct ice_fltr_list_entry *f_entry) +{ + struct ice_switch_info *sw = hw->switch_info; + struct ice_fltr_mgmt_list_entry *list_elem; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; + bool remove_rule = false; + u16 vsi_id; + + rule_lock = &sw->recp_list[recp_id].filt_rule_lock; + mutex_lock(rule_lock); + list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info); + if (!list_elem) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } + + if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) { + remove_rule = true; + } else { + vsi_id = f_entry->fltr_info.fwd_id.vsi_id; + status = ice_rem_update_vsi_list(hw, vsi_id, list_elem); + if (status) + goto exit; + /* if vsi count goes to zero after updating the vsi list */ + if (list_elem->vsi_count == 0) + remove_rule = true; + } + + if (remove_rule) { + /* Remove the lookup rule */ + struct ice_aqc_sw_rules_elem *s_rule; + + s_rule = devm_kzalloc(ice_hw_to_dev(hw), + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, + GFP_KERNEL); + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto exit; + } + + ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule, + ice_aqc_opc_remove_sw_rules); + + status = ice_aq_sw_rules(hw, s_rule, + ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, + ice_aqc_opc_remove_sw_rules, NULL); + if (status) + goto exit; + + /* Remove the bookkeeping entry from the list */ + devm_kfree(ice_hw_to_dev(hw), s_rule); + + list_del(&list_elem->list_entry); + devm_kfree(ice_hw_to_dev(hw), list_elem); + } +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1106,7 +1540,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) { struct ice_aqc_sw_rules_elem *s_rule, *r_iter; struct ice_fltr_list_entry *m_list_itr; + struct list_head *rule_head; u16 elem_sent, total_elem_left; + struct ice_switch_info *sw; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status = 0; u16 num_unicast = 0; u16 s_rule_size; @@ -1114,48 +1551,62 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) if (!m_list || !hw) return ICE_ERR_PARAM; + s_rule = NULL; + sw = hw->switch_info; + rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock; list_for_each_entry(m_list_itr, m_list, list_entry) { u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0]; - if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC) - return ICE_ERR_PARAM; - if (is_zero_ether_addr(add)) + m_list_itr->fltr_info.flag = ICE_FLTR_TX; + if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC || + is_zero_ether_addr(add)) return ICE_ERR_PARAM; if (is_unicast_ether_addr(add) && !hw->ucast_shared) { /* Don't overwrite the unicast address */ - if (ice_find_mac_entry(hw, add)) + mutex_lock(rule_lock); + if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, + &m_list_itr->fltr_info)) { + mutex_unlock(rule_lock); return ICE_ERR_ALREADY_EXISTS; + } + mutex_unlock(rule_lock); num_unicast++; } else if (is_multicast_ether_addr(add) || (is_unicast_ether_addr(add) && hw->ucast_shared)) { - status = ice_add_shared_mac(hw, m_list_itr); - if (status) { - m_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - m_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + m_list_itr->status = + ice_add_rule_internal(hw, ICE_SW_LKUP_MAC, + m_list_itr); + if (m_list_itr->status) + return m_list_itr->status; } } + mutex_lock(rule_lock); /* Exit if no suitable entries were found for adding bulk switch rule */ - if (!num_unicast) - return
0; + if (!num_unicast) { + status = 0; + goto ice_add_mac_exit; + } + + rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules; /* Allocate switch rule buffer for the bulk update for unicast */ s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; + if (!s_rule) { + status = ICE_ERR_NO_MEMORY; + goto ice_add_mac_exit; + } r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; - if (is_unicast_ether_addr(addr)) { - ice_fill_sw_rule(hw, &m_list_itr->fltr_info, - r_iter, ice_aqc_opc_add_sw_rules); + if (is_unicast_ether_addr(mac_addr)) { + ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter, + ice_aqc_opc_add_sw_rules); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } @@ -1183,11 +1634,10 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) r_iter = s_rule; list_for_each_entry(m_list_itr, m_list, list_entry) { struct ice_fltr_info *f_info = &m_list_itr->fltr_info; - u8 *addr = &f_info->l_data.mac.mac_addr[0]; - struct ice_switch_info *sw = hw->switch_info; + u8 *mac_addr = &f_info->l_data.mac.mac_addr[0]; struct ice_fltr_mgmt_list_entry *fm_entry; - if (is_unicast_ether_addr(addr)) { + if (is_unicast_ether_addr(mac_addr)) { f_info->fltr_rule_id = le16_to_cpu(r_iter->pdata.lkup_tx_rx.index); f_info->fltr_act = ICE_FWD_TO_VSI; @@ -1203,46 +1653,21 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list) /* The book keeping entries will get removed when * base driver calls remove filter AQ command */ - mutex_lock(&sw->mac_list_lock); - list_add(&fm_entry->list_entry, &sw->mac_list_head); - mutex_unlock(&sw->mac_list_lock); + list_add(&fm_entry->list_entry, rule_head); r_iter = (struct ice_aqc_sw_rules_elem *) ((u8 *)r_iter + s_rule_size); } } ice_add_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); + mutex_unlock(rule_lock); + if (s_rule) + devm_kfree(ice_hw_to_dev(hw), s_rule); return status; } /** - * ice_find_vlan_entry - * @hw: pointer to the hardware structure - * @vlan_id: VLAN id to search for - * - * Helper function to search for a VLAN entry using a given VLAN id - * Returns pointer to the entry if found. 
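The bulk path above pushes the unicast rules out in as many admin-queue calls as needed, capping each call at one buffer's worth of rule elements. A stand-alone sketch of the chunking arithmetic; the 4096-byte cap is an assumption standing in for ICE_AQ_MAX_BUF_LEN, which this patch does not show:

#include <stdio.h>

#define AQ_MAX_BUF_LEN 4096     /* assumed cap per admin-queue call */

static void send_in_chunks(unsigned int total, unsigned int rule_size)
{
        unsigned int per_call = AQ_MAX_BUF_LEN / rule_size;

        while (total) {
                unsigned int n = total < per_call ? total : per_call;

                printf("AQ call with %u rules (%u bytes)\n", n, n * rule_size);
                total -= n;
        }
}

int main(void)
{
        send_in_chunks(100, 96);        /* e.g. 100 unicast rules of 96 B each */
        return 0;
}

With 96-byte rules this comes out to calls of 42, 42 and 16 elements.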
- */ -static struct ice_fltr_mgmt_list_entry * -ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) -{ - struct ice_fltr_mgmt_list_entry *vlan_list_itr, *vlan_ret = NULL; - struct ice_switch_info *sw = hw->switch_info; - - mutex_lock(&sw->vlan_list_lock); - list_for_each_entry(vlan_list_itr, &sw->vlan_list_head, list_entry) - if (vlan_list_itr->fltr_info.l_data.vlan.vlan_id == vlan_id) { - vlan_ret = vlan_list_itr; - break; - } - - mutex_unlock(&sw->vlan_list_lock); - return vlan_ret; -} - -/** * ice_add_vlan_internal - Add one VLAN based filter rule * @hw: pointer to the hardware structure * @f_entry: filter entry containing one VLAN information @@ -1250,20 +1675,22 @@ ice_find_vlan_entry(struct ice_hw *hw, u16 vlan_id) static enum ice_status ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) { + struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_info *new_fltr, *cur_fltr; struct ice_fltr_mgmt_list_entry *v_list_itr; - u16 vlan_id; + struct mutex *rule_lock; /* Lock to protect filter rule list */ + enum ice_status status = 0; new_fltr = &f_entry->fltr_info; /* VLAN id should only be 12 bits */ if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID) return ICE_ERR_PARAM; - vlan_id = new_fltr->l_data.vlan.vlan_id; - v_list_itr = ice_find_vlan_entry(hw, vlan_id); + rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock; + mutex_lock(rule_lock); + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr); if (!v_list_itr) { u16 vsi_id = ICE_VSI_INVAL_ID; - enum ice_status status; u16 vsi_list_id = 0; if (new_fltr->fltr_act == ICE_FWD_TO_VSI) { @@ -1277,26 +1704,33 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) &vsi_list_id, lkup_type); if (status) - return status; + goto exit; new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST; new_fltr->fwd_id.vsi_list_id = vsi_list_id; } status = ice_create_pkt_fwd_rule(hw, f_entry); if (!status && vsi_id != ICE_VSI_INVAL_ID) { - v_list_itr = ice_find_vlan_entry(hw, vlan_id); - if (!v_list_itr) - return ICE_ERR_DOES_NOT_EXIST; + v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, + new_fltr); + if (!v_list_itr) { + status = ICE_ERR_DOES_NOT_EXIST; + goto exit; + } v_list_itr->vsi_list_info = ice_create_vsi_list_map(hw, &vsi_id, 1, vsi_list_id); } - return status; + goto exit; } cur_fltr = &v_list_itr->fltr_info; - return ice_handle_vsi_list_mgmt(hw, v_list_itr, cur_fltr, new_fltr); + status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr, new_fltr); + +exit: + mutex_unlock(rule_lock); + return status; } /** @@ -1313,326 +1747,44 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) return ICE_ERR_PARAM; list_for_each_entry(v_list_itr, v_list, list_entry) { - enum ice_status status; - if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN) return ICE_ERR_PARAM; - - status = ice_add_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + v_list_itr->fltr_info.flag = ICE_FLTR_TX; + v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } return 0; } /** - * ice_remove_vsi_list_rule + * ice_rem_sw_rule_info * @hw: pointer to the hardware structure - * @vsi_list_id: VSI list id generated as part of allocate resource - * @lkup_type: switch rule filter lookup type + * @rule_head: pointer to the switch list structure that we want to delete */ -static enum ice_status -ice_remove_vsi_list_rule(struct ice_hw *hw, 
u16 vsi_list_id, - enum ice_sw_lkup_type lkup_type) -{ - struct ice_aqc_sw_rules_elem *s_rule; - enum ice_status status; - u16 s_rule_size; - - s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0); - s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR); - s_rule->pdata.vsi_list.index = cpu_to_le16(vsi_list_id); - /* FW expects number of VSIs in vsi_list resource to be 0 for clear - * command. Since memory is zero'ed out during initialization, it's not - * necessary to explicitly initialize the variable to 0. - */ - - status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (!status) - /* Free the vsi_list resource that we allocated */ - status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type, - ice_aqc_opc_free_res); - - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_handle_rem_vsi_list_mgmt - * @hw: pointer to the hardware structure - * @vsi_id: ID of the VSI to remove - * @fm_list_itr: filter management entry for which the VSI list management - * needs to be done - */ -static enum ice_status -ice_handle_rem_vsi_list_mgmt(struct ice_hw *hw, u16 vsi_id, - struct ice_fltr_mgmt_list_entry *fm_list_itr) +static void +ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head) { - struct ice_switch_info *sw = hw->switch_info; - enum ice_status status = 0; - enum ice_sw_lkup_type lkup_type; - bool is_last_elem = true; - bool conv_list = false; - bool del_list = false; - u16 vsi_list_id; + if (!list_empty(rule_head)) { + struct ice_fltr_mgmt_list_entry *entry; + struct ice_fltr_mgmt_list_entry *tmp; - lkup_type = fm_list_itr->fltr_info.lkup_type; - vsi_list_id = fm_list_itr->fltr_info.fwd_id.vsi_list_id; - - if (fm_list_itr->vsi_count > 1) { - status = ice_update_vsi_list_rule(hw, &vsi_id, 1, vsi_list_id, - true, - ice_aqc_opc_update_sw_rules, - lkup_type); - if (status) - return status; - fm_list_itr->vsi_count--; - is_last_elem = false; - clear_bit(vsi_id, fm_list_itr->vsi_list_info->vsi_map); - } - - /* For non-VLAN rules that forward packets to a VSI list, convert them - * to forwarding packets to a VSI if there is only one VSI left in the - * list. Unused lists are then removed. - * VLAN rules need to use VSI lists even with only one VSI. 
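That asymmetry survives the refactor: in the new ice_rem_update_vsi_list() earlier in this file, a non-VLAN rule sheds its VSI list once a single member remains, while a VLAN rule keeps its list until the list is empty. The decision reduces to a two-line predicate, modeled stand-alone here with illustrative names:

#include <stdbool.h>
#include <stdio.h>

static bool should_free_vsi_list(unsigned int vsi_count, bool is_vlan_rule)
{
        return (!is_vlan_rule && vsi_count == 1) ||
               (is_vlan_rule && vsi_count == 0);
}

int main(void)
{
        printf("%d %d %d\n",
               should_free_vsi_list(1, false),  /* 1: convert and free */
               should_free_vsi_list(1, true),   /* 0: VLAN keeps its list */
               should_free_vsi_list(0, true));  /* 1: empty VLAN list goes */
        return 0;
}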
- */ - if (fm_list_itr->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST) { - if (lkup_type == ICE_SW_LKUP_VLAN) { - del_list = is_last_elem; - } else if (fm_list_itr->vsi_count == 1) { - conv_list = true; - del_list = true; + list_for_each_entry_safe(entry, tmp, rule_head, list_entry) { + list_del(&entry->list_entry); + devm_kfree(ice_hw_to_dev(hw), entry); } } - - if (del_list) { - /* Remove the VSI list since it is no longer used */ - struct ice_vsi_list_map_info *vsi_list_info = - fm_list_itr->vsi_list_info; - - status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type); - if (status) - return status; - - if (conv_list) { - u16 rem_vsi_id; - - rem_vsi_id = find_first_bit(vsi_list_info->vsi_map, - ICE_MAX_VSI); - - /* Error out when the expected last element is not in - * the VSI list map - */ - if (rem_vsi_id == ICE_MAX_VSI) - return ICE_ERR_OUT_OF_RANGE; - - /* Change the list entry action from VSI_LIST to VSI */ - fm_list_itr->fltr_info.fltr_act = ICE_FWD_TO_VSI; - fm_list_itr->fltr_info.fwd_id.vsi_id = rem_vsi_id; - } - - list_del(&vsi_list_info->list_entry); - devm_kfree(ice_hw_to_dev(hw), vsi_list_info); - fm_list_itr->vsi_list_info = NULL; - } - - if (conv_list) { - /* Convert the rule's forward action to forwarding packets to - * a VSI - */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_update_sw_rules); - - s_rule->pdata.lkup_tx_rx.index = - cpu_to_le16(fm_list_itr->fltr_info.fltr_rule_id); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1, - ice_aqc_opc_update_sw_rules, NULL); - devm_kfree(ice_hw_to_dev(hw), s_rule); - if (status) - return status; - } - - if (is_last_elem) { - /* Remove the lookup rule */ - struct ice_aqc_sw_rules_elem *s_rule; - - s_rule = devm_kzalloc(ice_hw_to_dev(hw), - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - ice_fill_sw_rule(hw, &fm_list_itr->fltr_info, s_rule, - ice_aqc_opc_remove_sw_rules); - - status = ice_aq_sw_rules(hw, s_rule, - ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1, - ice_aqc_opc_remove_sw_rules, NULL); - if (status) - return status; - - /* Remove a book keeping entry from the MAC address list */ - mutex_lock(&sw->mac_list_lock); - list_del(&fm_list_itr->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), fm_list_itr); - devm_kfree(ice_hw_to_dev(hw), s_rule); - } - return status; -} - -/** - * ice_remove_mac_entry - * @hw: pointer to the hardware structure - * @f_entry: structure containing MAC forwarding information - */ -static enum ice_status -ice_remove_mac_entry(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) -{ - struct ice_fltr_mgmt_list_entry *m_entry; - u16 vsi_id; - u8 *add; - - add = &f_entry->fltr_info.l_data.mac.mac_addr[0]; - - m_entry = ice_find_mac_entry(hw, add); - if (!m_entry) - return ICE_ERR_PARAM; - - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, m_entry); } /** - * ice_remove_mac - remove a MAC address based filter rule - * @hw: pointer to the hardware structure - * @m_list: list of MAC addresses and forwarding information - * - * This function removes either a MAC filter rule or a specific VSI from a - * VSI list for a multicast MAC address. - * - * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by - * ice_add_mac. 
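Both the add path and this removed remove path split the work the same way: unicast addresses are batched into one bulk AQ request, while multicast entries (and unicast ones when ucast_shared is set) go through the per-rule path. The split hinges on the multicast bit, the least significant bit of the first address octet, as this stand-alone check illustrates:

#include <stdbool.h>
#include <stdio.h>

static bool is_multicast(const unsigned char *a)
{
        return a[0] & 1;        /* I/G bit of the first octet */
}

int main(void)
{
        unsigned char uc[6] = { 0x02, 0, 0, 0, 0, 1 };
        unsigned char mc[6] = { 0x01, 0, 0x5e, 0, 0, 1 };

        printf("uc -> %s\n", is_multicast(uc) ? "per-rule" : "bulk");
        printf("mc -> %s\n", is_multicast(mc) ? "per-rule" : "bulk");
        return 0;
}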
Caller should be aware that this call will only work if all - * the entries passed into m_list were added previously. It will not attempt to - * do a partial remove of entries that were found. - */ -enum ice_status -ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) -{ - struct ice_aqc_sw_rules_elem *s_rule, *r_iter; - u8 s_rule_size = ICE_SW_RULE_RX_TX_NO_HDR_SIZE; - struct ice_switch_info *sw = hw->switch_info; - struct ice_fltr_mgmt_list_entry *m_entry; - struct ice_fltr_list_entry *m_list_itr; - u16 elem_sent, total_elem_left; - enum ice_status status = 0; - u16 num_unicast = 0; - - if (!m_list) - return ICE_ERR_PARAM; - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr) && !hw->ucast_shared) - num_unicast++; - else if (is_multicast_ether_addr(addr) || - (is_unicast_ether_addr(addr) && hw->ucast_shared)) - ice_remove_mac_entry(hw, m_list_itr); - } - - /* Exit if no unicast addresses found. Multicast switch rules - * were added individually - */ - if (!num_unicast) - return 0; - - /* Allocate switch rule buffer for the bulk update for unicast */ - s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size, - GFP_KERNEL); - if (!s_rule) - return ICE_ERR_NO_MEMORY; - - r_iter = s_rule; - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) { - status = ICE_ERR_DOES_NOT_EXIST; - goto ice_remove_mac_exit; - } - - ice_fill_sw_rule(hw, &m_entry->fltr_info, - r_iter, ice_aqc_opc_remove_sw_rules); - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - } - - /* Call AQ bulk switch rule update for all unicast addresses */ - r_iter = s_rule; - /* Call AQ switch rule in AQ_MAX chunk */ - for (total_elem_left = num_unicast; total_elem_left > 0; - total_elem_left -= elem_sent) { - struct ice_aqc_sw_rules_elem *entry = r_iter; - - elem_sent = min(total_elem_left, - (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size)); - status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size, - elem_sent, ice_aqc_opc_remove_sw_rules, - NULL); - if (status) - break; - r_iter = (struct ice_aqc_sw_rules_elem *) - ((u8 *)r_iter + s_rule_size); - } - - list_for_each_entry(m_list_itr, m_list, list_entry) { - u8 *addr = m_list_itr->fltr_info.l_data.mac.mac_addr; - - if (is_unicast_ether_addr(addr)) { - m_entry = ice_find_mac_entry(hw, addr); - if (!m_entry) - return ICE_ERR_OUT_OF_RANGE; - mutex_lock(&sw->mac_list_lock); - list_del(&m_entry->list_entry); - mutex_unlock(&sw->mac_list_lock); - devm_kfree(ice_hw_to_dev(hw), m_entry); - } - } - -ice_remove_mac_exit: - devm_kfree(ice_hw_to_dev(hw), s_rule); - return status; -} - -/** - * ice_cfg_dflt_vsi - add filter rule to set/unset given VSI as default - * VSI for the switch (represented by swid) + * ice_cfg_dflt_vsi - change state of VSI to set/clear default * @hw: pointer to the hardware structure * @vsi_id: number of VSI to set as default * @set: true to add the above mentioned switch rule, false to remove it * @direction: ICE_FLTR_RX or ICE_FLTR_TX + * + * add filter rule to set/unset given VSI as default VSI for the switch + * (represented by swid) */ enum ice_status ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction) @@ -1704,26 +1856,38 @@ out: } /** - * ice_remove_vlan_internal - Remove one VLAN based filter rule + * ice_remove_mac - remove a MAC address based filter 
rule * @hw: pointer to the hardware structure - * @f_entry: filter entry containing one VLAN information + * @m_list: list of MAC addresses and forwarding information + * + * This function removes either a MAC filter rule or a specific VSI from a + * VSI list for a multicast MAC address. + * + * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by + * ice_add_mac. Caller should be aware that this call will only work if all + * the entries passed into m_list were added previously. It will not attempt to + * do a partial remove of entries that were found. */ -static enum ice_status -ice_remove_vlan_internal(struct ice_hw *hw, - struct ice_fltr_list_entry *f_entry) +enum ice_status +ice_remove_mac(struct ice_hw *hw, struct list_head *m_list) { - struct ice_fltr_info *new_fltr; - struct ice_fltr_mgmt_list_entry *v_list_elem; - u16 vsi_id; + struct ice_fltr_list_entry *list_itr; - new_fltr = &f_entry->fltr_info; - - v_list_elem = ice_find_vlan_entry(hw, new_fltr->l_data.vlan.vlan_id); - if (!v_list_elem) + if (!m_list) return ICE_ERR_PARAM; - vsi_id = f_entry->fltr_info.fwd_id.vsi_id; - return ice_handle_rem_vsi_list_mgmt(hw, vsi_id, v_list_elem); + list_for_each_entry(list_itr, m_list, list_entry) { + enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_MAC) + return ICE_ERR_PARAM; + list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_MAC, + list_itr); + if (list_itr->status) + return list_itr->status; + } + return 0; } /** @@ -1735,20 +1899,78 @@ enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) { struct ice_fltr_list_entry *v_list_itr; - enum ice_status status = 0; if (!v_list || !hw) return ICE_ERR_PARAM; list_for_each_entry(v_list_itr, v_list, list_entry) { - status = ice_remove_vlan_internal(hw, v_list_itr); - if (status) { - v_list_itr->status = ICE_FLTR_STATUS_FW_FAIL; - return status; - } - v_list_itr->status = ICE_FLTR_STATUS_FW_SUCCESS; + enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type; + + if (l_type != ICE_SW_LKUP_VLAN) + return ICE_ERR_PARAM; + v_list_itr->status = ice_remove_rule_internal(hw, + ICE_SW_LKUP_VLAN, + v_list_itr); + if (v_list_itr->status) + return v_list_itr->status; } - return status; + return 0; +} + +/** + * ice_vsi_uses_fltr - Determine if given VSI uses specified filter + * @fm_entry: filter entry to inspect + * @vsi_id: ID of VSI to compare with filter info + */ +static bool +ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_id) +{ + return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI && + fm_entry->fltr_info.fwd_id.vsi_id == vsi_id) || + (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST && + (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))); +} + +/** + * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list + * @hw: pointer to the hardware structure + * @vsi_id: ID of VSI to remove filters from + * @vsi_list_head: pointer to the list to add entry to + * @fi: pointer to fltr_info of filter entry to copy & add + * + * Helper function, used when creating a list of filters to remove from + * a specific VSI. The entry added to vsi_list_head is a COPY of the + * original filter entry, with the exception of fltr_info.fltr_act and + * fltr_info.fwd_id fields. These are set such that later logic can + * extract which VSI to remove the fltr from, and pass on that information. 
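ice_vsi_uses_fltr() above is the predicate that drives this copying: a filter is tied to a VSI either directly or through membership in the rule's VSI-list bitmap. A stand-alone model, with a one-word bitmap standing in for the driver's vsi_map:

#include <stdbool.h>
#include <stdio.h>

enum act { FWD_TO_VSI, FWD_TO_VSI_LIST };

struct fltr {
        enum act act;
        unsigned int vsi_id;    /* valid for FWD_TO_VSI */
        unsigned long vsi_map;  /* valid for FWD_TO_VSI_LIST */
};

static bool vsi_uses_fltr(const struct fltr *f, unsigned int vsi)
{
        return (f->act == FWD_TO_VSI && f->vsi_id == vsi) ||
               (f->act == FWD_TO_VSI_LIST && (f->vsi_map & (1UL << vsi)));
}

int main(void)
{
        struct fltr f = { .act = FWD_TO_VSI_LIST, .vsi_map = 1UL << 4 };

        printf("%d %d\n", vsi_uses_fltr(&f, 4), vsi_uses_fltr(&f, 5));
        return 0;
}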
+ */ +static enum ice_status +ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, + struct list_head *vsi_list_head, + struct ice_fltr_info *fi) +{ + struct ice_fltr_list_entry *tmp; + + /* this memory is freed up in the caller function + * once filters for this VSI are removed + */ + tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ICE_ERR_NO_MEMORY; + + tmp->fltr_info = *fi; + + /* Overwrite these fields to indicate which VSI to remove filter from, + * so find and remove logic can extract the information from the + * list entries. Note that original entries will still have proper + * values. + */ + tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; + tmp->fltr_info.fwd_id.vsi_id = vsi_id; + + list_add(&tmp->list_entry, vsi_list_head); + + return 0; } /** @@ -1757,6 +1979,12 @@ ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list) * @vsi_id: ID of VSI to remove filters from * @lkup_list_head: pointer to the list that has certain lookup type filters * @vsi_list_head: pointer to the list pertaining to VSI with vsi_id + * + * Locates all filters in lkup_list_head that are used by the given VSI, + * and adds COPIES of those entries to vsi_list_head (intended to be used + * to remove the listed filters). + * Note that this means all entries in vsi_list_head must be explicitly + * deallocated by the caller when done with list. */ static enum ice_status ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, @@ -1764,46 +1992,25 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_id, struct list_head *vsi_list_head) { struct ice_fltr_mgmt_list_entry *fm_entry; + enum ice_status status = 0; /* check to make sure VSI id is valid and within boundary */ - if (vsi_id >= - (sizeof(fm_entry->vsi_list_info->vsi_map) * BITS_PER_BYTE - 1)) + if (vsi_id >= ICE_MAX_VSI) return ICE_ERR_PARAM; list_for_each_entry(fm_entry, lkup_list_head, list_entry) { struct ice_fltr_info *fi; fi = &fm_entry->fltr_info; - if ((fi->fltr_act == ICE_FWD_TO_VSI && - fi->fwd_id.vsi_id == vsi_id) || - (fi->fltr_act == ICE_FWD_TO_VSI_LIST && - (test_bit(vsi_id, fm_entry->vsi_list_info->vsi_map)))) { - struct ice_fltr_list_entry *tmp; - - /* this memory is freed up in the caller function - * ice_remove_vsi_lkup_fltr() once filters for - * this VSI are removed - */ - tmp = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*tmp), - GFP_KERNEL); - if (!tmp) - return ICE_ERR_NO_MEMORY; - - memcpy(&tmp->fltr_info, fi, sizeof(*fi)); + if (!ice_vsi_uses_fltr(fm_entry, vsi_id)) + continue; - /* Expected below fields to be set to ICE_FWD_TO_VSI and - * the particular VSI id since we are only removing this - * one VSI - */ - if (fi->fltr_act == ICE_FWD_TO_VSI_LIST) { - tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI; - tmp->fltr_info.fwd_id.vsi_id = vsi_id; - } - - list_add(&tmp->list_entry, vsi_list_head); - } + status = ice_add_entry_to_vsi_fltr_list(hw, vsi_id, + vsi_list_head, fi); + if (status) + return status; } - return 0; + return status; } /** @@ -1819,46 +2026,40 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_id, struct ice_switch_info *sw = hw->switch_info; struct ice_fltr_list_entry *fm_entry; struct list_head remove_list_head; + struct list_head *rule_head; struct ice_fltr_list_entry *tmp; + struct mutex *rule_lock; /* Lock to protect filter rule list */ enum ice_status status; INIT_LIST_HEAD(&remove_list_head); + rule_lock = &sw->recp_list[lkup].filt_rule_lock; + rule_head = &sw->recp_list[lkup].filt_rules; + mutex_lock(rule_lock); + status = ice_add_to_vsi_fltr_list(hw, vsi_id, rule_head, + 
&remove_list_head); + mutex_unlock(rule_lock); + if (status) + return; + switch (lkup) { case ICE_SW_LKUP_MAC: - mutex_lock(&sw->mac_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->mac_list_head, - &remove_list_head); - mutex_unlock(&sw->mac_list_lock); - if (!status) { - ice_remove_mac(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_mac(hw, &remove_list_head); break; case ICE_SW_LKUP_VLAN: - mutex_lock(&sw->vlan_list_lock); - status = ice_add_to_vsi_fltr_list(hw, vsi_id, - &sw->vlan_list_head, - &remove_list_head); - mutex_unlock(&sw->vlan_list_lock); - if (!status) { - ice_remove_vlan(hw, &remove_list_head); - goto free_fltr_list; - } + ice_remove_vlan(hw, &remove_list_head); break; case ICE_SW_LKUP_MAC_VLAN: case ICE_SW_LKUP_ETHERTYPE: case ICE_SW_LKUP_ETHERTYPE_MAC: case ICE_SW_LKUP_PROMISC: - case ICE_SW_LKUP_PROMISC_VLAN: case ICE_SW_LKUP_DFLT: - ice_debug(hw, ICE_DBG_SW, - "Remove filters for this lookup type hasn't been implemented yet\n"); + case ICE_SW_LKUP_PROMISC_VLAN: + case ICE_SW_LKUP_LAST: + default: + ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup); break; } - return; -free_fltr_list: list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) { list_del(&fm_entry->list_entry); devm_kfree(ice_hw_to_dev(hw), fm_entry); @@ -1881,3 +2082,89 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id) ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_ETHERTYPE_MAC); ice_remove_vsi_lkup_fltr(hw, vsi_id, ICE_SW_LKUP_PROMISC_VLAN); } + +/** + * ice_replay_fltr - Replay all the filters stored by a specific list head + * @hw: pointer to the hardware structure + * @list_head: list for which filters needs to be replayed + * @recp_id: Recipe id for which rules need to be replayed + */ +static enum ice_status +ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct list_head *list_head) +{ + struct ice_fltr_mgmt_list_entry *itr; + struct list_head l_head; + enum ice_status status = 0; + + if (list_empty(list_head)) + return status; + + /* Move entries from the given list_head to a temporary l_head so that + * they can be replayed. Otherwise when trying to re-add the same + * filter, the function will return already exists + */ + list_replace_init(list_head, &l_head); + + /* Mark the given list_head empty by reinitializing it so filters + * could be added again by *handler + */ + list_for_each_entry(itr, &l_head, list_entry) { + struct ice_fltr_list_entry f_entry; + + f_entry.fltr_info = itr->fltr_info; + if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) { + status = ice_add_rule_internal(hw, recp_id, &f_entry); + if (status) + goto end; + continue; + } + + /* Add a filter per vsi separately */ + while (1) { + u16 vsi; + + vsi = find_first_bit(itr->vsi_list_info->vsi_map, + ICE_MAX_VSI); + if (vsi == ICE_MAX_VSI) + break; + + clear_bit(vsi, itr->vsi_list_info->vsi_map); + f_entry.fltr_info.fwd_id.vsi_id = vsi; + f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; + if (recp_id == ICE_SW_LKUP_VLAN) + status = ice_add_vlan_internal(hw, &f_entry); + else + status = ice_add_rule_internal(hw, recp_id, + &f_entry); + if (status) + goto end; + } + } +end: + /* Clear the filter management list */ + ice_rem_sw_rule_info(hw, &l_head); + return status; +} + +/** + * ice_replay_all_fltr - replay all filters stored in bookkeeping lists + * @hw: pointer to the hardware structure + * + * NOTE: This function does not clean up partially added filters on error. + * It is up to caller of the function to issue a reset or fail early. 
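The list_replace_init() step above is what makes the replay work: the saved entries are stolen onto a temporary head, leaving the live list empty, so re-adding the same filter cannot trip the duplicate detection in the add path. The same move in stand-alone form, with a bare singly linked list instead of list_head:

#include <stddef.h>
#include <stdio.h>

struct node { int v; struct node *next; };

/* steal the whole chain onto a new head and leave the source empty */
static struct node *steal_list(struct node **head)
{
        struct node *old = *head;

        *head = NULL;
        return old;
}

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *live = &a;
        struct node *replay = steal_list(&live);

        for (struct node *n = replay; n; n = n->next)
                printf("replaying %d (live list %s)\n", n->v,
                       live ? "non-empty" : "empty");
        return 0;
}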
+ */ +enum ice_status ice_replay_all_fltr(struct ice_hw *hw) +{ + struct ice_switch_info *sw = hw->switch_info; + enum ice_status status = 0; + u8 i; + + for (i = 0; i < ICE_SW_LKUP_LAST; i++) { + struct list_head *head = &sw->recp_list[i].filt_rules; + + status = ice_replay_fltr(hw, i, head); + if (status) + return status; + } + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 9b8ec128ee31..646389ca1238 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -39,6 +39,7 @@ enum ice_sw_lkup_type { ICE_SW_LKUP_DFLT = 5, ICE_SW_LKUP_ETHERTYPE_MAC = 8, ICE_SW_LKUP_PROMISC_VLAN = 9, + ICE_SW_LKUP_LAST }; struct ice_fltr_info { @@ -98,6 +99,31 @@ struct ice_fltr_info { u8 lan_en; /* Indicate if packet can be forwarded to the uplink */ }; +struct ice_sw_recipe { + struct list_head l_entry; + + /* To protect modification of filt_rule list + * defined below + */ + struct mutex filt_rule_lock; + + /* List of type ice_fltr_mgmt_list_entry */ + struct list_head filt_rules; + + /* linked list of type recipe_list_entry */ + struct list_head rg_list; + /* linked list of type ice_sw_fv_list_entry*/ + struct list_head fv_list; + struct ice_aqc_recipe_data_elem *r_buf; + u8 recp_count; + u8 root_rid; + u8 num_profs; + u8 *prof_ids; + + /* recipe bitmap: what all recipes makes this recipe */ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); +}; + /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */ struct ice_vsi_list_map_info { struct list_head list_entry; @@ -105,15 +131,9 @@ struct ice_vsi_list_map_info { u16 vsi_list_id; }; -enum ice_sw_fltr_status { - ICE_FLTR_STATUS_NEW = 0, - ICE_FLTR_STATUS_FW_SUCCESS, - ICE_FLTR_STATUS_FW_FAIL, -}; - struct ice_fltr_list_entry { struct list_head list_entry; - enum ice_sw_fltr_status status; + enum ice_status status; struct ice_fltr_info fltr_info; }; @@ -138,18 +158,18 @@ struct ice_fltr_mgmt_list_entry { /* VSI related commands */ enum ice_status -ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - struct ice_sq_cd *cd); -enum ice_status ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, struct ice_sq_cd *cd); enum ice_status -ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx, - bool keep_vsi_alloc, struct ice_sq_cd *cd); - +ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + struct ice_sq_cd *cd); +enum ice_status +ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx, + bool keep_vsi_alloc, struct ice_sq_cd *cd); enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw); /* Switch/bridge related commands */ +enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw); enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst); enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst); void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_id); @@ -158,4 +178,8 @@ enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list); enum ice_status ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_id, bool set, u8 direction); +enum ice_status ice_replay_all_fltr(struct ice_hw *hw); + +enum ice_status ice_init_def_sw_recp(struct ice_hw *hw); + #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 6481e3d86374..5dae968d853e 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ 
b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -251,6 +251,7 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring) tx_ring->next_to_use = 0; tx_ring->next_to_clean = 0; + tx_ring->tx_stats.prev_pkt = -1; return 0; err: diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index 31bc998fe200..839fd9ff6043 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -71,6 +71,7 @@ struct ice_txq_stats { u64 restart_q; u64 tx_busy; u64 tx_linearize; + int prev_pkt; /* negative if no pending Tx descriptors */ }; struct ice_rxq_stats { diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 97c366e0ca59..e681804be4d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -34,10 +34,15 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) enum ice_aq_res_ids { ICE_NVM_RES_ID = 1, ICE_SPD_RES_ID, - ICE_GLOBAL_CFG_LOCK_RES_ID, - ICE_CHANGE_LOCK_RES_ID + ICE_CHANGE_LOCK_RES_ID, + ICE_GLOBAL_CFG_LOCK_RES_ID }; +/* FW update timeout definitions are in milliseconds */ +#define ICE_NVM_TIMEOUT 180000 +#define ICE_CHANGE_LOCK_TIMEOUT 1000 +#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000 + enum ice_aq_res_access_type { ICE_RES_READ = 1, ICE_RES_WRITE @@ -144,9 +149,10 @@ struct ice_mac_info { /* Various RESET request, These are not tied with HW reset types */ enum ice_reset_req { - ICE_RESET_PFR = 0, - ICE_RESET_CORER = 1, - ICE_RESET_GLOBR = 2, + ICE_RESET_INVAL = 0, + ICE_RESET_PFR = 1, + ICE_RESET_CORER = 2, + ICE_RESET_GLOBR = 3, }; /* Bus parameters */ @@ -204,6 +210,7 @@ enum ice_agg_type { }; #define ICE_SCHED_DFLT_RL_PROF_ID 0 +#define ICE_SCHED_DFLT_BW_WT 1 /* vsi type list entry to locate corresponding vsi/ag nodes */ struct ice_sched_vsi_info { @@ -247,19 +254,26 @@ struct ice_port_info { }; struct ice_switch_info { - /* Switch VSI lists to MAC/VLAN translation */ - struct mutex mac_list_lock; /* protect MAC list */ - struct list_head mac_list_head; - struct mutex vlan_list_lock; /* protect VLAN list */ - struct list_head vlan_list_head; - struct mutex eth_m_list_lock; /* protect ethtype list */ - struct list_head eth_m_list_head; - struct mutex promisc_list_lock; /* protect promisc mode list */ - struct list_head promisc_list_head; - struct mutex mac_vlan_list_lock; /* protect MAC-VLAN list */ - struct list_head mac_vlan_list_head; - struct list_head vsi_list_map_head; + struct ice_sw_recipe *recp_list; +}; + +/* FW logging configuration */ +struct ice_fw_log_evnt { + u8 cfg : 4; /* New event enables to configure */ + u8 cur : 4; /* Current/active event enables */ +}; + +struct ice_fw_log_cfg { + u8 cq_en : 1; /* FW logging is enabled via the control queue */ + u8 uart_en : 1; /* FW logging is enabled via UART for all PFs */ + u8 actv_evnts; /* Cumulation of currently enabled log events */ + +#define ICE_FW_LOG_EVNT_INFO (ICE_AQC_FW_LOG_INFO_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_INIT (ICE_AQC_FW_LOG_INIT_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_FLOW (ICE_AQC_FW_LOG_FLOW_EN >> ICE_AQC_FW_LOG_EN_S) +#define ICE_FW_LOG_EVNT_ERR (ICE_AQC_FW_LOG_ERR_EN >> ICE_AQC_FW_LOG_EN_S) + struct ice_fw_log_evnt evnts[ICE_AQC_FW_LOG_ID_MAX]; }; /* Port hardware description */ @@ -286,8 +300,11 @@ struct ice_hw { u8 flattened_layers; u8 max_cgds; u8 sw_entry_point_layer; + u16 max_children[ICE_AQC_TOPO_MAX_LEVEL_NUM]; + struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI]; u8 evb_veb; /* true for VEB, false for VEPA */ + 
u8 reset_ongoing; /* true if hw is in reset, false otherwise */ struct ice_bus_info bus; struct ice_nvm_info nvm; struct ice_hw_dev_caps dev_caps; /* device capabilities */ @@ -308,6 +325,7 @@ struct ice_hw { u8 fw_patch; /* firmware patch version */ u32 fw_build; /* firmware build number */ + struct ice_fw_log_cfg fw_log; /* minimum allowed value for different speeds */ #define ICE_ITR_GRAN_MIN_200 1 #define ICE_ITR_GRAN_MIN_100 1 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4fc906c6166b..5c6fd42e90ed 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -605,6 +605,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_EEE_ENABLED BIT(15) #define IXGBE_FLAG2_RX_LEGACY BIT(16) #define IXGBE_FLAG2_IPSEC_ENABLED BIT(17) +#define IXGBE_FLAG2_VF_IPSEC_ENABLED BIT(18) /* Tx fast path data */ int num_tx_queues; @@ -1003,15 +1004,24 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, struct sk_buff *skb); int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, struct ixgbe_ipsec_tx_data *itd); +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf); +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *mbuf, u32 vf); #else -static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { }; -static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { }; +static inline void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) { } +static inline void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { } static inline void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) { }; + struct sk_buff *skb) { } static inline int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, - struct ixgbe_ipsec_tx_data *itd) { return 0; }; + struct ixgbe_ipsec_tx_data *itd) { return 0; } +static inline void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, + u32 vf) { } +static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } +static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, + u32 *mbuf, u32 vf) { return -EACCES; } #endif /* CONFIG_XFRM_OFFLOAD */ #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 0bd1294ba517..970f71d5da04 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3485,6 +3485,17 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) } /** + * ixgbe_fw_recovery_mode - Check if in FW NVM recovery mode + * @hw: pointer to hardware structure + */ +bool ixgbe_fw_recovery_mode(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.fw_recovery_mode) + return hw->mac.ops.fw_recovery_mode(hw); + return false; +} + +/** * ixgbe_get_device_caps_generic - Get additional device capabilities * @hw: pointer to hardware structure * @device_caps: the EEPROM word with the extra device capabilities diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index e5a8461fe6a9..732b1e6ecc43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -136,6 +136,8 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { #define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(0) "legacy-rx", +#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN BIT(1) + "vf-ipsec", }; #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) @@ -3409,6 +3411,9 @@ static u32 ixgbe_get_priv_flags(struct net_device *netdev) if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; + if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED) + priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN; + return priv_flags; } @@ -3421,6 +3426,10 @@ static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) flags2 |= IXGBE_FLAG2_RX_LEGACY; + flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN) + flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED; + if (flags2 != adapter->flags2) { adapter->flags2 = flags2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index da4322e4daed..fd1b0546fd67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -5,6 +5,11 @@ #include <net/xfrm.h> #include <crypto/aead.h> +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +static void ixgbe_ipsec_del_sa(struct xfrm_state *xs); + /** * ixgbe_ipsec_set_tx_sa - set the Tx SA registers * @hw: hw specific details @@ -113,7 +118,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[]) **/ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) { - struct ixgbe_ipsec *ipsec = adapter->ipsec; struct ixgbe_hw *hw = &adapter->hw; u32 buf[4] = {0, 0, 0, 0}; u16 idx; @@ -132,9 +136,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter) ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0); ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0); } - - ipsec->num_rx_sa = 0; - ipsec->num_tx_sa = 0; } /** @@ -290,6 +291,13 @@ static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter) /** * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. + * + * Any VF entries are removed from the SW and HW tables since either + * (a) the VF also gets reset on PF reset and will ask again for the + * offloads, or (b) the VF has been removed by a change in the num_vfs. 
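The vf-ipsec private flag wired up a few hunks above follows the usual ethtool pattern: the get side translates the driver's internal flags2 bits into the exported word, and the set side clears the bits it owns before re-applying the user's choices. A stand-alone model; the bit positions mirror this patch, the helper names are made up:

#include <stdio.h>

#define FLAG2_RX_LEGACY (1u << 16)      /* adapter->flags2 bits */
#define FLAG2_VF_IPSEC  (1u << 18)
#define PRIV_LEGACY_RX  (1u << 0)       /* ethtool priv-flags bits */
#define PRIV_VF_IPSEC   (1u << 1)

static unsigned int get_priv_flags(unsigned int flags2)
{
        unsigned int priv = 0;

        if (flags2 & FLAG2_RX_LEGACY)
                priv |= PRIV_LEGACY_RX;
        if (flags2 & FLAG2_VF_IPSEC)
                priv |= PRIV_VF_IPSEC;
        return priv;
}

static unsigned int set_priv_flags(unsigned int flags2, unsigned int priv)
{
        flags2 &= ~(FLAG2_RX_LEGACY | FLAG2_VF_IPSEC);
        if (priv & PRIV_LEGACY_RX)
                flags2 |= FLAG2_RX_LEGACY;
        if (priv & PRIV_VF_IPSEC)
                flags2 |= FLAG2_VF_IPSEC;
        return flags2;
}

int main(void)
{
        unsigned int f2 = set_priv_flags(0, PRIV_VF_IPSEC);

        printf("flags2=0x%x -> priv=0x%x\n", f2, get_priv_flags(f2));
        return 0;
}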
**/ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) { @@ -305,6 +313,28 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) ixgbe_ipsec_clear_hw_tables(adapter); ixgbe_ipsec_start_engine(adapter); + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + + if (r->used) { + if (r->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(r->xs); + else + ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi, + r->key, r->salt, + r->mode, r->iptbl_ind); + } + + if (t->used) { + if (t->mode & IXGBE_RXTXMOD_VF) + ixgbe_ipsec_del_sa(t->xs); + else + ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt); + } + } + /* reload the IP addrs */ for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) { struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i]; @@ -312,20 +342,6 @@ void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter) if (ipsa->used) ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr); } - - /* reload the Rx and Tx keys */ - for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { - struct rx_sa *rsa = &ipsec->rx_tbl[i]; - struct tx_sa *tsa = &ipsec->tx_tbl[i]; - - if (rsa->used) - ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi, - rsa->key, rsa->salt, - rsa->mode, rsa->iptbl_ind); - - if (tsa->used) - ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt); - } } /** @@ -382,6 +398,8 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec, rcu_read_lock(); hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, (__force u32)spi) { + if (rsa->mode & IXGBE_RXTXMOD_VF) + continue; if (spi == rsa->xs->id.spi && ((ip4 && *daddr == rsa->xs->id.daddr.a4) || (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, @@ -411,7 +429,6 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, struct net_device *dev = xs->xso.dev; unsigned char *key_data; char *alg_name = NULL; - const char aes_gcm_name[] = "rfc4106(gcm(aes))"; int key_len; if (!xs->aead) { @@ -439,9 +456,9 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs, * we don't need to do any byteswapping. 
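The arithmetic behind the key-length checks that follow: the 160-bit blob is five 32-bit words, a 128-bit AES-GCM key plus one word of salt, and the salt is simply the fifth word. A stand-alone, alignment-safe sketch; the driver itself casts and indexes the buffer instead of using memcpy:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_BITS 160    /* mirrors IXGBE_IPSEC_KEY_BITS: 128-bit key + 32-bit salt */

int main(void)
{
        uint8_t blob[KEY_BITS / 8] = { 0 };
        uint32_t salt;

        /* a key-only blob is KEY_BITS minus one 32-bit salt word: 128 bits */
        printf("key-only length: %zu bits\n", KEY_BITS - sizeof(salt) * 8);

        blob[16] = 0xde;        /* salt bytes sit after the 16-byte key */
        blob[17] = 0xad;

        /* alignment-safe equivalent of *mysalt = ((u32 *)key_data)[4] */
        memcpy(&salt, blob + 16, sizeof(salt));
        printf("salt word: 0x%08x\n", salt);
        return 0;
}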
* 160 accounts for 16 byte key and 4 byte salt */ - if (key_len == 160) { + if (key_len == IXGBE_IPSEC_KEY_BITS) { *mysalt = ((u32 *)key_data)[4]; - } else if (key_len != 128) { + } else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) { netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); return -EINVAL; } else { @@ -676,6 +693,9 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; + if (adapter->num_vfs) + return -EOPNOTSUPP; + /* find the first unused index */ ret = ixgbe_ipsec_find_empty_idx(ipsec, false); if (ret < 0) { @@ -811,6 +831,226 @@ static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { }; /** + * ixgbe_ipsec_vf_clear - clear the tables of data for a VF + * @adapter: board private structure + * @vf: VF id to be removed + **/ +void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + int i; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) { + if (!ipsec->rx_tbl[i].used) + continue; + if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->rx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs); + } + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) { + if (!ipsec->tx_tbl[i].used) + continue; + if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF && + ipsec->tx_tbl[i].vf == vf) + ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs); + } +} + +/** + * ixgbe_ipsec_vf_add_sa - translate VF request to SA add + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Make up a new xs and algorithm info from the data sent by the VF. + * We only need to sketch in just enough to set up the HW offload. + * Put the resulting offload_handle into the return message to the VF. + * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_algo_desc *algo; + struct sa_mbx_msg *sam; + struct xfrm_state *xs; + size_t aead_len; + u16 sa_idx; + u32 pfsa; + int err; + + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + if (!adapter->vfinfo[vf].trusted || + !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) { + e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf); + err = -EACCES; + goto err_out; + } + + /* Tx IPsec offload doesn't seem to work on this + * device, so block these requests for now. 
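+ * + * (For reference, the request that landed here sits in msgbuf[1] as a + * struct sa_mbx_msg -- __be32 spi; u8 flags; u8 proto; u16 family; + * __be32 addr[4]; u32 key[5] -- the same layout the VF driver fills in + * ixgbevf_ipsec_set_pf_sa() and that ixgbe_ipsec.h below defines.)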
+ */ + if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) { + err = -EOPNOTSUPP; + goto err_out; + } + + xs = kzalloc(sizeof(*xs), GFP_KERNEL); + if (unlikely(!xs)) { + err = -ENOMEM; + goto err_out; + } + + xs->xso.flags = sam->flags; + xs->id.spi = sam->spi; + xs->id.proto = sam->proto; + xs->props.family = sam->family; + if (xs->props.family == AF_INET6) + memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6)); + else + memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4)); + xs->xso.dev = adapter->netdev; + + algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1); + if (unlikely(!algo)) { + err = -ENOENT; + goto err_xs; + } + + aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8; + xs->aead = kzalloc(aead_len, GFP_KERNEL); + if (unlikely(!xs->aead)) { + err = -ENOMEM; + goto err_xs; + } + + xs->props.ealgo = algo->desc.sadb_alg_id; + xs->geniv = algo->uinfo.aead.geniv; + xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS; + xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS; + memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key)); + memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name)); + + /* set up the HW offload */ + err = ixgbe_ipsec_add_sa(xs); + if (err) + goto err_aead; + + pfsa = xs->xso.offload_handle; + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + ipsec->rx_tbl[sa_idx].vf = vf; + ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } else { + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + ipsec->tx_tbl[sa_idx].vf = vf; + ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF; + } + + msgbuf[1] = xs->xso.offload_handle; + + return 0; + +err_aead: + memset(xs->aead, 0, sizeof(*xs->aead)); + kfree(xs->aead); +err_xs: + memset(xs, 0, sizeof(*xs)); + kfree(xs); +err_out: + msgbuf[1] = err; + return err; +} + +/** + * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete + * @adapter: board private structure + * @msgbuf: The message buffer + * @vf: the VF index + * + * Given the offload_handle sent by the VF, look for the related SA table + * entry and use its xs field to call for a delete of the SA. + * + * Note: We silently ignore requests to delete entries that are already + * set to unused because when a VF is set to "DOWN", the PF first + * gets a reset and clears all the VF's entries; then the VF's + * XFRM stack sends individual deletes for each entry, which the + * reset already removed. In the future it might be good to try to + * optimize this so not so many unnecessary delete messages are sent. 
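+ * + * (For orientation, the handle decoding below: a pfsa value under + * IXGBE_IPSEC_BASE_TX_INDEX selects rx_tbl[pfsa - IXGBE_IPSEC_BASE_RX_INDEX], + * anything else selects tx_tbl[pfsa - IXGBE_IPSEC_BASE_TX_INDEX].)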
+ * + * Returns 0 or error value + **/ +int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + struct ixgbe_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + u32 pfsa = msgbuf[1]; + u16 sa_idx; + + if (!adapter->vfinfo[vf].trusted) { + e_err(drv, "vf %d attempted to delete an SA\n", vf); + return -EPERM; + } + + if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) { + struct rx_sa *rsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + rsa = &ipsec->rx_tbl[sa_idx]; + + if (!rsa->used) + return 0; + + if (!(rsa->mode & IXGBE_RXTXMOD_VF) || + rsa->vf != vf) { + e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->rx_tbl[sa_idx].xs; + } else { + struct tx_sa *tsa; + + sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) { + e_err(drv, "vf %d SA index %d out of range\n", + vf, sa_idx); + return -EINVAL; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + + if (!tsa->used) + return 0; + + if (!(tsa->mode & IXGBE_RXTXMOD_VF) || + tsa->vf != vf) { + e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx); + return -ENOENT; + } + + xs = ipsec->tx_tbl[sa_idx].xs; + } + + ixgbe_ipsec_del_sa(xs); + + /* remove the xs that was made-up in the add request */ + memset(xs, 0, sizeof(*xs)); + kfree(xs); + + return 0; +} + +/** * ixgbe_ipsec_tx - setup Tx flags for ipsec offload * @tx_ring: outgoing context * @first: current data packet diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h index 9ef7faadda69..d2b64ff8eb4e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.h @@ -26,6 +26,7 @@ enum ixgbe_ipsec_tbl_sel { #define IXGBE_RXMOD_PROTO_ESP 0x00000004 #define IXGBE_RXMOD_DECRYPT 0x00000008 #define IXGBE_RXMOD_IPV6 0x00000010 +#define IXGBE_RXTXMOD_VF 0x00000020 struct rx_sa { struct hlist_node hlist; @@ -37,6 +38,7 @@ struct rx_sa { u8 iptbl_ind; bool used; bool decrypt; + u32 vf; }; struct rx_ip_sa { @@ -49,8 +51,10 @@ struct tx_sa { struct xfrm_state *xs; u32 key[4]; u32 salt; + u32 mode; bool encrypt; bool used; + u32 vf; }; struct ixgbe_ipsec_tx_data { @@ -67,4 +71,13 @@ struct ixgbe_ipsec { struct tx_sa *tx_tbl; DECLARE_HASHTABLE(rx_sa_list, 10); }; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; #endif /* _IXGBE_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 9a23d33a47ed..604282f03d23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7775,6 +7775,33 @@ static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) } /** + * ixgbe_check_fw_error - Check firmware for errors + * @adapter: the adapter private structure + * + * Check firmware errors in register FWSM + */ +static bool ixgbe_check_fw_error(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 fwsm; + + /* read fwsm.ext_err_ind register and log errors */ + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + + if (fwsm & IXGBE_FWSM_EXT_ERR_IND_MASK || + !(fwsm & IXGBE_FWSM_FW_VAL_BIT)) + e_dev_warn("Warning firmware error detected FWSM: 0x%08X\n", + fwsm); + + if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) { + e_dev_err("Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); + return true; + } + + return false; +} + +/** * ixgbe_service_task - manages and runs subtasks * @work: pointer to work_struct containing our data **/ @@ -7792,6 +7819,15 @@ static void ixgbe_service_task(struct work_struct *work) ixgbe_service_event_complete(adapter); return; } + if (ixgbe_check_fw_error(adapter)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + unregister_netdev(adapter->netdev); + rtnl_unlock(); + } + ixgbe_service_event_complete(adapter); + return; + } if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { rtnl_lock(); adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; @@ -10716,6 +10752,11 @@ skip_sriov: if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) netdev->features |= NETIF_F_LRO; + if (ixgbe_check_fw_error(adapter)) { + err = -EIO; + goto err_sw_init; + } + /* make sure the EEPROM is good */ if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { e_dev_err("The EEPROM Checksum Is Not Valid\n"); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index e085b6520dac..a148534d7256 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -50,6 +50,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -80,6 +81,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 3c6f01c41b78..af25a8fffeb8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -496,6 +496,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: /* Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are * disabled @@ -728,6 +729,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) /* reset multicast table array for vf */ adapter->vfinfo[vf].num_vf_mc_hashes = 0; + /* clear any ipsec table info */ + ixgbe_ipsec_vf_clear(adapter, vf); + /* Flush and reset the mta with the new values */ ixgbe_set_rx_mode(adapter->netdev); @@ -1000,6 +1004,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -1025,6 +1030,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -1; @@ -1065,6 +1071,7 @@ static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 
vf) /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1097,6 +1104,7 @@ static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, /* verify the PF is supporting the correct API */ switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: break; @@ -1122,8 +1130,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, /* promisc introduced in 1.3 version */ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; - /* Fall threw */ + /* Fall through */ case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return -EOPNOTSUPP; @@ -1249,6 +1258,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_UPDATE_XCAST_MODE: retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); break; + case IXGBE_VF_IPSEC_ADD: + retval = ixgbe_ipsec_vf_add_sa(adapter, msgbuf, vf); + break; + case IXGBE_VF_IPSEC_DEL: + retval = ixgbe_ipsec_vf_del_sa(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 41bcbb337e83..84f2dba39e36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -924,6 +924,9 @@ struct ixgbe_nvm_version { /* Firmware Semaphore Register */ #define IXGBE_FWSM_MODE_MASK 0xE #define IXGBE_FWSM_FW_MODE_PT 0x4 +#define IXGBE_FWSM_FW_NVM_RECOVERY_MODE BIT(5) +#define IXGBE_FWSM_EXT_ERR_IND_MASK 0x01F80000 +#define IXGBE_FWSM_FW_VAL_BIT BIT(15) /* ARC Subsystem registers */ #define IXGBE_HICR 0x15F00 @@ -3461,6 +3464,7 @@ struct ixgbe_mac_operations { const char *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); + bool (*fw_recovery_mode)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); void (*set_source_address_pruning)(struct ixgbe_hw *, bool, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index a8148c7126e5..10dbaf4f6e80 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1247,6 +1247,20 @@ static s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) return 0; } +/** + * ixgbe_fw_recovery_mode - Check FW NVM recovery mode + * @hw: pointer to hardware structure + * + * Returns true if in FW NVM recovery mode.
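+ * + * (FWSM bits used by this code, per the IXGBE_FWSM_* masks added in + * ixgbe_type.h above: bit 5 is NVM recovery mode, bit 15 is the + * firmware-valid bit, and bits 19-24 form the extended error indication.)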
+ */ +static bool ixgbe_fw_recovery_mode_X550(struct ixgbe_hw *hw) +{ + u32 fwsm; + + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); + return !!(fwsm & IXGBE_FWSM_FW_NVM_RECOVERY_MODE); +} + /** ixgbe_disable_rx_x550 - Disable RX unit * * Enables the Rx DMA unit for x550 @@ -3816,6 +3830,7 @@ static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, \ .get_thermal_sensor_data = NULL, \ .init_thermal_sensor_thresh = NULL, \ + .fw_recovery_mode = &ixgbe_fw_recovery_mode_X550, \ .enable_rx = &ixgbe_enable_rx_generic, \ .disable_rx = &ixgbe_disable_rx_x550, \ diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index aba1e6a37a6a..297d0f0858b5 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -10,4 +10,5 @@ ixgbevf-objs := vf.o \ mbx.o \ ethtool.o \ ixgbevf_main.o +ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h index 700d8eb2f6f8..6bace746eaac 100644 --- a/drivers/net/ethernet/intel/ixgbevf/defines.h +++ b/drivers/net/ethernet/intel/ixgbevf/defines.h @@ -133,9 +133,14 @@ typedef u32 ixgbe_link_speed; #define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ #define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ #define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_SECP 0x00020000 /* IPsec/MACsec pkt found */ #define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F #define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ #define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 #define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 #define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 @@ -229,7 +234,7 @@ union ixgbe_adv_rx_desc { /* Context descriptors */ struct ixgbe_adv_tx_context_desc { __le32 vlan_macip_lens; - __le32 seqnum_seed; + __le32 fceof_saidx; __le32 type_tucmd_mlhl; __le32 mss_l4len_idx; }; @@ -250,9 +255,12 @@ struct ixgbe_adv_tx_context_desc { #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 /* ESP Encrypt Enable */ #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ IXGBE_ADVTXD_POPTS_SHIFT) #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index 631c91046f39..5399787e07af 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -55,6 +55,8 @@ static struct ixgbe_stats ixgbevf_gstrings_stats[] = { IXGBEVF_STAT("alloc_rx_page", alloc_rx_page), IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed), 
IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + IXGBEVF_STAT("tx_ipsec", tx_ipsec), + IXGBEVF_STAT("rx_ipsec", rx_ipsec), }; #define IXGBEVF_QUEUE_STATS_LEN ( \ diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c new file mode 100644 index 000000000000..997cea675a37 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -0,0 +1,673 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */ + +#include "ixgbevf.h" +#include <net/xfrm.h> +#include <crypto/aead.h> + +#define IXGBE_IPSEC_KEY_BITS 160 +static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; + +/** + * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA + * @adapter: board private structure + * @xs: xfrm info to be sent to the PF + * + * Returns: positive offload handle from the PF, or negative error code + **/ +static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter, + struct xfrm_state *xs) +{ + u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 }; + struct ixgbe_hw *hw = &adapter->hw; + struct sa_mbx_msg *sam; + u16 msglen; + int ret; + + /* send the important bits to the PF */ + sam = (struct sa_mbx_msg *)(&msgbuf[1]); + sam->flags = xs->xso.flags; + sam->spi = xs->id.spi; + sam->proto = xs->id.proto; + sam->family = xs->props.family; + + if (xs->props.family == AF_INET6) + memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6)); + else + memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4)); + memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key)); + + msgbuf[0] = IXGBE_VF_IPSEC_ADD; + msglen = sizeof(*sam) + sizeof(msgbuf[0]); + + spin_lock_bh(&adapter->mbx_lock); + + ret = hw->mbx.ops.write_posted(hw, msgbuf, msglen); + if (ret) + goto out; + + msglen = sizeof(msgbuf[0]) * 2; + ret = hw->mbx.ops.read_posted(hw, msgbuf, msglen); + if (ret) + goto out; + + ret = (int)msgbuf[1]; + if (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK && ret >= 0) + ret = -1; + +out: + spin_unlock_bh(&adapter->mbx_lock); + + return ret; +} + +/** + * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA + * @adapter: board private structure + * @pfsa: sa index returned from PF when created, -1 for all + * + * Returns: 0 on success, or negative error code + **/ +static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 msgbuf[2]; + int err; + + memset(msgbuf, 0, sizeof(msgbuf)); + msgbuf[0] = IXGBE_VF_IPSEC_DEL; + msgbuf[1] = (u32)pfsa; + + spin_lock_bh(&adapter->mbx_lock); + + err = hw->mbx.ops.write_posted(hw, msgbuf, sizeof(msgbuf)); + if (err) + goto out; + + err = hw->mbx.ops.read_posted(hw, msgbuf, sizeof(msgbuf)); + if (err) + goto out; + +out: + spin_unlock_bh(&adapter->mbx_lock); + return err; +} + +/** + * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset + * @adapter: board private structure + * + * Reload the HW tables from the SW tables after they've been bashed + * by a chip reset. While we're here, make sure any stale VF data is + * removed, since we go through reset when num_vfs changes. 
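+ * + * (Unlike the PF-side restore earlier in this diff, the VF cannot write + * the SA registers itself: restoring here means re-sending each used + * entry to the PF over the mailbox via ixgbevf_ipsec_set_pf_sa(), which + * is exactly what the loop below does.)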
+ **/ +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *netdev = adapter->netdev; + int i; + + if (!(adapter->netdev->features & NETIF_F_HW_ESP)) + return; + + /* reload the Rx and Tx keys */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + struct rx_sa *r = &ipsec->rx_tbl[i]; + struct tx_sa *t = &ipsec->tx_tbl[i]; + int ret; + + if (r->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs); + if (ret < 0) + netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n", + i, ret); + } + + if (t->used) { + ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs); + if (ret < 0) + netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n", + i, ret); + } + } +} + +/** + * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index + * @ipsec: pointer to IPsec struct + * @rxtable: true if we need to look in the Rx table + * + * Returns the first unused index in either the Rx or Tx SA table + **/ +static +int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable) +{ + u32 i; + + if (rxtable) { + if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search rx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->rx_tbl[i].used) + return i; + } + } else { + if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT) + return -ENOSPC; + + /* search tx sa table */ + for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) { + if (!ipsec->tx_tbl[i].used) + return i; + } + } + + return -ENOSPC; +} + +/** + * ixgbevf_ipsec_find_rx_state - find the state that matches + * @ipsec: pointer to IPsec struct + * @daddr: inbound address to match + * @proto: protocol to match + * @spi: SPI to match + * @ip4: true if using an IPv4 address + * + * Returns a pointer to the matching SA state information + **/ +static +struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, + __be32 *daddr, u8 proto, + __be32 spi, bool ip4) +{ + struct xfrm_state *ret = NULL; + struct rx_sa *rsa; + + rcu_read_lock(); + hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, + (__force u32)spi) { + if (spi == rsa->xs->id.spi && + ((ip4 && *daddr == rsa->xs->id.daddr.a4) || + (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6, + sizeof(rsa->xs->id.daddr.a6)))) && + proto == rsa->xs->id.proto) { + ret = rsa->xs; + xfrm_state_hold(ret); + break; + } + } + rcu_read_unlock(); + return ret; +} + +/** + * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol + * @xs: pointer to xfrm_state struct + * @mykey: pointer to key array to populate + * @mysalt: pointer to salt value to populate + * + * This copies the protocol keys and salt to our own data tables. The + * 82599 family only supports the one algorithm. 
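+ * + * (Concretely, the only xfrm_state shape this parser accepts -- inferred + * from the checks below -- is aead->alg_name == "rfc4106(gcm(aes))" with + * a 128-bit alg_icv_len and an alg_key_len of 160 or 128 bits.)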
+ **/ +static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, + u32 *mykey, u32 *mysalt) +{ + struct net_device *dev = xs->xso.dev; + unsigned char *key_data; + char *alg_name = NULL; + int key_len; + + if (!xs->aead) { + netdev_err(dev, "Unsupported IPsec algorithm\n"); + return -EINVAL; + } + + if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) { + netdev_err(dev, "IPsec offload requires %d bit authentication\n", + IXGBE_IPSEC_AUTH_BITS); + return -EINVAL; + } + + key_data = &xs->aead->alg_key[0]; + key_len = xs->aead->alg_key_len; + alg_name = xs->aead->alg_name; + + if (strcmp(alg_name, aes_gcm_name)) { + netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n", + aes_gcm_name); + return -EINVAL; + } + + /* The key bytes come down in a big endian array of bytes, so + * we don't need to do any byte swapping. + * 160 accounts for 16 byte key and 4 byte salt + */ + if (key_len > IXGBE_IPSEC_KEY_BITS) { + *mysalt = ((u32 *)key_data)[4]; + } else if (key_len == IXGBE_IPSEC_KEY_BITS) { + *mysalt = 0; + } else { + netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n"); + return -EINVAL; + } + memcpy(mykey, key_data, 16); + + return 0; +} + +/** + * ixgbevf_ipsec_add_sa - program device with a security association + * @xs: pointer to transformer state struct + **/ +static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + int ret; + + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { + netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", + xs->id.proto); + return -EINVAL; + } + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + struct rx_sa rsa; + + if (xs->calg) { + netdev_err(dev, "Compression offload not supported\n"); + return -EINVAL; + } + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, true); + if (ret < 0) { + netdev_err(dev, "No space for SA in Rx table!\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&rsa, 0, sizeof(rsa)); + rsa.used = true; + rsa.xs = xs; + + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.decrypt = xs->ealg || xs->aead; + + /* get the key and salt */ + ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Rx SA table\n"); + return ret; + } + + /* get ip for rx sa table */ + if (xs->props.family == AF_INET6) + memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4); + + rsa.mode = IXGBE_RXMOD_VALID; + if (rsa.xs->id.proto & IPPROTO_ESP) + rsa.mode |= IXGBE_RXMOD_PROTO_ESP; + if (rsa.decrypt) + rsa.mode |= IXGBE_RXMOD_DECRYPT; + if (rsa.xs->props.family == AF_INET6) + rsa.mode |= IXGBE_RXMOD_IPV6; + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + rsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX; + + ipsec->num_rx_sa++; + + /* hash the new entry for faster search in Rx path */ + hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist, + (__force u32)rsa.xs->id.spi); + } else { + struct tx_sa tsa; + + /* find the first unused index */ + ret = ixgbevf_ipsec_find_empty_idx(ipsec, false); + if (ret < 0) { + netdev_err(dev, "No space for SA in Tx table\n"); + return ret; + } + sa_idx = (u16)ret; + + memset(&tsa, 0, sizeof(tsa)); + 
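/* Tx mirror of the Rx branch above: mark the slot used, note whether + * the SA encrypts, and fetch the key and salt before telling the PF. + */ +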
tsa.used = true; + tsa.xs = xs; + + if (xs->id.proto & IPPROTO_ESP) + tsa.encrypt = xs->ealg || xs->aead; + + ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt); + if (ret) { + netdev_err(dev, "Failed to get key data for Tx SA table\n"); + memset(&tsa, 0, sizeof(tsa)); + return ret; + } + + ret = ixgbevf_ipsec_set_pf_sa(adapter, xs); + if (ret < 0) + return ret; + tsa.pfsa = ret; + + /* the preparations worked, so save the info */ + memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa)); + + xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX; + + ipsec->num_tx_sa++; + } + + return 0; +} + +/** + * ixgbevf_ipsec_del_sa - clear out this specific SA + * @xs: pointer to transformer state struct + **/ +static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) +{ + struct net_device *dev = xs->xso.dev; + struct ixgbevf_adapter *adapter = netdev_priv(dev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + u16 sa_idx; + + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; + + if (!ipsec->rx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa); + hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist); + memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa)); + ipsec->num_rx_sa--; + } else { + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + + if (!ipsec->tx_tbl[sa_idx].used) { + netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n", + sa_idx, xs->xso.offload_handle); + return; + } + + ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa); + memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa)); + ipsec->num_tx_sa--; + } +} + +/** + * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload + * @skb: current data packet + * @xs: pointer to transformer state struct + **/ +static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) +{ + if (xs->props.family == AF_INET) { + /* Offload with IPv4 options is not supported yet */ + if (ip_hdr(skb)->ihl != 5) + return false; + } else { + /* Offload with IPv6 extension headers is not supported yet */ + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) + return false; + } + + return true; +} + +static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { + .xdo_dev_state_add = ixgbevf_ipsec_add_sa, + .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, + .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, +}; + +/** + * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload + * @tx_ring: outgoing context + * @first: current data packet + * @itd: ipsec Tx data for later use in building context descriptor + **/ +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) +{ + struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_state *xs; + struct tx_sa *tsa; + u16 sa_idx; + + if (unlikely(!first->skb->sp->len)) { + netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", + __func__, first->skb->sp->len); + return 0; + } + + xs = xfrm_input_state(first->skb); + if (unlikely(!xs)) { + netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n", + __func__, xs); + return 0; + } + + sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; + if (unlikely(sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { + netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", + __func__,
sa_idx, xs->xso.offload_handle); + return 0; + } + + tsa = &ipsec->tx_tbl[sa_idx]; + if (unlikely(!tsa->used)) { + netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n", + __func__, sa_idx); + return 0; + } + + itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX; + + first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM; + + if (xs->id.proto == IPPROTO_ESP) { + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP | + IXGBE_ADVTXD_TUCMD_L4T_TCP; + if (first->protocol == htons(ETH_P_IP)) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4; + + /* The actual trailer length is authlen (16 bytes) plus + * 2 bytes for the proto and the padlen values, plus + * padlen bytes of padding. This ends up not the same + * as the static value found in xs->props.trailer_len (21). + * + * ... but if we're doing GSO, don't bother as the stack + * doesn't add a trailer for those. + */ + if (!skb_is_gso(first->skb)) { + /* The "correct" way to get the auth length would be + * to use + * authlen = crypto_aead_authsize(xs->data); + * but since we know we only have one size to worry + * about, we can let the compiler use the constant + * and save us a few CPU cycles. + */ + const int authlen = IXGBE_IPSEC_AUTH_BITS / 8; + struct sk_buff *skb = first->skb; + u8 padlen; + int ret; + + ret = skb_copy_bits(skb, skb->len - (authlen + 2), + &padlen, 1); + if (unlikely(ret)) + return 0; + itd->trailer_len = authlen + 2 + padlen; + } + } + if (tsa->encrypt) + itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN; + + return 1; +} + +/** + * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor + * @rx_ring: receiving ring + * @rx_desc: receive data descriptor + * @skb: current data packet + * + * Determine if there was an IPsec encapsulation noticed, and if so set up + * the resulting status for later in the receive stack. + **/ +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) +{ + struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev); + __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + __le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH | + IXGBE_RXDADV_PKTTYPE_IPSEC_ESP); + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct xfrm_offload *xo = NULL; + struct xfrm_state *xs = NULL; + struct ipv6hdr *ip6 = NULL; + struct iphdr *ip4 = NULL; + void *daddr; + __be32 spi; + u8 *c_hdr; + u8 proto; + + /* Find the IP and crypto headers in the data. + * We can assume no VLAN header in the way, b/c the + * hw won't recognize the IPsec packet and anyway the + * VLAN device currently doesn't support xfrm offload.
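+ * + * (So the parse below is deliberately simple: skb->data + ETH_HLEN is the + * IP header, and the AH/ESP header holding the SPI sits directly behind + * it -- ihl * 4 bytes in for IPv4, sizeof(struct ipv6hdr) for IPv6.)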
+ */ + if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { + ip4 = (struct iphdr *)(skb->data + ETH_HLEN); + daddr = &ip4->daddr; + c_hdr = (u8 *)ip4 + ip4->ihl * 4; + } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { + ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); + daddr = &ip6->daddr; + c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); + } else { + return; + } + + switch (pkt_info & ipsec_pkt_types) { + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): + spi = ((struct ip_auth_hdr *)c_hdr)->spi; + proto = IPPROTO_AH; + break; + case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): + spi = ((struct ip_esp_hdr *)c_hdr)->spi; + proto = IPPROTO_ESP; + break; + default: + return; + } + + xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); + if (unlikely(!xs)) + return; + + skb->sp = secpath_dup(skb->sp); + if (unlikely(!skb->sp)) + return; + + skb->sp->xvec[skb->sp->len++] = xs; + skb->sp->olen++; + xo = xfrm_offload(skb); + xo->flags = CRYPTO_DONE; + xo->status = CRYPTO_SUCCESS; + + adapter->rx_ipsec++; +} + +/** + * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation + * @adapter: board private structure + **/ +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec; + size_t size; + + switch (adapter->hw.api_version) { + case ixgbe_mbox_api_14: + break; + default: + return; + } + + ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); + if (!ipsec) + goto err1; + hash_init(ipsec->rx_sa_list); + + size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->rx_tbl) + goto err2; + + size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; + ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); + if (!ipsec->tx_tbl) + goto err2; + + ipsec->num_rx_sa = 0; + ipsec->num_tx_sa = 0; + + adapter->ipsec = ipsec; + + adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops; + +#define IXGBEVF_ESP_FEATURES (NETIF_F_HW_ESP | \ + NETIF_F_HW_ESP_TX_CSUM | \ + NETIF_F_GSO_ESP) + + adapter->netdev->features |= IXGBEVF_ESP_FEATURES; + adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES; + + return; + +err2: + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); +err1: + netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); +} + +/** + * ixgbevf_stop_ipsec_offload - tear down the IPsec offload + * @adapter: board private structure + **/ +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ + struct ixgbevf_ipsec *ipsec = adapter->ipsec; + + adapter->ipsec = NULL; + if (ipsec) { + kfree(ipsec->rx_tbl); + kfree(ipsec->tx_tbl); + kfree(ipsec); + } +} diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.h b/drivers/net/ethernet/intel/ixgbevf/ipsec.h new file mode 100644 index 000000000000..3740725041c3 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. 
*/ + +#ifndef _IXGBEVF_IPSEC_H_ +#define _IXGBEVF_IPSEC_H_ + +#define IXGBE_IPSEC_MAX_SA_COUNT 1024 +#define IXGBE_IPSEC_BASE_RX_INDEX 0 +#define IXGBE_IPSEC_BASE_TX_INDEX IXGBE_IPSEC_MAX_SA_COUNT +#define IXGBE_IPSEC_AUTH_BITS 128 + +#define IXGBE_RXMOD_VALID 0x00000001 +#define IXGBE_RXMOD_PROTO_ESP 0x00000004 +#define IXGBE_RXMOD_DECRYPT 0x00000008 +#define IXGBE_RXMOD_IPV6 0x00000010 + +struct rx_sa { + struct hlist_node hlist; + struct xfrm_state *xs; + __be32 ipaddr[4]; + u32 key[4]; + u32 salt; + u32 mode; + u32 pfsa; + bool used; + bool decrypt; +}; + +struct rx_ip_sa { + __be32 ipaddr[4]; + u32 ref_cnt; + bool used; +}; + +struct tx_sa { + struct xfrm_state *xs; + u32 key[4]; + u32 salt; + u32 pfsa; + bool encrypt; + bool used; +}; + +struct ixgbevf_ipsec_tx_data { + u32 flags; + u16 trailer_len; + u16 pfsa; +}; + +struct ixgbevf_ipsec { + u16 num_rx_sa; + u16 num_tx_sa; + struct rx_sa *rx_tbl; + struct tx_sa *tx_tbl; + DECLARE_HASHTABLE(rx_sa_list, 10); +}; + +struct sa_mbx_msg { + __be32 spi; + u8 flags; + u8 proto; + u16 family; + __be32 addr[4]; + u32 key[5]; +}; +#endif /* _IXGBEVF_IPSEC_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h index 56a1031dcc07..e399e1c0c54a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h @@ -14,6 +14,7 @@ #include <net/xdp.h> #include "vf.h" +#include "ipsec.h" #define IXGBE_MAX_TXD_PWR 14 #define IXGBE_MAX_DATA_PER_TXD BIT(IXGBE_MAX_TXD_PWR) @@ -163,6 +164,7 @@ struct ixgbevf_ring { #define IXGBE_TX_FLAGS_VLAN BIT(1) #define IXGBE_TX_FLAGS_TSO BIT(2) #define IXGBE_TX_FLAGS_IPV4 BIT(3) +#define IXGBE_TX_FLAGS_IPSEC BIT(4) #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 @@ -338,6 +340,7 @@ struct ixgbevf_adapter { struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */ u64 restart_queue; u32 tx_timeout_count; + u64 tx_ipsec; /* RX */ int num_rx_queues; @@ -348,6 +351,7 @@ struct ixgbevf_adapter { u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 alloc_rx_page; + u64 rx_ipsec; struct msix_entry *msix_entries; @@ -384,6 +388,10 @@ struct ixgbevf_adapter { u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE]; u32 flags; #define IXGBEVF_FLAGS_LEGACY_RX BIT(1) + +#ifdef CONFIG_XFRM + struct ixgbevf_ipsec *ipsec; +#endif /* CONFIG_XFRM */ }; enum ixbgevf_state_t { @@ -451,6 +459,31 @@ int ethtool_ioctl(struct ifreq *ifr); extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector); +#ifdef CONFIG_XFRM_OFFLOAD +void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter); +void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb); +int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd); +#else +static inline void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter) +{ } +static inline void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter) { } +static inline void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) { } +static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, + struct ixgbevf_tx_buffer *first, + struct 
ixgbevf_ipsec_tx_data *itd) +{ return 0; } +#endif /* CONFIG_XFRM_OFFLOAD */ + void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter); void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index d86446d202d5..17e23f609d74 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -40,7 +40,7 @@ static const char ixgbevf_driver_string[] = #define DRV_VERSION "4.1.0-k" const char ixgbevf_driver_version[] = DRV_VERSION; static char ixgbevf_copyright[] = - "Copyright (c) 2009 - 2015 Intel Corporation."; + "Copyright (c) 2009 - 2018 Intel Corporation."; static const struct ixgbevf_info *ixgbevf_info_tbl[] = { [board_82599_vf] = &ixgbevf_82599_vf_info, @@ -268,7 +268,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_tx_buffer *tx_buffer; union ixgbe_adv_tx_desc *tx_desc; - unsigned int total_bytes = 0, total_packets = 0; + unsigned int total_bytes = 0, total_packets = 0, total_ipsec = 0; unsigned int budget = tx_ring->count / 2; unsigned int i = tx_ring->next_to_clean; @@ -299,6 +299,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, /* update the statistics for this packet */ total_bytes += tx_buffer->bytecount; total_packets += tx_buffer->gso_segs; + if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) + total_ipsec++; /* free the skb */ if (ring_is_xdp(tx_ring)) @@ -361,6 +363,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, u64_stats_update_end(&tx_ring->syncp); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; + adapter->tx_ipsec += total_ipsec; if (check_for_tx_hang(tx_ring) && ixgbevf_check_tx_hang(tx_ring)) { struct ixgbe_hw *hw = &adapter->hw; @@ -516,6 +519,9 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); } + if (ixgbevf_test_staterr(rx_desc, IXGBE_RXDADV_STAT_SECP)) + ixgbevf_ipsec_rx(rx_ring, rx_desc, skb); + skb->protocol = eth_type_trans(skb, rx_ring->netdev); } @@ -1012,7 +1018,7 @@ static int ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, context_desc = IXGBEVF_TX_CTXTDESC(ring, 0); context_desc->vlan_macip_lens = cpu_to_le32(ETH_HLEN << IXGBE_ADVTXD_MACLEN_SHIFT); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = 0; context_desc->type_tucmd_mlhl = cpu_to_le32(IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); @@ -2200,6 +2206,7 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter) ixgbevf_set_rx_mode(adapter->netdev); ixgbevf_restore_vlan(adapter); + ixgbevf_ipsec_restore(adapter); ixgbevf_configure_tx(adapter); ixgbevf_configure_rx(adapter); @@ -2246,7 +2253,8 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - int api[] = { ixgbe_mbox_api_13, + int api[] = { ixgbe_mbox_api_14, + ixgbe_mbox_api_13, ixgbe_mbox_api_12, ixgbe_mbox_api_11, ixgbe_mbox_api_10, @@ -2605,6 +2613,7 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: if (adapter->xdp_prog && hw->mac.max_tx_queues == rss) rss = rss > 3 ? 
2 : 1; @@ -3700,8 +3709,8 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter) } static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, - u32 vlan_macip_lens, u32 type_tucmd, - u32 mss_l4len_idx) + u32 vlan_macip_lens, u32 fceof_saidx, + u32 type_tucmd, u32 mss_l4len_idx) { struct ixgbe_adv_tx_context_desc *context_desc; u16 i = tx_ring->next_to_use; @@ -3715,14 +3724,15 @@ static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring, type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); - context_desc->seqnum_seed = 0; + context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, struct ixgbevf_tx_buffer *first, - u8 *hdr_len) + u8 *hdr_len, + struct ixgbevf_ipsec_tx_data *itd) { u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; struct sk_buff *skb = first->skb; @@ -3736,6 +3746,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, unsigned char *hdr; } l4; u32 paylen, l4_offset; + u32 fceof_saidx = 0; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3761,13 +3772,15 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, if (ip.v4->version == 4) { unsigned char *csum_start = skb_checksum_start(skb); unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); + int len = csum_start - trans_start; /* IP header will have to cancel out any data that - * is not a part of the outer IP header + * is not a part of the outer IP header, so set to + * a reverse csum if needed, else init check to 0. */ - ip.v4->check = csum_fold(csum_partial(trans_start, - csum_start - trans_start, - 0)); + ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? 
+ csum_fold(csum_partial(trans_start, + len, 0)) : 0; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ip.v4->tot_len = 0; @@ -3799,13 +3812,16 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ vlan_macip_lens = l4.hdr - ip.hdr; vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, - type_tucmd, mss_l4len_idx); + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd, + mss_l4len_idx); return 1; } @@ -3820,10 +3836,12 @@ static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb) } static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, - struct ixgbevf_tx_buffer *first) + struct ixgbevf_tx_buffer *first, + struct ixgbevf_ipsec_tx_data *itd) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; + u32 fceof_saidx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -3849,6 +3867,10 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, skb_checksum_help(skb); goto no_csum; } + + if (first->protocol == htons(ETH_P_IP)) + type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; + /* update TX checksum flag */ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; vlan_macip_lens = skb_checksum_start_offset(skb) - @@ -3858,7 +3880,11 @@ no_csum: vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; - ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + fceof_saidx |= itd->pfsa; + type_tucmd |= itd->flags | itd->trailer_len; + + ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, + fceof_saidx, type_tucmd, 0); } static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) @@ -3892,8 +3918,12 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, if (tx_flags & IXGBE_TX_FLAGS_IPV4) olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IXSM); - /* use index 1 context for TSO/FSO/FCOE */ - if (tx_flags & IXGBE_TX_FLAGS_TSO) + /* enable IPsec */ + if (tx_flags & IXGBE_TX_FLAGS_IPSEC) + olinfo_status |= cpu_to_le32(IXGBE_ADVTXD_POPTS_IPSEC); + + /* use index 1 context for TSO/FSO/FCOE/IPSEC */ + if (tx_flags & (IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_IPSEC)) olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT); /* Check Context must be set if Tx switch is enabled, which it @@ -4075,6 +4105,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, int tso; u32 tx_flags = 0; u16 count = TXD_USE_COUNT(skb_headlen(skb)); + struct ixgbevf_ipsec_tx_data ipsec_tx = { 0 }; #if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD unsigned short f; #endif @@ -4119,11 +4150,15 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->tx_flags = tx_flags; first->protocol = vlan_get_protocol(skb); - tso = ixgbevf_tso(tx_ring, first, &hdr_len); +#ifdef CONFIG_XFRM_OFFLOAD + if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + goto out_drop; +#endif + tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); if (tso < 0) goto out_drop; else if (!tso) - ixgbevf_tx_csum(tx_ring, first); + ixgbevf_tx_csum(tx_ring, first, &ipsec_tx); ixgbevf_tx_map(tx_ring, first, hdr_len); @@ -4634,6 +4669,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case 
ixgbe_mbox_api_14: netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); break; @@ -4669,6 +4705,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_drvdata(pdev, netdev); netif_carrier_off(netdev); + ixgbevf_init_ipsec_offload(adapter); ixgbevf_init_last_counter_stats(adapter); @@ -4735,6 +4772,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) if (netdev->reg_state == NETREG_REGISTERED) unregister_netdev(netdev); + ixgbevf_stop_ipsec_offload(adapter); ixgbevf_clear_interrupt_scheme(adapter); ixgbevf_reset_interrupt_capability(adapter); diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h index bfd9ae150808..853796c8ef0e 100644 --- a/drivers/net/ethernet/intel/ixgbevf/mbx.h +++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h @@ -62,6 +62,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -92,6 +93,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_UPDATE_XCAST_MODE 0x0c +/* mailbox API, version 1.4 VF requests */ +#define IXGBE_VF_IPSEC_ADD 0x0d +#define IXGBE_VF_IPSEC_DEL 0x0e + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c index bf0577e819e1..cd3b81300cc7 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.c +++ b/drivers/net/ethernet/intel/ixgbevf/vf.c @@ -309,6 +309,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues) * is not supported for this device type. */ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -376,6 +377,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key) * or if the operation is not supported for this device type. 
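* (API 1.4 keeps the same RETA/RSS-key rules as versions 1.2 and 1.3, * which is why it simply joins the existing case labels below.)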
*/ switch (hw->api_version) { + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: case ixgbe_mbox_api_12: if (hw->mac.type < ixgbe_mac_X550_vf) @@ -540,6 +542,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) return -EOPNOTSUPP; /* Fall threw */ + case ixgbe_mbox_api_14: case ixgbe_mbox_api_13: break; default: @@ -890,6 +893,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, case ixgbe_mbox_api_11: case ixgbe_mbox_api_12: case ixgbe_mbox_api_13: + case ixgbe_mbox_api_14: break; default: return 0; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index e08301d833e2..32ac9045cdae 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -365,15 +365,8 @@ ltq_etop_mdio_probe(struct net_device *dev) return PTR_ERR(phydev); } - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + phy_attached_info(phydev); return 0; @@ -439,6 +432,7 @@ ltq_etop_open(struct net_device *dev) if (!IS_TX(i) && (!IS_RX(i))) continue; ltq_dma_open(&ch->dma); + ltq_dma_enable_irq(&ch->dma); napi_enable(&ch->napi); } phy_start(dev->phydev); diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c new file mode 100644 index 000000000000..8c5ba4b81fb7 --- /dev/null +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Lantiq / Intel PMAC driver for XRX200 SoCs + * + * Copyright (C) 2010 Lantiq Deutschland + * Copyright (C) 2012 John Crispin <john@phrozen.org> + * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de> + */ + +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/delay.h> + +#include <linux/of_net.h> +#include <linux/of_platform.h> + +#include <xway_dma.h> + +/* DMA */ +#define XRX200_DMA_DATA_LEN 0x600 +#define XRX200_DMA_RX 0 +#define XRX200_DMA_TX 1 + +/* cpu port mac */ +#define PMAC_RX_IPG 0x0024 +#define PMAC_RX_IPG_MASK 0xf + +#define PMAC_HD_CTL 0x0000 +/* Add Ethernet header to packets from DMA to PMAC */ +#define PMAC_HD_CTL_ADD BIT(0) +/* Add VLAN tag to Packets from DMA to PMAC */ +#define PMAC_HD_CTL_TAG BIT(1) +/* Add CRC to packets from DMA to PMAC */ +#define PMAC_HD_CTL_AC BIT(2) +/* Add status header to packets from PMAC to DMA */ +#define PMAC_HD_CTL_AS BIT(3) +/* Remove CRC from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RC BIT(4) +/* Remove Layer-2 header from packets from PMAC to DMA */ +#define PMAC_HD_CTL_RL2 BIT(5) +/* Status header is present from DMA to PMAC */ +#define PMAC_HD_CTL_RXSH BIT(6) +/* Add special tag from PMAC to switch */ +#define PMAC_HD_CTL_AST BIT(7) +/* Remove special tag from PMAC to DMA */ +#define PMAC_HD_CTL_RST BIT(8) +/* Check CRC from DMA to PMAC */ +#define PMAC_HD_CTL_CCRC BIT(9) +/* Enable reaction to Pause frames in the PMAC */ +#define PMAC_HD_CTL_FC BIT(10) + +struct xrx200_chan { + int tx_free; + + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; + + struct xrx200_priv *priv; +}; + +struct xrx200_priv { + struct clk *clk; + + struct xrx200_chan chan_tx; + struct xrx200_chan chan_rx; + + struct net_device *net_dev; + struct
device *dev; + + __iomem void *pmac_reg; +}; + +static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset) +{ + return __raw_readl(priv->pmac_reg + offset); +} + +static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset) +{ + __raw_writel(val, priv->pmac_reg + offset); +} + +static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set, + u32 offset) +{ + u32 val = xrx200_pmac_r32(priv, offset); + + val &= ~(clear); + val |= set; + xrx200_pmac_w32(priv, val, offset); +} + +/* drop all the packets from the DMA ring */ +static void xrx200_flush_dma(struct xrx200_chan *ch) +{ + int i; + + for (i = 0; i < LTQ_DESC_NUM; i++) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C) + break; + + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + } +} + +static int xrx200_open(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + napi_enable(&priv->chan_tx.napi); + ltq_dma_open(&priv->chan_tx.dma); + ltq_dma_enable_irq(&priv->chan_tx.dma); + + napi_enable(&priv->chan_rx.napi); + ltq_dma_open(&priv->chan_rx.dma); + /* The boot loader does not always deactivate the receiving of frames + * on the ports and then some packets queue up in the PPE buffers. + * They already passed the PMAC so they do not have the tags + * configured here. Read these packets here and drop them. + * The HW should have written them into memory after 10us + */ + usleep_range(20, 40); + xrx200_flush_dma(&priv->chan_rx); + ltq_dma_enable_irq(&priv->chan_rx.dma); + + netif_wake_queue(net_dev); + + return 0; +} + +static int xrx200_close(struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + + netif_stop_queue(net_dev); + + napi_disable(&priv->chan_rx.napi); + ltq_dma_close(&priv->chan_rx.dma); + + napi_disable(&priv->chan_tx.napi); + ltq_dma_close(&priv->chan_tx.dma); + + return 0; +} + +static int xrx200_alloc_skb(struct xrx200_chan *ch) +{ + int ret = 0; + + ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev, + XRX200_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) { + ret = -ENOMEM; + goto skip; + } + + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev, + ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(ch->priv->dev, + ch->dma.desc_base[ch->dma.desc].addr))) { + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + ret = -ENOMEM; + goto skip; + } + +skip: + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + XRX200_DMA_DATA_LEN; + + return ret; +} + +static int xrx200_hw_receive(struct xrx200_chan *ch) +{ + struct xrx200_priv *priv = ch->priv; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK); + struct net_device *net_dev = priv->net_dev; + int ret; + + ret = xrx200_alloc_skb(ch); + + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + + if (ret) { + netdev_err(net_dev, "failed to allocate new rx buffer\n"); + return ret; + } + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, net_dev); + netif_receive_skb(skb); + net_dev->stats.rx_packets++; + net_dev->stats.rx_bytes += len - ETH_FCS_LEN; + + return 0; +} + +static int xrx200_poll_rx(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); +
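/* Standard NAPI receive loop: consume descriptors the DMA engine has + * completed until the budget is spent; when the ring runs dry before the + * budget does, stop polling and re-enable the channel interrupt. + */ +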
int rx = 0; + int ret; + + while (rx < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ret = xrx200_hw_receive(ch); + if (ret) + return ret; + rx++; + } else { + break; + } + } + + if (rx < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return rx; +} + +static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) +{ + struct xrx200_chan *ch = container_of(napi, + struct xrx200_chan, napi); + struct net_device *net_dev = ch->priv->net_dev; + int pkts = 0; + int bytes = 0; + + while (pkts < budget) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + struct sk_buff *skb = ch->skb[ch->tx_free]; + + pkts++; + bytes += skb->len; + ch->skb[ch->tx_free] = NULL; + consume_skb(skb); + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } else { + break; + } + } + + net_dev->stats.tx_packets += pkts; + net_dev->stats.tx_bytes += bytes; + netdev_completed_queue(ch->priv->net_dev, pkts, bytes); + + if (pkts < budget) { + napi_complete(&ch->napi); + ltq_dma_enable_irq(&ch->dma); + } + + return pkts; +} + +static int xrx200_start_xmit(struct sk_buff *skb, struct net_device *net_dev) +{ + struct xrx200_priv *priv = netdev_priv(net_dev); + struct xrx200_chan *ch = &priv->chan_tx; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + u32 byte_offset; + dma_addr_t mapping; + int len; + + skb->dev = net_dev; + if (skb_put_padto(skb, ETH_ZLEN)) { + net_dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + len = skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + netdev_err(net_dev, "tx ring full\n"); + netif_stop_queue(net_dev); + return NETDEV_TX_BUSY; + } + + ch->skb[ch->dma.desc] = skb; + + mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) + goto err_drop; + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = mapping % 16; + + desc->addr = mapping - byte_offset; + /* Make sure the address is written before we give it to HW */ + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + if (ch->dma.desc == ch->tx_free) + netif_stop_queue(net_dev); + + netdev_sent_queue(net_dev, len); + + return NETDEV_TX_OK; + +err_drop: + dev_kfree_skb(skb); + net_dev->stats.tx_dropped++; + net_dev->stats.tx_errors++; + return NETDEV_TX_OK; +} + +static const struct net_device_ops xrx200_netdev_ops = { + .ndo_open = xrx200_open, + .ndo_stop = xrx200_close, + .ndo_start_xmit = xrx200_start_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + +static irqreturn_t xrx200_dma_irq(int irq, void *ptr) +{ + struct xrx200_chan *ch = ptr; + + ltq_dma_disable_irq(&ch->dma); + ltq_dma_ack_irq(&ch->dma); + + napi_schedule(&ch->napi); + + return IRQ_HANDLED; +} + +static int xrx200_dma_init(struct xrx200_priv *priv) +{ + struct xrx200_chan *ch_rx = &priv->chan_rx; + struct xrx200_chan *ch_tx = &priv->chan_tx; + int ret = 0; + int i; + + ltq_dma_init_port(DMA_PORT_ETOP); + + ch_rx->dma.nr = XRX200_DMA_RX; + ch_rx->dma.dev = priv->dev; + ch_rx->priv = priv; + + ltq_dma_alloc_rx(&ch_rx->dma); + for (ch_rx->dma.desc = 0; 
ch_rx->dma.desc < LTQ_DESC_NUM; + ch_rx->dma.desc++) { + ret = xrx200_alloc_skb(ch_rx); + if (ret) + goto rx_free; + } + ch_rx->dma.desc = 0; + ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_rx", &priv->chan_rx); + if (ret) { + dev_err(priv->dev, "failed to request RX irq %d\n", + ch_rx->dma.irq); + goto rx_ring_free; + } + + ch_tx->dma.nr = XRX200_DMA_TX; + ch_tx->dma.dev = priv->dev; + ch_tx->priv = priv; + + ltq_dma_alloc_tx(&ch_tx->dma); + ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0, + "xrx200_net_tx", &priv->chan_tx); + if (ret) { + dev_err(priv->dev, "failed to request TX irq %d\n", + ch_tx->dma.irq); + goto tx_free; + } + + return ret; + +tx_free: + ltq_dma_free(&ch_tx->dma); + +rx_ring_free: + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) { + if (priv->chan_rx.skb[i]) + dev_kfree_skb_any(priv->chan_rx.skb[i]); + } + +rx_free: + ltq_dma_free(&ch_rx->dma); + return ret; +} + +static void xrx200_hw_cleanup(struct xrx200_priv *priv) +{ + int i; + + ltq_dma_free(&priv->chan_tx.dma); + ltq_dma_free(&priv->chan_rx.dma); + + /* free the allocated RX ring */ + for (i = 0; i < LTQ_DESC_NUM; i++) + dev_kfree_skb_any(priv->chan_rx.skb[i]); +} + +static int xrx200_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct resource *res; + struct xrx200_priv *priv; + struct net_device *net_dev; + const u8 *mac; + int err; + + /* alloc the network device */ + net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv)); + if (!net_dev) + return -ENOMEM; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->dev = dev; + + net_dev->netdev_ops = &xrx200_netdev_ops; + SET_NETDEV_DEV(net_dev, dev); + net_dev->min_mtu = ETH_ZLEN; + net_dev->max_mtu = XRX200_DMA_DATA_LEN; + + /* load the memory ranges */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "failed to get resources\n"); + return -ENOENT; + } + + priv->pmac_reg = devm_ioremap_resource(dev, res); + if (IS_ERR(priv->pmac_reg)) { + dev_err(dev, "failed to request and remap io ranges\n"); + return PTR_ERR(priv->pmac_reg); + } + + priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx"); + if (priv->chan_rx.dma.irq < 0) { + dev_err(dev, "failed to get RX IRQ, %i\n", + priv->chan_rx.dma.irq); + return -ENOENT; + } + priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx"); + if (priv->chan_tx.dma.irq < 0) { + dev_err(dev, "failed to get TX IRQ, %i\n", + priv->chan_tx.dma.irq); + return -ENOENT; + } + + /* get the clock */ + priv->clk = devm_clk_get(dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(priv->clk); + } + + mac = of_get_mac_address(np); + if (mac && is_valid_ether_addr(mac)) + ether_addr_copy(net_dev->dev_addr, mac); + else + eth_hw_addr_random(net_dev); + + /* bring up the dma engine and IP core */ + err = xrx200_dma_init(priv); + if (err) + return err; + + /* enable clock gate */ + err = clk_prepare_enable(priv->clk); + if (err) + goto err_uninit_dma; + + /* set IPG to 12 */ + xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG); + + /* enable status header, enable CRC */ + xrx200_pmac_mask(priv, 0, + PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH | + PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC, + PMAC_HD_CTL); + + /* setup NAPI */ + netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); + netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 
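/* the trailing 32 is the NAPI weight: the maximum number of packets one poll call may process */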
32); + + platform_set_drvdata(pdev, priv); + + err = register_netdev(net_dev); + if (err) + goto err_unprepare_clk; + return 0; + +err_unprepare_clk: + clk_disable_unprepare(priv->clk); + +err_uninit_dma: + xrx200_hw_cleanup(priv); + + return err; +} + +static int xrx200_remove(struct platform_device *pdev) +{ + struct xrx200_priv *priv = platform_get_drvdata(pdev); + struct net_device *net_dev = priv->net_dev; + + /* free stack related instances */ + netif_stop_queue(net_dev); + netif_napi_del(&priv->chan_tx.napi); + netif_napi_del(&priv->chan_rx.napi); + + /* remove the actual device */ + unregister_netdev(net_dev); + + /* release the clock */ + clk_disable_unprepare(priv->clk); + + /* shut down hardware */ + xrx200_hw_cleanup(priv); + + return 0; +} + +static const struct of_device_id xrx200_match[] = { + { .compatible = "lantiq,xrx200-net" }, + {}, +}; +MODULE_DEVICE_TABLE(of, xrx200_match); + +static struct platform_driver xrx200_driver = { + .probe = xrx200_probe, + .remove = xrx200_remove, + .driver = { + .name = "lantiq,xrx200-net", + .of_match_table = xrx200_match, + }, +}; + +module_platform_driver(xrx200_driver); + +MODULE_AUTHOR("John Crispin <john@phrozen.org>"); +MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 62f204f32316..1e9bcbdc6a90 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2733,17 +2733,17 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, memset(&res, 0, sizeof(res)); if (of_irq_to_resource(pnp, 0, &res) <= 0) { - dev_err(&pdev->dev, "missing interrupt on %s\n", pnp->name); + dev_err(&pdev->dev, "missing interrupt on %pOFn\n", pnp); return -EINVAL; } if (of_property_read_u32(pnp, "reg", &ppd.port_number)) { - dev_err(&pdev->dev, "missing reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "missing reg property on %pOFn\n", pnp); return -EINVAL; } if (ppd.port_number >= 3) { - dev_err(&pdev->dev, "invalid reg property on %s\n", pnp->name); + dev_err(&pdev->dev, "invalid reg property on %pOFn\n", pnp); return -EINVAL; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index bc80a678abc3..fe3edb3c2bf4 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2065,10 +2065,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi, /* Linux processing */ rxq->skb->protocol = eth_type_trans(rxq->skb, dev); - if (dev->features & NETIF_F_GRO) - napi_gro_receive(napi, rxq->skb); - else - netif_receive_skb(rxq->skb); + napi_gro_receive(napi, rxq->skb); /* clean incomplete skb pointer in queue */ rxq->skb = NULL; @@ -2510,12 +2507,13 @@ static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) { struct mvneta_tx_queue *txq; struct netdev_queue *nq; + int cpu = smp_processor_id(); while (cause_tx_done) { txq = mvneta_tx_done_policy(pp, cause_tx_done); nq = netdev_get_tx_queue(pp->dev, txq->id); - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock(nq, cpu); if (txq->count) mvneta_txq_done(pp, txq); @@ -4598,7 +4596,8 @@ static int mvneta_probe(struct platform_device *pdev) } } - dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO; + dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_RXCSUM; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; dev->priv_flags |= 
IFF_LIVE_ADDR_CHANGE; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 6e6abdc399de..cc1e9a96a43b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -355,12 +355,8 @@ static int mtk_phy_connect(struct net_device *dev) dev->phydev->speed = 0; dev->phydev->duplex = 0; - if (of_phy_is_fixed_link(mac->of_node)) - dev->phydev->supported |= - SUPPORTED_Pause | SUPPORTED_Asym_Pause; - - dev->phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause; + phy_set_max_speed(dev->phydev, SPEED_1000); + phy_support_asym_pause(dev->phydev); dev->phydev->advertising = dev->phydev->supported | ADVERTISED_Autoneg; phy_start_aneg(dev->phydev); @@ -405,7 +401,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) eth->mii_bus->priv = eth; eth->mii_bus->parent = eth->dev; - snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); ret = of_mdiobus_register(eth->mii_bus, mii_np); err_put_node: diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 4bdf25059542..deef5a998985 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -614,7 +614,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, int i; buf->direct.buf = NULL; - buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; + buf->nbufs = DIV_ROUND_UP(size, PAGE_SIZE); buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 7262c6310650..4b4351141b94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -406,7 +406,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; if (WARN_ON(!obj_per_chunk)) return -EINVAL; - num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; + num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); if (!table->icm) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index db2cfcd21d43..01a967e717e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -204,13 +204,6 @@ struct mlx5e_umr_wqe { extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; -static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { - "rx_cqe_moder", - "tx_cqe_moder", - "rx_cqe_compress", - "rx_striding_rq", -}; - enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), @@ -905,6 +898,12 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb); /* common netdev helpers */ +void mlx5e_create_q_counters(struct mlx5e_priv *priv); +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv); +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq); +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); + int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 
98dd3e0ada72..8cd338ceb237 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -135,6 +135,13 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } +static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { + "rx_cqe_moder", + "tx_cqe_moder", + "rx_cqe_compress", + "rx_striding_rq", +}; + int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { int i, num_stats = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5a7939e70190..d14c4051edd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3049,8 +3049,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev, return mlx5e_alloc_cq_common(mdev, param, cq); } -static int mlx5e_open_drop_rq(struct mlx5e_priv *priv, - struct mlx5e_rq *drop_rq) +int mlx5e_open_drop_rq(struct mlx5e_priv *priv, + struct mlx5e_rq *drop_rq) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_cq_param cq_param = {}; @@ -3094,7 +3094,7 @@ err_free_cq: return err; } -static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) +void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq) { mlx5e_destroy_rq(drop_rq); mlx5e_free_rq(drop_rq); @@ -4726,7 +4726,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_tls_build_netdev(priv); } -static void mlx5e_create_q_counters(struct mlx5e_priv *priv) +void mlx5e_create_q_counters(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -4744,7 +4744,7 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv) } } -static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) +void mlx5e_destroy_q_counters(struct mlx5e_priv *priv) { if (priv->q_counter) mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter); @@ -4783,9 +4783,17 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) @@ -4821,6 +4829,10 @@ err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -4832,6 +4844,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) @@ -4975,7 +4989,6 @@ err_cleanup_nic: int mlx5e_attach_netdev(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; const struct mlx5e_profile *profile; int err; @@ -4986,28 +4999,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (err) goto out; - mlx5e_create_q_counters(priv); - - err = mlx5e_open_drop_rq(priv, &priv->drop_rq); - if (err) { - mlx5_core_err(mdev, "open drop rq failed, %d\n", err); - goto err_destroy_q_counters; - } - err = profile->init_rx(priv); if (err) - goto err_close_drop_rq; + goto err_cleanup_tx; if (profile->enable) 
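/* the enable hook is optional and runs only once rx and tx init have succeeded */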
profile->enable(priv); return 0; -err_close_drop_rq: - mlx5e_close_drop_rq(&priv->drop_rq); - -err_destroy_q_counters: - mlx5e_destroy_q_counters(priv); +err_cleanup_tx: profile->cleanup_tx(priv); out: @@ -5025,8 +5026,6 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) flush_workqueue(priv->wq); profile->cleanup_rx(priv); - mlx5e_close_drop_rq(&priv->drop_rq); - mlx5e_destroy_q_counters(priv); profile->cleanup_tx(priv); cancel_delayed_work_sync(&priv->update_stats_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index c9cc9747d21d..f6eead24931f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -999,14 +999,21 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_flow_handle *flow_rule; int err; mlx5e_init_l2_addr(priv); + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + return err; + } + err = mlx5e_create_direct_rqts(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_tirs(priv); if (err) @@ -1027,6 +1034,8 @@ err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); return err; } @@ -1037,6 +1046,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) mlx5_del_flow_rules(rpriv->vport_rx_rule); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_rqts(priv); + mlx5e_close_drop_rq(&priv->drop_rq); } static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 15d8ae28c040..424bc89184c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -37,6 +37,7 @@ #include <net/busy_poll.h> #include <net/ip6_checksum.h> #include <net/page_pool.h> +#include <net/inet_ecn.h> #include "en.h" #include "en_tc.h" #include "eswitch.h" @@ -690,12 +691,29 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) +static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth, + __be16 *proto) { - __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + *proto = ((struct ethhdr *)skb->data)->h_proto; + *proto = __vlan_get_protocol(skb, *proto, network_depth); + return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); +} + +static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) +{ + int network_depth = 0; + __be16 proto; + void *ip; + int rc; - ethertype = __vlan_get_protocol(skb, ethertype, network_depth); - return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); + if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto))) + return; + + ip = skb->data + network_depth; + rc = ((proto == htons(ETH_P_IP)) ? 
IP_ECN_set_ce((struct iphdr *)ip) : + IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip)); + + rq->stats->ecn_mark += !!rc; } static __be32 mlx5e_get_fcs(struct sk_buff *skb) @@ -737,6 +755,14 @@ static __be32 mlx5e_get_fcs(struct sk_buff *skb) return fcs_bytes; } +static u8 get_ip_proto(struct sk_buff *skb, __be16 proto) +{ + void *ip_p = skb->data + sizeof(struct ethhdr); + + return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol : + ((struct ipv6hdr *)ip_p)->nexthdr; +} + static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, @@ -745,6 +771,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, { struct mlx5e_rq_stats *stats = rq->stats; int network_depth = 0; + __be16 proto; if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -755,7 +782,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (likely(is_last_ethertype_ip(skb, &network_depth))) { + if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) { + if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP)) + goto csum_unnecessary; + skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); if (network_depth > ETH_HLEN) @@ -773,6 +803,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } +csum_unnecessary: if (likely((cqe->hds_ip_ext & CQE_L3_OK) && (cqe->hds_ip_ext & CQE_L4_OK))) { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -790,6 +821,8 @@ csum_none: stats->csum_none++; } +#define MLX5E_CE_BIT_MASK 0x80 + static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, u32 cqe_bcnt, struct mlx5e_rq *rq, @@ -834,6 +867,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); + /* checking CE bit in cqe - MSB in ml_path field */ + if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK)) + mlx5e_enable_ecn(rq, skb); + skb->protocol = eth_type_trans(skb, netdev); } @@ -1230,8 +1267,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct mlx5e_rq_stats *stats = rq->stats; struct hwtstamp_config *tstamp; + struct mlx5e_rq_stats *stats; struct net_device *netdev; struct mlx5e_priv *priv; char *pseudo_header; @@ -1254,6 +1291,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, priv = mlx5i_epriv(netdev); tstamp = &priv->tstamp; + stats = &priv->channel_stats[rq->ix].rq; g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 6839481f7697..90c7607b1f44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -53,6 +53,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, @@ -144,6 +145,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; 
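+ /* fold the new per-RQ ECN-mark counter into the aggregated software stats */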
+ s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; @@ -1144,6 +1146,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index a4c035aedd46..a5fb3dc27f50 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -66,6 +66,7 @@ struct mlx5e_sw_stats { u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_ecn_mark; u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; @@ -184,6 +185,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 ecn_mark; u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_redirect; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 32070e5d993d..a06f83c0c2b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -36,6 +36,7 @@ #include <linux/refcount.h> #include <linux/mlx5/fs.h> #include <linux/rhashtable.h> +#include <linux/llist.h> enum fs_node_type { FS_TYPE_NAMESPACE, @@ -138,8 +139,9 @@ struct mlx5_fc_cache { }; struct mlx5_fc { - struct rb_node node; struct list_head list; + struct llist_node addlist; + struct llist_node dellist; /* last{packets,bytes} members are used when calculating the delta since * last reading @@ -148,7 +150,6 @@ struct mlx5_fc { u64 lastbytes; u32 id; - bool deleted; bool aging; struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 58af6be13dfa..09206c4acd9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -52,11 +52,13 @@ * access to counter list: * - create (user context) * - mlx5_fc_create() only adds to an addlist to be used by - * mlx5_fc_stats_query_work(). addlist is protected by a spinlock. + * mlx5_fc_stats_query_work(). addlist is a lockless singly linked list + * that doesn't require any additional synchronization when adding a single + * node. * - spawn thread to do the actual destroy * * - destroy (user context) - * - mark a counter as deleted + * - add a counter to lockless dellist * - spawn thread to do the actual del * * - dump (user context) @@ -71,36 +73,43 @@ * elapsed, the thread will actually query the hardware. 
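 * The worker first splices addlist into the id-sorted counters list and * frees everything queued on dellist, and only then walks the list, * issuing the bulk query commands in ascending id order.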
*/ -static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) +static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev, + u32 id) { - struct rb_node **new = &root->rb_node; - struct rb_node *parent = NULL; - - while (*new) { - struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node); - int result = counter->id - this->id; - - parent = *new; - if (result < 0) - new = &((*new)->rb_left); - else - new = &((*new)->rb_right); - } + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + unsigned long next_id = (unsigned long)id + 1; + struct mlx5_fc *counter; + + rcu_read_lock(); + /* skip counters that are in idr, but not yet in counters list */ + while ((counter = idr_get_next_ul(&fc_stats->counters_idr, + &next_id)) != NULL && + list_empty(&counter->list)) + next_id++; + rcu_read_unlock(); + + return counter ? &counter->list : &fc_stats->counters; +} - /* Add new node and rebalance tree. */ - rb_link_node(&counter->node, parent, new); - rb_insert_color(&counter->node, root); +static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id); + + list_add_tail(&counter->list, next); } -/* The function returns the last node that was queried so the caller +/* The function returns the last counter that was queried so the caller * function can continue calling it till all counters are queried. */ -static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, +static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, u32 last_id) { + struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct mlx5_fc *counter = NULL; struct mlx5_cmd_fc_bulk *b; - struct rb_node *node = NULL; + bool more = false; u32 afirst_id; int num; int err; @@ -130,14 +139,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, goto out; } - for (node = &first->node; node; node = rb_next(node)) { - struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node); + counter = first; + list_for_each_entry_from(counter, &fc_stats->counters, list) { struct mlx5_fc_cache *c = &counter->cache; u64 packets; u64 bytes; - if (counter->id > last_id) + if (counter->id > last_id) { + more = true; break; + } mlx5_cmd_fc_bulk_get(dev, b, counter->id, &packets, &bytes); @@ -153,7 +164,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, out: mlx5_cmd_fc_bulk_free(b); - return node; + return more ? 
counter : NULL; +} + +static void mlx5_free_fc(struct mlx5_core_dev *dev, + struct mlx5_fc *counter) +{ + mlx5_cmd_fc_free(dev, counter->id); + kfree(counter); } static void mlx5_fc_stats_work(struct work_struct *work) @@ -161,52 +179,33 @@ static void mlx5_fc_stats_work(struct work_struct *work) struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev, priv.fc_stats.work.work); struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist = llist_del_all(&fc_stats->addlist); + struct mlx5_fc *counter = NULL, *last = NULL, *tmp; unsigned long now = jiffies; - struct mlx5_fc *counter = NULL; - struct mlx5_fc *last = NULL; - struct rb_node *node; - LIST_HEAD(tmplist); - - spin_lock(&fc_stats->addlist_lock); - list_splice_tail_init(&fc_stats->addlist, &tmplist); - - if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters)) + if (tmplist || !list_empty(&fc_stats->counters)) queue_delayed_work(fc_stats->wq, &fc_stats->work, fc_stats->sampling_interval); - spin_unlock(&fc_stats->addlist_lock); - - list_for_each_entry(counter, &tmplist, list) - mlx5_fc_stats_insert(&fc_stats->counters, counter); - - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = rb_next(node); - - if (counter->deleted) { - rb_erase(&counter->node, &fc_stats->counters); + llist_for_each_entry(counter, tmplist, addlist) + mlx5_fc_stats_insert(dev, counter); - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - continue; - } + tmplist = llist_del_all(&fc_stats->dellist); + llist_for_each_entry_safe(counter, tmp, tmplist, dellist) { + list_del(&counter->list); - last = counter; + mlx5_free_fc(dev, counter); } - if (time_before(now, fc_stats->next_query) || !last) + if (time_before(now, fc_stats->next_query) || + list_empty(&fc_stats->counters)) return; + last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = mlx5_fc_stats_query(dev, counter, last->id); - } + counter = list_first_entry(&fc_stats->counters, struct mlx5_fc, + list); + while (counter) + counter = mlx5_fc_stats_query(dev, counter, last->id); fc_stats->next_query = now + fc_stats->sampling_interval; } @@ -220,24 +219,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging) counter = kzalloc(sizeof(*counter), GFP_KERNEL); if (!counter) return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&counter->list); err = mlx5_cmd_fc_alloc(dev, &counter->id); if (err) goto err_out; if (aging) { + u32 id = counter->id; + counter->cache.lastuse = jiffies; counter->aging = true; - spin_lock(&fc_stats->addlist_lock); - list_add(&counter->list, &fc_stats->addlist); - spin_unlock(&fc_stats->addlist_lock); + idr_preload(GFP_KERNEL); + spin_lock(&fc_stats->counters_idr_lock); + + err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id, + GFP_NOWAIT); + + spin_unlock(&fc_stats->counters_idr_lock); + idr_preload_end(); + if (err) + goto err_out_alloc; + + llist_add(&counter->addlist, &fc_stats->addlist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); } return counter; +err_out_alloc: + mlx5_cmd_fc_free(dev, counter->id); err_out: kfree(counter); @@ -253,13 +266,16 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter) return; if (counter->aging) { - counter->deleted = true; + spin_lock(&fc_stats->counters_idr_lock); + WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id)); + 
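/* with the id removed from the idr, new lookups can no longer reach the counter; it is then queued on the lockless dellist for the worker to unlink and free */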
spin_unlock(&fc_stats->counters_idr_lock); + + llist_add(&counter->dellist, &fc_stats->dellist); mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); return; } - mlx5_cmd_fc_free(dev, counter->id); - kfree(counter); + mlx5_free_fc(dev, counter); } EXPORT_SYMBOL(mlx5_fc_destroy); @@ -267,9 +283,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; - fc_stats->counters = RB_ROOT; - INIT_LIST_HEAD(&fc_stats->addlist); - spin_lock_init(&fc_stats->addlist_lock); + spin_lock_init(&fc_stats->counters_idr_lock); + idr_init(&fc_stats->counters_idr); + INIT_LIST_HEAD(&fc_stats->counters); + init_llist_head(&fc_stats->addlist); + init_llist_head(&fc_stats->dellist); fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); if (!fc_stats->wq) @@ -284,34 +302,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev) void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev) { struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats; + struct llist_node *tmplist; struct mlx5_fc *counter; struct mlx5_fc *tmp; - struct rb_node *node; cancel_delayed_work_sync(&dev->priv.fc_stats.work); destroy_workqueue(dev->priv.fc_stats.wq); dev->priv.fc_stats.wq = NULL; - list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) { - list_del(&counter->list); - - mlx5_cmd_fc_free(dev, counter->id); + idr_destroy(&fc_stats->counters_idr); - kfree(counter); - } + tmplist = llist_del_all(&fc_stats->addlist); + llist_for_each_entry_safe(counter, tmp, tmplist, addlist) + mlx5_free_fc(dev, counter); - node = rb_first(&fc_stats->counters); - while (node) { - counter = rb_entry(node, struct mlx5_fc, node); - - node = rb_next(node); - - rb_erase(&counter->node, &fc_stats->counters); - - mlx5_cmd_fc_free(dev, counter->id); - - kfree(counter); - } + list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list) + mlx5_free_fc(dev, counter); } int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index e3797a44e074..a825ed093efd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -45,6 +45,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static const struct net_device_ops mlx5i_netdev_ops = { .ndo_open = mlx5i_open, .ndo_stop = mlx5i_close, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, @@ -83,6 +84,7 @@ void mlx5i_init(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; + priv->max_opened_tc = 1; mutex_init(&priv->state_lock); mlx5_query_port_max_mtu(mdev, &max_mtu, 1); @@ -114,6 +116,47 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv) /* Do nothing .. 
*/ } +static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv) +{ + struct mlx5e_sw_stats s = { 0 }; + int i, j; + + for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *rq_stats; + + channel_stats = &priv->channel_stats[i]; + rq_stats = &channel_stats->rq; + + s.rx_packets += rq_stats->packets; + s.rx_bytes += rq_stats->bytes; + + for (j = 0; j < priv->max_opened_tc; j++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; + + s.tx_packets += sq_stats->packets; + s.tx_bytes += sq_stats->bytes; + s.tx_queue_dropped += sq_stats->dropped; + } + } + + memcpy(&priv->stats.sw, &s, sizeof(s)); +} + +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5e_sw_stats *sstats = &priv->stats.sw; + + mlx5i_grp_sw_update_stats(priv); + + stats->rx_packets = sstats->rx_packets; + stats->rx_bytes = sstats->rx_bytes; + stats->tx_packets = sstats->tx_packets; + stats->tx_bytes = sstats->tx_bytes; + stats->tx_dropped = sstats->tx_queue_dropped; +} + int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; @@ -306,11 +349,20 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { + struct mlx5_core_dev *mdev = priv->mdev; int err; + mlx5e_create_q_counters(priv); + + err = mlx5e_open_drop_rq(priv, &priv->drop_rq); + if (err) { + mlx5_core_err(mdev, "open drop rq failed, %d\n", err); + goto err_destroy_q_counters; + } + err = mlx5e_create_indirect_rqt(priv); if (err) - return err; + goto err_close_drop_rq; err = mlx5e_create_direct_rqts(priv); if (err) @@ -338,6 +390,10 @@ err_destroy_direct_rqts: mlx5e_destroy_direct_rqts(priv); err_destroy_indirect_rqts: mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_close_drop_rq: + mlx5e_close_drop_rq(&priv->drop_rq); +err_destroy_q_counters: + mlx5e_destroy_q_counters(priv); return err; } @@ -348,6 +404,8 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) mlx5e_destroy_indirect_tirs(priv); mlx5e_destroy_direct_rqts(priv); mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_destroy_q_counters(priv); } static const struct mlx5e_profile mlx5i_nic_profile = { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 08eac92fc26c..2e7fb829e1b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -121,6 +121,7 @@ static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_av *av, u32 dqpn, u32 dqkey); void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); +void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); #endif /* CONFIG_MLX5_CORE_IPOIB */ #endif /* __MLX5E_IPOB_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 54a188f41f90..e3e8a5f1ac9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -146,6 +146,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_open = mlx5i_pkey_open, .ndo_stop = mlx5i_pkey_close, .ndo_init = mlx5i_pkey_dev_init, + .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = 
mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, .ndo_do_ioctl = mlx5i_pkey_ioctl, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 3f767cde4c1d..0d90b1b4a3d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -111,10 +111,10 @@ static void mlx5_pps_out(struct work_struct *work) for (i = 0; i < clock->ptp_info.n_pins; i++) { u64 tstart; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); tstart = clock->pps_info.start[i]; clock->pps_info.start[i] = 0; - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); if (!tstart) continue; @@ -132,10 +132,10 @@ static void mlx5_timestamp_overflow(struct work_struct *work) overflow_work); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); schedule_delayed_work(&clock->overflow_work, clock->overflow_period); } @@ -147,10 +147,10 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, u64 ns = timespec64_to_ns(ts); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_init(&clock->tc, &clock->cycles, ns); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -162,9 +162,9 @@ static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) u64 ns; unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); ns = timecounter_read(&clock->tc); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); @@ -177,10 +177,10 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ptp_info); unsigned long flags; - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_adjtime(&clock->tc, delta); mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -203,12 +203,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&clock->tc); clock->cycles.mult = neg_adj ? 
clock->nominal_c_mult - diff : clock->nominal_c_mult + diff; mlx5_update_clock_info_page(clock->mdev); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); return 0; } @@ -307,12 +307,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.start.nsec; ns = timespec64_to_ns(&ts); cycles_now = mlx5_read_internal_timer(mdev); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); time_stamp = cycles_now + cycles_delta; field_select = MLX5_MTPPS_FS_PIN_MODE | MLX5_MTPPS_FS_PATTERN | @@ -471,14 +471,14 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, ts.tv_sec += 1; ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); - write_lock_irqsave(&clock->lock, flags); + write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, clock->cycles.mult); clock->pps_info.start[pin] = cycles_now + cycles_delta; schedule_work(&clock->pps_info.out_work); - write_unlock_irqrestore(&clock->lock, flags); + write_sequnlock_irqrestore(&clock->lock, flags); break; default: mlx5_core_err(mdev, " Unhandled event\n"); @@ -498,7 +498,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); return; } - rwlock_init(&clock->lock); + seqlock_init(&clock->lock); clock->cycles.read = read_internal_timer; clock->cycles.shift = MLX5_CYCLES_SHIFT; clock->cycles.mult = clocksource_khz2mult(dev_freq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 02e2e4575e4f..263cb6e2aeee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -46,11 +46,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, u64 timestamp) { + unsigned int seq; u64 nsec; - read_lock(&clock->lock); - nsec = timecounter_cyc2time(&clock->tc, timestamp); - read_unlock(&clock->lock); + do { + seq = read_seqbegin(&clock->lock); + nsec = timecounter_cyc2time(&clock->tc, timestamp); + } while (read_seqretry(&clock->lock, seq)); return ns_to_ktime(nsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c index 23cc337a96c9..7e20666ce5ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c @@ -73,7 +73,7 @@ static int get_pas_size(struct mlx5_srq_attr *in) u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); u32 page_size = 1 << log_page_size; u32 rq_sz_po = rq_sz + (page_offset * po_quanta); - u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size; + u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size); return rq_num_pas * sizeof(u64); } diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 001b5f714c1b..867cddba840f 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -999,7 +999,6 @@ static int lan743x_phy_open(struct lan743x_adapter 
*adapter) struct phy_device *phydev; struct net_device *netdev; int ret = -EIO; - u32 mii_adv; netdev = adapter->netdev; phydev = phy_find_first(adapter->mdiobus); @@ -1013,13 +1012,11 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter) goto return_error; /* MAC doesn't support 1000T Half */ - phydev->supported &= ~SUPPORTED_1000baseT_Half; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ + phy_support_asym_pause(phydev); phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); - phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); phy->fc_autoneg = phydev->autoneg; phy_start(phydev); diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c index ccdf9123f26f..b2109eca81fd 100644 --- a/drivers/net/ethernet/microchip/lan743x_ptp.c +++ b/drivers/net/ethernet/microchip/lan743x_ptp.c @@ -977,8 +977,8 @@ void lan743x_ptp_close(struct lan743x_adapter *adapter) lan743x_ptp_disable(adapter); } -void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, - bool ts_insert_enable) +static void lan743x_ptp_set_sync_ts_insert(struct lan743x_adapter *adapter, + bool ts_insert_enable) { u32 ptp_tx_mod = lan743x_csr_read(adapter, PTP_TX_MOD); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index b8983e73265a..f980f103555b 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -491,7 +491,7 @@ static struct pci_driver s2io_driver = { }; /* A simplifier macro used both by init and free shared_mem Fns(). */ -#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each) +#define TXD_MEM_PAGE_CNT(len, per_each) DIV_ROUND_UP(len, per_each) /* netqueue manipulation helper functions */ static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp) diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index b157ccd8c80f..5b06f07c78cd 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -55,30 +55,21 @@ #define NFP_QMSTAT_DROP 16 #define NFP_QMSTAT_ECN 24 -static unsigned long long -nfp_abm_q_lvl_thrs(struct nfp_abm_link *alink, unsigned int queue) -{ - return alink->abm->q_lvls->addr + - (alink->queue_base + queue) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; -} - static int nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, unsigned int stride, unsigned int offset, unsigned int i, bool is_u64, u64 *res) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 val32, mur; - u64 val, addr; + u64 val, sym_offset; + u32 val32; int err; - mur = NFP_CPP_ATOMIC_RD(sym->target, sym->domain); - - addr = sym->addr + (alink->queue_base + i) * stride + offset; + sym_offset = (alink->queue_base + i) * stride + offset; if (is_u64) - err = nfp_cpp_readq(cpp, mur, addr, &val); + err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val); else - err = nfp_cpp_readl(cpp, mur, addr, &val32); + err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32); if (err) { nfp_err(cpp, "RED offload reading stat failed on vNIC %d queue %d\n", @@ -114,13 +105,12 @@ nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) { struct nfp_cpp *cpp = alink->abm->app->cpp; - u32 
muw; + u64 sym_offset; int err; - muw = NFP_CPP_ATOMIC_WR(alink->abm->q_lvls->target, - alink->abm->q_lvls->domain); - - err = nfp_cpp_writel(cpp, muw, nfp_abm_q_lvl_thrs(alink, i), val); + sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; + err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0, + sym_offset, val); if (err) { nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", alink->id, i); @@ -290,10 +280,10 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) nfp_err(pf->cpp, "Symbol '%s' not found\n", name); return ERR_PTR(-ENOENT); } - if (sym->size != size) { + if (nfp_rtsym_size(sym) != size) { nfp_err(pf->cpp, "Symbol '%s' wrong size: expected %u got %llu\n", - name, size, sym->size); + name, size, nfp_rtsym_size(sym)); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index b84a6c2d387b..305ac07dc1e7 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -540,8 +540,9 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, { struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id]; u8 mac_addr[ETH_ALEN]; - const char *mac_str; - char name[32]; + struct nfp_nsp *nsp; + char hwinfo[32]; + int err; if (id > pf->eth_tbl->count) { nfp_warn(pf->cpp, "No entry for persistent MAC address\n"); @@ -549,22 +550,37 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn, return; } - snprintf(name, sizeof(name), "eth%u.mac.pf%u", + snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u", eth_port->eth_index, abm->pf_id); - mac_str = nfp_hwinfo_lookup(pf->hwinfo, name); - if (!mac_str) { - nfp_warn(pf->cpp, "Can't lookup persistent MAC address (%s)\n", - name); + nsp = nfp_nsp_open(pf->cpp); + if (IS_ERR(nsp)) { + nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n", + PTR_ERR(nsp)); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + if (!nfp_nsp_has_hwinfo_lookup(nsp)) { + nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n"); + eth_hw_addr_random(nn->dp.netdev); + return; + } + + err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo)); + nfp_nsp_close(nsp); + if (err) { + nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n", + err); eth_hw_addr_random(nn->dp.netdev); return; } - if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", + if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx", &mac_addr[0], &mac_addr[1], &mac_addr[2], &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) { nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n", - mac_str); + hwinfo); eth_hw_addr_random(nn->dp.netdev); return; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 4a540c5e27fe..9474a4eed8ce 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -68,6 +68,10 @@ static const struct pci_device_id nfp_pci_device_ids[] = { PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, }, + { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, + PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, + PCI_ANY_ID, 0, + }, { PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID, PCI_ANY_ID, 0, @@ -112,23 +116,18 @@ nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void 
*in_data, u64 in_length, void *out_data, u64 out_length) { - unsigned long long addr; unsigned long err_at; u64 max_data_sz; u32 val = 0; - u32 cpp_id; int n, err; if (!pf->mbox) return -EOPNOTSUPP; - cpp_id = NFP_CPP_ISLAND_ID(pf->mbox->target, NFP_CPP_ACTION_RW, 0, - pf->mbox->domain); - addr = pf->mbox->addr; - max_data_sz = pf->mbox->size - NFP_MBOX_SYM_MIN_SIZE; + max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE; /* Check if cmd field is clear */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err || val) { nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n", cmd, val, err); @@ -136,30 +135,29 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } in_length = min(in_length, max_data_sz); - n = nfp_cpp_write(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - in_data, in_length); + n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data, + in_length); if (n != in_length) return -EIO; /* Write data_len and wipe reserved */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, - in_length); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length); if (err) return err; /* Read back for ordering */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; /* Write cmd and wipe return value */ - err = nfp_cpp_writeq(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, cmd); + err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd); if (err) return err; err_at = jiffies + 5 * HZ; while (true) { /* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_CMD, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val); if (err) return err; if (!val) @@ -172,18 +170,18 @@ int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, } /* Copy output if any (could be error info, do it before reading ret) */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_DATA_LEN, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val); if (err) return err; out_length = min_t(u32, val, min(out_length, max_data_sz)); - n = nfp_cpp_read(pf->cpp, cpp_id, addr + NFP_MBOX_DATA, - out_data, out_length); + n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA, + out_data, out_length); if (n != out_length) return -EIO; /* Check if there is an error */ - err = nfp_cpp_readl(pf->cpp, cpp_id, addr + NFP_MBOX_RET, &val); + err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val); if (err) return err; if (val) @@ -441,8 +439,11 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } fw = nfp_net_fw_find(pdev, pf); - if (!fw) + if (!fw) { + if (nfp_nsp_has_stored_fw_load(nsp)) + nfp_nsp_load_stored_fw(nsp); return 0; + } dev_info(&pdev->dev, "Soft-reset, loading FW image\n"); err = nfp_nsp_device_soft_reset(nsp); @@ -453,7 +454,6 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) } err = nfp_nsp_load_fw(nsp, fw); - if (err < 0) { dev_err(&pdev->dev, "FW loading failed: %d\n", err); goto exit_release_fw; @@ -566,9 +566,9 @@ static int nfp_pf_find_rtsyms(struct nfp_pf *pf) /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol); - if (pf->mbox && pf->mbox->size < NFP_MBOX_SYM_MIN_SIZE) { + if (pf->mbox && nfp_rtsym_size(pf->mbox) < 
NFP_MBOX_SYM_MIN_SIZE) { nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n", - pf->mbox->size, NFP_MBOX_SYM_MIN_SIZE); + nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE); return -EINVAL; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 253bdaef1505..1aac55d66404 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2091,10 +2091,10 @@ static void nfp_ctrl_poll(unsigned long arg) { struct nfp_net_r_vector *r_vec = (void *)arg; - spin_lock_bh(&r_vec->lock); + spin_lock(&r_vec->lock); nfp_net_tx_complete(r_vec->tx_ring, 0); __nfp_ctrl_tx_queued(r_vec); - spin_unlock_bh(&r_vec->lock); + spin_unlock(&r_vec->lock); nfp_ctrl_rx(r_vec); @@ -3167,6 +3167,7 @@ static void nfp_net_stat64(struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); int r; + /* Collect software stats */ for (r = 0; r < nn->max_r_vecs; r++) { struct nfp_net_r_vector *r_vec = &nn->r_vecs[r]; u64 data[3]; @@ -3192,6 +3193,14 @@ static void nfp_net_stat64(struct net_device *netdev, stats->tx_bytes += data[1]; stats->tx_errors += data[2]; } + + /* Add in device stats */ + stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES); + stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS); + stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS); + + stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS); + stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS); } static int nfp_net_set_features(struct net_device *netdev, @@ -3762,15 +3771,18 @@ static void nfp_net_netdev_init(struct nfp_net *nn) } if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) netdev->hw_features |= NETIF_F_RXHASH; - if (nn->cap & NFP_NET_CFG_CTRL_VXLAN && - nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) { if (nn->cap & NFP_NET_CFG_CTRL_LSO) - netdev->hw_features |= NETIF_F_GSO_GRE | - NETIF_F_GSO_UDP_TUNNEL; - nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE; - - netdev->hw_enc_features = netdev->hw_features; + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN; + } + if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) { + if (nn->cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE; } + if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features; netdev->vlan_features = netdev->hw_features; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c index bb8ed460086e..b6b897840ac5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c @@ -188,25 +188,21 @@ nfp_net_dump_load_dumpspec(struct nfp_cpp *cpp, struct nfp_rtsym_table *rtbl) const struct nfp_rtsym *specsym; struct nfp_dumpspec *dumpspec; int bytes_read; - u32 cpp_id; + u64 sym_size; specsym = nfp_rtsym_lookup(rtbl, NFP_DUMP_SPEC_RTSYM); if (!specsym) return NULL; + sym_size = nfp_rtsym_size(specsym); /* expected size of this buffer is in the order of tens of kilobytes */ - dumpspec = vmalloc(sizeof(*dumpspec) + specsym->size); + dumpspec = vmalloc(sizeof(*dumpspec) + sym_size); if (!dumpspec) return NULL; + dumpspec->size = sym_size; - dumpspec->size = specsym->size; - - cpp_id = NFP_CPP_ISLAND_ID(specsym->target, NFP_CPP_ACTION_RW, 0, - specsym->domain); - - bytes_read = nfp_cpp_read(cpp, 
cpp_id, specsym->addr, dumpspec->data, - specsym->size); - if (bytes_read != specsym->size) { + bytes_read = nfp_rtsym_read(cpp, specsym, 0, dumpspec->data, sym_size); + if (bytes_read != sym_size) { vfree(dumpspec); nfp_warn(cpp, "Debug dump specification read failed.\n"); return NULL; @@ -266,7 +262,6 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) struct nfp_dumpspec_rtsym *spec_rtsym; const struct nfp_rtsym *sym; u32 tl_len, key_len; - u32 size; spec_rtsym = (struct nfp_dumpspec_rtsym *)spec; tl_len = be32_to_cpu(spec->length); @@ -278,13 +273,8 @@ nfp_calc_rtsym_dump_sz(struct nfp_pf *pf, struct nfp_dump_tl *spec) if (!sym) return nfp_dump_error_tlv_size(spec); - if (sym->type == NFP_RTSYM_TYPE_ABS) - size = sizeof(sym->addr); - else - size = sym->size; - return ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1) + - ALIGN8(size); + ALIGN8(nfp_rtsym_size(sym)); } static int @@ -644,7 +634,6 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, const struct nfp_rtsym *sym; u32 tl_len, key_len; int bytes_read; - u32 cpp_id; void *dest; int err; @@ -657,11 +646,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, if (!sym) return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump); - if (sym->type == NFP_RTSYM_TYPE_ABS) - sym_size = sizeof(sym->addr); - else - sym_size = sym->size; - + sym_size = nfp_rtsym_size(sym); header_size = ALIGN8(offsetof(struct nfp_dump_rtsym, rtsym) + key_len + 1); total_size = header_size + ALIGN8(sym_size); @@ -676,23 +661,20 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec, memcpy(dump_header->rtsym, spec->rtsym, key_len + 1); dump_header->cpp.dump_length = cpu_to_be32(sym_size); - if (sym->type == NFP_RTSYM_TYPE_ABS) { - *(u64 *)dest = sym->addr; - } else { + if (sym->type != NFP_RTSYM_TYPE_ABS) { cpp_params.target = sym->target; cpp_params.action = NFP_CPP_ACTION_RW; cpp_params.token = 0; cpp_params.island = sym->domain; - cpp_id = nfp_get_numeric_cpp_id(&cpp_params); dump_header->cpp.cpp_id = cpp_params; dump_header->cpp.offset = cpu_to_be32(sym->addr); - bytes_read = nfp_cpp_read(pf->cpp, cpp_id, sym->addr, dest, - sym_size); - if (bytes_read != sym_size) { - if (bytes_read >= 0) - bytes_read = -EIO; - dump_header->error = cpu_to_be32(bytes_read); - } + } + + bytes_read = nfp_rtsym_read(pf->cpp, sym, 0, dest, sym_size); + if (bytes_read != sym_size) { + if (bytes_read >= 0) + bytes_read = -EIO; + dump_header->error = cpu_to_be32(bytes_read); } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 28516eecccc8..0b1ac9c234d1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -470,8 +470,8 @@ static void nfp_net_pci_unmap_mem(struct nfp_pf *pf) static int nfp_net_pci_map_mem(struct nfp_pf *pf) { + u32 min_size, cpp_id; u8 __iomem *mem; - u32 min_size; int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; @@ -519,9 +519,9 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vfcfg_tbl2 = NULL; } - mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0, - NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ, - &pf->qc_area); + cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0); + mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id, NFP_PCIE_QUEUE(0), + NFP_QCP_QUEUE_AREA_SZ, &pf->qc_area); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to map Queue Controller area.\n"); err = PTR_ERR(mem); diff --git 
a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index c8d0b1016a64..fd63d83bdea5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -138,6 +138,7 @@ /* The number of explicit BARs to reserve. * Minimum is 0, maximum is 4 on the NFP6000. + * The NFP3800 can have only one per PF. */ #define NFP_PCIE_EXPLICIT_BARS 2 @@ -589,8 +590,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), }; char status_msg[196] = {}; + int i, err, bars_free; struct nfp_bar *bar; - int i, bars_free; int expl_groups; char *msg, *end; @@ -643,6 +644,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) bar->iomem = ioremap_nocache(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { + int pf; + msg += snprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); bars_free--; @@ -651,22 +654,40 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + 0x1000; - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) { - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); - } else { - int pf = nfp->pdev->devfn & 7; - + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: + pf = nfp->pdev->devfn & 7; nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; } nfp->iomem.em = bar->iomem + NFP_PCIE_EM; } - if (nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP4000 || - nfp->pdev->device == PCI_DEVICE_ID_NETRONOME_NFP6000) - expl_groups = 4; - else + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NETRONOME_NFP3800: expl_groups = 1; + break; + case PCI_DEVICE_ID_NETRONOME_NFP4000: + case PCI_DEVICE_ID_NETRONOME_NFP5000: + case PCI_DEVICE_ID_NETRONOME_NFP6000: + expl_groups = 4; + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; + } /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ bar = &nfp->bar[1]; @@ -711,6 +732,11 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars); return 0; + +err_unmap_bar0: + if (nfp->bar[0].iomem) + iounmap(nfp->bar[0].iomem); + return err; } static void disable_bars(struct nfp6000_pcie *nfp) @@ -1327,7 +1353,7 @@ struct nfp_cpp *nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev) /* Finished with card initialization. 
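enable_bars() above now validates the device ID in a switch and, on an unknown part, unwinds the BAR0 ioremap through the new err_unmap_bar0 label. A toy sketch of that reverse-order-cleanup idiom, with invented map/unmap helpers in place of ioremap_nocache()/iounmap():

#include <stdio.h>
#include <stdlib.h>

static void *map_bar0(void)       { return malloc(16); }
static void unmap_bar0(void *bar) { free(bar); }

static int enable_bars(unsigned int device)
{
	void *bar0 = map_bar0();
	int err;

	if (!bar0)
		return -12;			/* -ENOMEM-style failure */

	switch (device) {
	case 0x3800:
	case 0x4000:
	case 0x5000:
	case 0x6000:
		break;				/* supported parts */
	default:
		fprintf(stderr, "Unsupported device ID: %04x!\n", device);
		err = -22;			/* -EINVAL-style failure */
		goto err_unmap_bar0;		/* undo what succeeded */
	}

	/* ... remaining BAR setup would continue here ... */
	unmap_bar0(bar0);
	return 0;

err_unmap_bar0:
	unmap_bar0(bar0);
	return err;
}

int main(void)
{
	printf("%d %d\n", enable_bars(0x4000), enable_bars(0x1234));
	return 0;
}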
*/ dev_info(&pdev->dev, - "Netronome Flow Processor NFP4000/NFP6000 PCIe Card Probe\n"); + "Netronome Flow Processor NFP4000/NFP5000/NFP6000 PCIe Card Probe\n"); pcie_print_link_status(pdev); nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h index c338d539fa96..123e29cba6d1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -56,9 +56,16 @@ dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) #define nfp_dbg(cpp, fmt, args...) \ dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) #define PCI_64BIT_BAR_COUNT 3 +/* NFP hardware vendor/device ids. + */ +#define PCI_DEVICE_ID_NETRONOME_NFP3800 0x3800 + #define NFP_CPP_NUM_TARGETS 16 /* Max size of area it should be safe to request */ #define NFP_CPP_SAFE_AREA_SIZE SZ_2M @@ -226,6 +233,7 @@ void nfp_cpp_free(struct nfp_cpp *cpp); u32 nfp_cpp_model(struct nfp_cpp *cpp); u16 nfp_cpp_interface(struct nfp_cpp *cpp); int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 cpp_id, @@ -286,8 +294,8 @@ int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, unsigned long long address, u64 value); u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area); +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area); struct nfp_cpp_mutex; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 73de57a09800..f7e1d79e735f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -75,6 +75,7 @@ struct nfp_cpp_resource { * @interface: chip interface id we are using to reach it * @serial: chip serial number * @imb_cat_table: CPP Mapping Table + * @mu_locality_lsb: MU access type bit offset * * Following fields use explicit locking: * @resource_list: NFP CPP resource list @@ -100,6 +101,7 @@ struct nfp_cpp { wait_queue_head_t waitq; u32 imb_cat_table[16]; + unsigned int mu_locality_lsb; struct mutex area_cache_mutex; struct list_head area_cache_list; @@ -266,6 +268,34 @@ int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) return sizeof(cpp->serial); } +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 imbcppat; + int res; + + imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + res = nfp_cppat_mu_locality_lsb(mode, addr40); + if (res < 0) + return res; + cpp->mu_locality_lsb = res; + + return 0; +} + +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) +{ + return cpp->mu_locality_lsb; +} + /** * nfp_cpp_area_alloc_with_name() - allocate a new CPP area * @cpp: CPP 
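nfp_cpp_set_mu_locality_lsb() above decodes two fields of the cached IMB table entry for the MU target: a 3-bit address mode at bit 13 and an addr40 flag at bit 12. The decode is pure bit manipulation, so it can be checked in isolation; a small sketch follows, with a made-up register value and shortened macro names standing in for the NFP_IMB_TGTADDRESSMODECFG_* defines.

#include <stdint.h>
#include <stdio.h>

#define TGTADDRMODECFG_MODE(x)   (((x) >> 13) & 0x7)
#define TGTADDRMODECFG_ADDR40(x) (!!((x) & (1u << 12)))

int main(void)
{
	uint32_t imbcppat = (2u << 13) | (1u << 12);	/* made-up value */

	printf("mode=%u addr40=%u\n",
	       (unsigned int)TGTADDRMODECFG_MODE(imbcppat),
	       (unsigned int)TGTADDRMODECFG_ADDR40(imbcppat));
	/* the driver feeds these into nfp_cppat_mu_locality_lsb() and
	 * caches the result once in cpp->mu_locality_lsb */
	return 0;
}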
device handle @@ -1241,6 +1271,12 @@ nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, &mask[1]); + err = nfp_cpp_set_mu_locality_lsb(cpp); + if (err < 0) { + dev_err(parent, "Can't calculate MU locality bit offset\n"); + goto err_out; + } + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c index 20bad05e2e92..03fcde5fa137 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -294,8 +294,7 @@ exit_release: * nfp_cpp_map_area() - Helper function to map an area * @cpp: NFP CPP handler * @name: Name for the area - * @domain: CPP domain - * @target: CPP target + * @cpp_id: CPP ID for operation * @addr: CPP address * @size: Size of the area * @area: Area handle (output) @@ -306,15 +305,12 @@ exit_release: * Return: Pointer to memory mapped area or ERR_PTR */ u8 __iomem * -nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, int domain, int target, - u64 addr, unsigned long size, struct nfp_cpp_area **area) +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area) { u8 __iomem *res; - u32 dest; - dest = NFP_CPP_ISLAND_ID(target, NFP_CPP_ACTION_RW, 0, domain); - - *area = nfp_cpp_area_alloc_acquire(cpp, name, dest, addr, size); + *area = nfp_cpp_area_alloc_acquire(cpp, name, cpp_id, addr, size); if (!*area) goto err_eio; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c index 40510860341b..a164fbc85cd3 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -156,29 +156,6 @@ static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi) return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo); } -#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 -#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) - -static int nfp_mip_mu_locality_lsb(struct nfp_cpp *cpp) -{ - unsigned int mode, addr40; - u32 xpbaddr, imbcppat; - int err; - - /* Hardcoded XPB IMB Base, island 0 */ - xpbaddr = 0x000a0000 + NFP_CPP_TARGET_MU * 4; - err = nfp_xpb_readl(cpp, xpbaddr, &imbcppat); - if (err < 0) - return err; - - mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); - addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); - - return nfp_cppat_mu_locality_lsb(mode, addr40); -} - static unsigned int nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr) { @@ -304,14 +281,7 @@ int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off) *off = nffw_fwinfo_mip_offset_get(fwinfo); if (nffw_fwinfo_mip_mu_da_get(fwinfo)) { - int locality_off; - - if (NFP_CPP_ID_TARGET_of(*cpp_id) != NFP_CPP_TARGET_MU) - return 0; - - locality_off = nfp_mip_mu_locality_lsb(state->cpp); - if (locality_off < 0) - return locality_off; + int locality_off = nfp_cpp_mu_locality_lsb(state->cpp); *off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); *off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h 
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index df599d5b6bb3..8d2cbdf4d517 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -61,10 +61,12 @@ void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); /* Implemented in nfp_rtsym.c */ -#define NFP_RTSYM_TYPE_NONE 0 -#define NFP_RTSYM_TYPE_OBJECT 1 -#define NFP_RTSYM_TYPE_FUNCTION 2 -#define NFP_RTSYM_TYPE_ABS 3 +enum nfp_rtsym_type { + NFP_RTSYM_TYPE_NONE = 0, + NFP_RTSYM_TYPE_OBJECT = 1, + NFP_RTSYM_TYPE_FUNCTION = 2, + NFP_RTSYM_TYPE_ABS = 3, +}; #define NFP_RTSYM_TARGET_NONE 0 #define NFP_RTSYM_TARGET_LMEM -1 @@ -83,7 +85,7 @@ struct nfp_rtsym { const char *name; u64 addr; u64 size; - int type; + enum nfp_rtsym_type type; int target; int domain; }; @@ -98,6 +100,32 @@ const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); const struct nfp_rtsym * nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); +u64 nfp_rtsym_size(const struct nfp_rtsym *rtsym); +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value); +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value); +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value); +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value); +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value); +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value); +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value); +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value); + u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int *error); int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 2abee0fe3a7c..bf593a6b26a1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -87,6 +87,11 @@ #define NSP_CODE_MAJOR GENMASK(15, 12) #define NSP_CODE_MINOR GENMASK(11, 0) +#define NFP_FW_LOAD_RET_MAJOR GENMASK(15, 8) +#define NFP_FW_LOAD_RET_MINOR GENMASK(23, 16) + +#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0) + enum nfp_nsp_cmd { SPCODE_NOOP = 0, /* No operation */ SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ @@ -100,6 +105,8 @@ enum nfp_nsp_cmd { SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ + SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ + SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. 
*/ }; static const struct { @@ -127,6 +134,40 @@ struct nfp_nsp { void *entries; }; +/** + * struct nfp_nsp_command_arg - NFP command argument structure + * @code: NFP SP Command Code + * @timeout_sec: Timeout value to wait for completion in seconds + * @option: NFP SP Command Argument + * @buff_cpp: NFP SP Buffer CPP Address info + * @buff_addr: NFP SP Buffer Host address + * @error_cb: Callback for interpreting option if error occurred + */ +struct nfp_nsp_command_arg { + u16 code; + unsigned int timeout_sec; + u32 option; + u32 buff_cpp; + u64 buff_addr; + void (*error_cb)(struct nfp_nsp *state, u32 ret_val); +}; + +/** + * struct nfp_nsp_command_buf_arg - NFP command with buffer argument structure + * @arg: NFP command argument structure + * @in_buf: Buffer with data for input + * @in_size: Size of @in_buf + * @out_buf: Buffer for output data + * @out_size: Size of @out_buf + */ +struct nfp_nsp_command_buf_arg { + struct nfp_nsp_command_arg arg; + const void *in_buf; + unsigned int in_size; + void *out_buf; + unsigned int out_size; +}; + struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) { return state->cpp; @@ -291,11 +332,7 @@ nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr, /** * __nfp_nsp_command() - Execute a command on the NFP Service Processor * @state: NFP SP state - * @code: NFP SP Command Code - * @option: NFP SP Command Argument - * @buff_cpp: NFP SP Buffer CPP Address info - * @buff_addr: NFP SP Buffer Host address - * @timeout_sec:Timeout value to wait for completion in seconds + * @arg: NFP command argument structure * * Return: 0 for success with no result * * -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete */ static int -__nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr, u32 timeout_sec) +__nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) { u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command; struct nfp_cpp *cpp = state->cpp; @@ -326,22 +362,22 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, if (err) return err; - if (!FIELD_FIT(NSP_BUFFER_CPP, buff_cpp >> 8) || - !FIELD_FIT(NSP_BUFFER_ADDRESS, buff_addr)) { + if (!FIELD_FIT(NSP_BUFFER_CPP, arg->buff_cpp >> 8) || + !FIELD_FIT(NSP_BUFFER_ADDRESS, arg->buff_addr)) { nfp_err(cpp, "Host buffer out of reach %08x %016llx\n", - buff_cpp, buff_addr); + arg->buff_cpp, arg->buff_addr); return -EINVAL; } err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, - FIELD_PREP(NSP_BUFFER_CPP, buff_cpp >> 8) | - FIELD_PREP(NSP_BUFFER_ADDRESS, buff_addr)); + FIELD_PREP(NSP_BUFFER_CPP, arg->buff_cpp >> 8) | + FIELD_PREP(NSP_BUFFER_ADDRESS, arg->buff_addr)); if (err < 0) return err; err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, - FIELD_PREP(NSP_COMMAND_OPTION, option) | - FIELD_PREP(NSP_COMMAND_CODE, code) | + FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_START, 1)); if (err < 0) return err; @@ -351,16 +387,16 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT); if (err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n", - err, code); + err, arg->code); return err; } /* Wait for NSP_STATUS_BUSY to go to 0 */ err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY, - 0, timeout_sec); + 0, arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT); if
(err) { nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n", - err, code); + err, arg->code); return err; } @@ -372,26 +408,28 @@ __nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, err = FIELD_GET(NSP_STATUS_RESULT, reg); if (err) { nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n", - -err, (int)ret_val, code); - nfp_nsp_print_extended_error(state, ret_val); + -err, (int)ret_val, arg->code); + if (arg->error_cb) + arg->error_cb(state, ret_val); + else + nfp_nsp_print_extended_error(state, ret_val); return -err; } return ret_val; } -static int -nfp_nsp_command(struct nfp_nsp *state, u16 code, u32 option, u32 buff_cpp, - u64 buff_addr) +static int nfp_nsp_command(struct nfp_nsp *state, u16 code) { - return __nfp_nsp_command(state, code, option, buff_cpp, buff_addr, - NFP_NSP_TIMEOUT_DEFAULT); + const struct nfp_nsp_command_arg arg = { + .code = code, + }; + + return __nfp_nsp_command(state, &arg); } static int -__nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size, u32 timeout_sec) +nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg) { struct nfp_cpp *cpp = nsp->cpp; unsigned int max_size; @@ -401,7 +439,7 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (nsp->ver.minor < 13) { nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n", - code, nsp->ver.major, nsp->ver.minor); + arg->arg.code, nsp->ver.major, nsp->ver.minor); return -EOPNOTSUPP; } @@ -412,10 +450,11 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, if (err < 0) return err; - max_size = max(in_size, out_size); + max_size = max(arg->in_size, arg->out_size); if (FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M < max_size) { nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%llu < %u)\n", - code, FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, + arg->arg.code, + FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M, max_size); return -EINVAL; } @@ -430,27 +469,30 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8; cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg); - if (in_buf && in_size) { - err = nfp_cpp_write(cpp, cpp_id, cpp_buf, in_buf, in_size); + if (arg->in_buf && arg->in_size) { + err = nfp_cpp_write(cpp, cpp_id, cpp_buf, + arg->in_buf, arg->in_size); if (err < 0) return err; } /* Zero out remaining part of the buffer */ - if (out_buf && out_size && out_size > in_size) { - memset(out_buf, 0, out_size - in_size); - err = nfp_cpp_write(cpp, cpp_id, cpp_buf + in_size, - out_buf, out_size - in_size); + if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) { + memset(arg->out_buf, 0, arg->out_size - arg->in_size); + err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size, + arg->out_buf, arg->out_size - arg->in_size); if (err < 0) return err; } - ret = __nfp_nsp_command(nsp, code, option, cpp_id, cpp_buf, - timeout_sec); + arg->arg.buff_cpp = cpp_id; + arg->arg.buff_addr = cpp_buf; + ret = __nfp_nsp_command(nsp, &arg->arg); if (ret < 0) return ret; - if (out_buf && out_size) { - err = nfp_cpp_read(cpp, cpp_id, cpp_buf, out_buf, out_size); + if (arg->out_buf && arg->out_size) { + err = nfp_cpp_read(cpp, cpp_id, cpp_buf, + arg->out_buf, arg->out_size); if (err < 0) return err; } @@ -458,16 +500,6 @@ __nfp_nsp_command_buf(struct nfp_nsp *nsp, u16 code, u32 option, return ret; } -static int -nfp_nsp_command_buf(struct 
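The conversion above replaces __nfp_nsp_command()'s five positional parameters with a single argument struct, so call sites fill only the fields they care about via designated initializers and zeroed fields fall back to defaults (note the arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT fallback above). A stand-alone sketch of the same pattern; the names here are invented, not the driver's API:

#include <stdio.h>

#define TIMEOUT_DEFAULT 30	/* plays the role of NFP_NSP_TIMEOUT_DEFAULT */

struct cmd_arg {		/* invented mirror of nfp_nsp_command_arg */
	unsigned short code;
	unsigned int timeout_sec;	/* 0 means "use the default" */
	void (*error_cb)(unsigned int ret_val);
};

static int run_cmd(const struct cmd_arg *arg)
{
	unsigned int timeout = arg->timeout_sec ? arg->timeout_sec
						: TIMEOUT_DEFAULT;

	printf("code=%#x timeout=%us cb=%s\n", (unsigned int)arg->code,
	       timeout, arg->error_cb ? "custom" : "default");
	return 0;
}

int main(void)
{
	/* unset fields stay zero, exactly like the designated initializers
	 * used by nfp_nsp_command() and friends */
	return run_cmd(&(struct cmd_arg){ .code = 11, .timeout_sec = 175 });
}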
nfp_nsp *nsp, u16 code, u32 option, - const void *in_buf, unsigned int in_size, void *out_buf, - unsigned int out_size) -{ - return __nfp_nsp_command_buf(nsp, code, option, in_buf, in_size, - out_buf, out_size, - NFP_NSP_TIMEOUT_DEFAULT); -} - int nfp_nsp_wait(struct nfp_nsp *state) { const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ; @@ -479,7 +511,7 @@ int nfp_nsp_wait(struct nfp_nsp *state) for (;;) { const unsigned long start_time = jiffies; - err = nfp_nsp_command(state, SPCODE_NOOP, 0, 0, 0); + err = nfp_nsp_command(state, SPCODE_NOOP); if (err != -EAGAIN) break; @@ -501,53 +533,211 @@ int nfp_nsp_wait(struct nfp_nsp *state) int nfp_nsp_device_soft_reset(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_SOFT_RESET); } int nfp_nsp_mac_reinit(struct nfp_nsp *state) { - return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); + return nfp_nsp_command(state, SPCODE_MAC_INIT); +} + +static void nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, u32 ret_val) +{ + static const char * const major_msg[] = { + /* 0 */ "Firmware from driver loaded", + /* 1 */ "Firmware from flash loaded", + /* 2 */ "Firmware loading failure", + }; + static const char * const minor_msg[] = { + /* 0 */ "", + /* 1 */ "no named partition on flash", + /* 2 */ "error reading from flash", + /* 3 */ "can not deflate", + /* 4 */ "not a trusted file", + /* 5 */ "can not parse FW file", + /* 6 */ "MIP not found in FW file", + /* 7 */ "null firmware name in MIP", + /* 8 */ "FW version none", + /* 9 */ "FW build number none", + /* 10 */ "no FW selection policy HWInfo key found", + /* 11 */ "static FW selection policy", + /* 12 */ "FW version has precedence", + /* 13 */ "different FW application load requested", + /* 14 */ "development build", + }; + unsigned int major, minor; + const char *level; + + major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val); + minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val); + + if (!nfp_nsp_has_stored_fw_load(state)) + return; + + /* Lower the message level in legacy case */ + if (major == 0 && (minor == 0 || minor == 10)) + level = KERN_DEBUG; + else if (major == 2) + level = KERN_ERR; + else + level = KERN_INFO; + + if (major >= ARRAY_SIZE(major_msg)) + nfp_printk(level, state->cpp, "FW loading status: %x\n", + ret_val); + else if (minor >= ARRAY_SIZE(minor_msg)) + nfp_printk(level, state->cpp, "%s, reason code: %d\n", + major_msg[major], minor); + else + nfp_printk(level, state->cpp, "%s%c %s\n", + major_msg[major], minor ? ',' : '.', + minor_msg[minor]); } int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) { - return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, - fw->size, NULL, 0); + struct nfp_nsp_command_buf_arg load_fw = { + { + .code = SPCODE_FW_LOAD, + .option = fw->size, + .error_cb = nfp_nsp_load_fw_extended_msg, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + int ret; + + ret = nfp_nsp_command_buf(state, &load_fw); + if (ret < 0) + return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; } int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw) { - /* The flash time is specified to take a maximum of 70s so we add an - * additional factor to this spec time. 
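nfp_nsp_load_fw_extended_msg() above unpacks the NSP return value into a major status (bits 15:8) and a minor reason code (bits 23:16) and indexes two message tables, falling back to raw values when an index is out of range. A compilable sketch of the decode with a shortened major table; the FW_LOAD_RET_* macros mirror NFP_FW_LOAD_RET_MAJOR/MINOR above:

#include <stdint.h>
#include <stdio.h>

#define FW_LOAD_RET_MAJOR(r) (((r) >> 8) & 0xff)
#define FW_LOAD_RET_MINOR(r) (((r) >> 16) & 0xff)

static const char * const major_msg[] = {
	"Firmware from driver loaded",
	"Firmware from flash loaded",
	"Firmware loading failure",
};

int main(void)
{
	uint32_t ret_val = (1u << 8) | (12u << 16);	/* sample value */
	unsigned int major = FW_LOAD_RET_MAJOR(ret_val);
	unsigned int minor = FW_LOAD_RET_MINOR(ret_val);

	if (major < sizeof(major_msg) / sizeof(major_msg[0]))
		printf("%s, reason code: %u\n", major_msg[major], minor);
	else
		printf("FW loading status: %x\n", (unsigned int)ret_val);
	return 0;
}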
- */ - u32 timeout_sec = 2.5 * 70; - - return __nfp_nsp_command_buf(state, SPCODE_NSP_WRITE_FLASH, fw->size, - fw->data, fw->size, NULL, 0, timeout_sec); + struct nfp_nsp_command_buf_arg write_flash = { + { + .code = SPCODE_NSP_WRITE_FLASH, + .option = fw->size, + /* The flash time is specified to take a maximum of 70s + * so we add an additional factor to this spec time. + */ + .timeout_sec = 2.5 * 70, + }, + .in_buf = fw->data, + .in_size = fw->size, + }; + + return nfp_nsp_command_buf(state, &write_flash); } int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_RESCAN, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg eth_rescan = { + { + .code = SPCODE_ETH_RESCAN, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_rescan); } int nfp_nsp_write_eth_table(struct nfp_nsp *state, const void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_ETH_CONTROL, size, buf, size, - NULL, 0); + struct nfp_nsp_command_buf_arg eth_ctrl = { + { + .code = SPCODE_ETH_CONTROL, + .option = size, + }, + .in_buf = buf, + .in_size = size, + }; + + return nfp_nsp_command_buf(state, &eth_ctrl); } int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_IDENTIFY, size, NULL, 0, - buf, size); + struct nfp_nsp_command_buf_arg identify = { + { + .code = SPCODE_NSP_IDENTIFY, + .option = size, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &identify); } int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, void *buf, unsigned int size) { - return nfp_nsp_command_buf(state, SPCODE_NSP_SENSORS, sensor_mask, - NULL, 0, buf, size); + struct nfp_nsp_command_buf_arg sensors = { + { + .code = SPCODE_NSP_SENSORS, + .option = sensor_mask, + }, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &sensors); +} + +int nfp_nsp_load_stored_fw(struct nfp_nsp *state) +{ + const struct nfp_nsp_command_arg arg = { + .code = SPCODE_FW_STORED, + .error_cb = nfp_nsp_load_fw_extended_msg, + }; + int ret; + + ret = __nfp_nsp_command(state, &arg); + if (ret < 0) + return ret; + + nfp_nsp_load_fw_extended_msg(state, ret); + return 0; +} + +static int +__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg hwinfo_lookup = { + { + .code = SPCODE_HWINFO_LOOKUP, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &hwinfo_lookup); +} + +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size) +{ + int err; + + size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE); + + err = __nfp_nsp_hwinfo_lookup(state, buf, size); + if (err) + return err; + + if (strnlen(buf, size) == size) { + nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n"); + return -EINVAL; + } + + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index f23d9e06f097..bd6c9071c8e9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -50,12 +50,24 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int
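nfp_nsp_hwinfo_lookup() above reuses one buffer for the key in and the value out, then rejects a value that fills the whole buffer without a terminating NUL, since the NSP may have truncated it. The validation in isolation:

#include <stdio.h>
#include <string.h>

static int check_hwinfo_value(const char *buf, size_t size)
{
	/* a value occupying the whole buffer has no NUL: treat as truncated */
	if (strnlen(buf, size) == size)
		return -1;
	return 0;
}

int main(void)
{
	char ok[8] = "aa:bb";			/* terminator fits */
	char full[4] = { 'f', 'u', 'l', 'l' };	/* no room for the NUL */

	printf("%d %d\n", check_hwinfo_value(ok, sizeof(ok)),
	       check_hwinfo_value(full, sizeof(full)));
	return 0;
}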
nfp_nsp_mac_reinit(struct nfp_nsp *state); +int nfp_nsp_load_stored_fw(struct nfp_nsp *state); +int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size); static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_get_abi_ver_minor(state) > 20; } +static inline bool nfp_nsp_has_stored_fw_load(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 23; +} + +static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 24; +} + enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, NFP_INTERFACE_SFP = 1, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 9e34216578da..1ad0a015572e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -39,6 +39,8 @@ * Espen Skoglund <espen.skoglund@netronome.com> * Francois H. Theron <francois.theron@netronome.com> */ + +#include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -233,6 +235,229 @@ nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) return NULL; } +u64 nfp_rtsym_size(const struct nfp_rtsym *sym) +{ + switch (sym->type) { + case NFP_RTSYM_TYPE_NONE: + pr_err("rtsym '%s': type NONE\n", sym->name); + return 0; + default: + pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); + /* fall through */ + case NFP_RTSYM_TYPE_OBJECT: + case NFP_RTSYM_TYPE_FUNCTION: + return sym->size; + case NFP_RTSYM_TYPE_ABS: + return sizeof(u64); + } +} + +static int +nfp_rtsym_to_dest(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *cpp_id, u64 *addr) +{ + if (sym->type != NFP_RTSYM_TYPE_OBJECT) { + nfp_err(cpp, "rtsym '%s': direct access to non-object rtsym\n", + sym->name); + return -EINVAL; + } + + *addr = sym->addr + off; + + if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { + int locality_off = nfp_cpp_mu_locality_lsb(cpp); + + *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + + *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, + sym->domain); + } else if (sym->target < 0) { + nfp_err(cpp, "rtsym '%s': unhandled target encoding: %d\n", + sym->name, sym->target); + return -EINVAL; + } else { + *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, + sym->domain); + } + + return 0; +} + +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': read out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + u8 tmp[8]; + + put_unaligned_le64(sym->addr, tmp); + memcpy(buf, &tmp[off], len); + + return len; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_read(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_read(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value) +{ + u32 cpp_id; + u64 
addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readl out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readl(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value) +{ + return __nfp_rtsym_readl(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + *value = sym->addr; + return 0; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value) +{ + return __nfp_rtsym_readq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': write out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_write(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_write(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writel out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writel(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value) +{ + return __nfp_rtsym_writel(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writeq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writeq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value) +{ + return __nfp_rtsym_writeq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + /** * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol * @rtbl: NFP RTsym table @@ -249,7 +474,7 @@ u64 nfp_rtsym_read_le(struct 
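The new accessors above share two behaviours worth noting: reads and writes are clamped to the symbol size (out-of-range offsets fail), and ABS symbols are served from the symbol's own 64-bit address, encoded little-endian, instead of issuing a CPP transaction. A userspace model of the read path follows; rtsym_model and sym_read() are stand-ins for the driver's struct nfp_rtsym and __nfp_rtsym_read():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rtsym_model {		/* stand-in for struct nfp_rtsym */
	int is_abs;
	uint64_t addr;
	uint64_t size;
	const uint8_t *data;
};

static long sym_read(const struct rtsym_model *s, uint64_t off,
		     void *buf, size_t len)
{
	uint64_t size = s->is_abs ? sizeof(uint64_t) : s->size;

	if (off > size)
		return -1;		/* out of bounds, like -ENXIO */
	if (len > size - off)
		len = size - off;	/* clamp, as __nfp_rtsym_read does */

	if (s->is_abs) {		/* ABS: value is the address itself */
		uint8_t tmp[8];
		int i;

		for (i = 0; i < 8; i++)	/* little-endian on any host */
			tmp[i] = (uint8_t)(s->addr >> (8 * i));
		memcpy(buf, tmp + off, len);
	} else {
		memcpy(buf, s->data + off, len);
	}
	return (long)len;
}

int main(void)
{
	struct rtsym_model abs_sym = { .is_abs = 1,
				       .addr = 0x1122334455667788ULL };
	uint8_t b[8] = { 0 };
	long n = sym_read(&abs_sym, 0, b, sizeof(b));

	printf("read=%ld first byte=%02x\n", n, b[0]);	/* 0x88 */
	return 0;
}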
nfp_rtsym_table *rtbl, const char *name, int *error) { const struct nfp_rtsym *sym; - u32 val32, id; + u32 val32; u64 val; int err; @@ -259,20 +484,18 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, goto exit; } - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_readl(rtbl->cpp, id, sym->addr, &val32); + err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32); val = val32; break; case 8: - err = nfp_cpp_readq(rtbl->cpp, id, sym->addr, &val); + err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -303,25 +526,22 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, { const struct nfp_rtsym *sym; int err; - u32 id; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return -ENOENT; - id = NFP_CPP_ISLAND_ID(sym->target, NFP_CPP_ACTION_RW, 0, sym->domain); - - switch (sym->size) { + switch (nfp_rtsym_size(sym)) { case 4: - err = nfp_cpp_writel(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value); break; case 8: - err = nfp_cpp_writeq(rtbl->cpp, id, sym->addr, value); + err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); break; default: nfp_err(rtbl->cpp, - "rtsym '%s' unsupported or non-scalar size: %lld\n", - name, sym->size); + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); err = -EINVAL; break; } @@ -335,20 +555,29 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, { const struct nfp_rtsym *sym; u8 __iomem *mem; + u32 cpp_id; + u64 addr; + int err; sym = nfp_rtsym_lookup(rtbl, name); if (!sym) return (u8 __iomem *)ERR_PTR(-ENOENT); + err = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, + &cpp_id, &addr); + if (err) { + nfp_err(rtbl->cpp, "rtsym '%s': mapping failed\n", name); + return (u8 __iomem *)ERR_PTR(err); + } + if (sym->size < min_size) { - nfp_err(rtbl->cpp, "Symbol %s too small\n", name); + nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, sym->domain, sym->target, - sym->addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); if (IS_ERR(mem)) { - nfp_err(rtbl->cpp, "Failed to map symbol %s: %ld\n", + nfp_err(rtbl->cpp, "rtsym '%s': failed to map: %ld\n", name, PTR_ERR(mem)); return mem; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c index 4ea1e585d945..f691c6587c76 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -39,7 +39,11 @@ * Francois H.
Theron <francois.theron@netronome.com> */ +#define pr_fmt(fmt) "NFP target: " fmt + #include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/printk.h> #include "nfp_cpp.h" @@ -733,8 +737,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, u32 imb; int err; - if (target < 0 || target >= 16) + if (target < 0 || target >= 16) { + pr_err("Invalid CPP target: %d\n", target); return -EINVAL; + } if (island == 0) { /* Already translated */ @@ -753,8 +759,10 @@ int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, err = nfp_cppat_addr_encode(cpp_target_address, island, target, ((imb >> 13) & 7), ((imb >> 12) & 1), ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); - if (err) + if (err) { + pr_err("Can't encode CPP address: %d\n", err); return err; + } *cpp_target_id = NFP_CPP_ID(target, NFP_CPP_ID_ACTION_of(cpp_island_id), diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig index aa41e5f6e437..c73978474c4b 100644 --- a/drivers/net/ethernet/ni/Kconfig +++ b/drivers/net/ethernet/ni/Kconfig @@ -18,8 +18,9 @@ if NET_VENDOR_NI config NI_XGE_MANAGEMENT_ENET tristate "National Instruments XGE management enet support" - depends on ARCH_ZYNQ + depends on HAS_IOMEM && HAS_DMA select PHYLIB + select OF_MDIO if OF help Simple LAN device for debug or management purposes. Can support either 10G or 1G PHYs via SFP+ ports. diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 76efed058f33..74cf52e3fb09 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -106,10 +106,10 @@ (NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) struct nixge_hw_dma_bd { - u32 next; - u32 reserved1; - u32 phys; - u32 reserved2; + u32 next_lo; + u32 next_hi; + u32 phys_lo; + u32 phys_hi; u32 reserved3; u32 reserved4; u32 cntrl; @@ -119,11 +119,39 @@ struct nixge_hw_dma_bd { u32 app2; u32 app3; u32 app4; - u32 sw_id_offset; - u32 reserved5; + u32 sw_id_offset_lo; + u32 sw_id_offset_hi; u32 reserved6; }; +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + do { \ + (bd)->field##_lo = lower_32_bits(((u64)addr)); \ + (bd)->field##_hi = upper_32_bits(((u64)addr)); \ + } while (0) +#else +#define nixge_hw_dma_bd_set_addr(bd, field, addr) \ + ((bd)->field##_lo = lower_32_bits((addr))) +#endif + +#define nixge_hw_dma_bd_set_phys(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), phys, (addr)) + +#define nixge_hw_dma_bd_set_next(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), next, (addr)) + +#define nixge_hw_dma_bd_set_offset(bd, addr) \ + nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr)) + +#ifdef CONFIG_PHYS_ADDR_T_64BIT +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo)) +#else +#define nixge_hw_dma_bd_get_addr(bd, field) \ + (dma_addr_t)((bd)->field##_lo) +#endif + struct nixge_tx_skb { struct sk_buff *skb; dma_addr_t mapping; @@ -176,6 +204,15 @@ static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val) writel(val, priv->dma_regs + offset); } +static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset, + dma_addr_t addr) +{ + writel(lower_32_bits(addr), priv->dma_regs + offset); +#ifdef CONFIG_PHYS_ADDR_T_64BIT + writel(upper_32_bits(addr), priv->dma_regs + offset + 4); +#endif +} + static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset) { return readl(priv->dma_regs + offset); @@ -202,13 +239,22 @@ static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset) static 
void nixge_hw_dma_bd_release(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); + dma_addr_t phys_addr; + struct sk_buff *skb; int i; for (i = 0; i < RX_BD_NUM; i++) { - dma_unmap_single(ndev->dev.parent, priv->rx_bd_v[i].phys, - NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); - dev_kfree_skb((struct sk_buff *) - (priv->rx_bd_v[i].sw_id_offset)); + phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + phys); + + dma_unmap_single(ndev->dev.parent, phys_addr, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + skb = (struct sk_buff *) + nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i], + sw_id_offset); + dev_kfree_skb(skb); } if (priv->rx_bd_v) @@ -231,6 +277,7 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) { struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb; + dma_addr_t phys; u32 cr; int i; @@ -259,27 +306,30 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) goto out; for (i = 0; i < TX_BD_NUM; i++) { - priv->tx_bd_v[i].next = priv->tx_bd_p + - sizeof(*priv->tx_bd_v) * - ((i + 1) % TX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i], + priv->tx_bd_p + + sizeof(*priv->tx_bd_v) * + ((i + 1) % TX_BD_NUM)); } for (i = 0; i < RX_BD_NUM; i++) { - priv->rx_bd_v[i].next = priv->rx_bd_p + - sizeof(*priv->rx_bd_v) * - ((i + 1) % RX_BD_NUM); + nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i], + priv->rx_bd_p + + sizeof(*priv->rx_bd_v) * + ((i + 1) % RX_BD_NUM)); skb = netdev_alloc_skb_ip_align(ndev, NIXGE_MAX_JUMBO_FRAME_SIZE); if (!skb) goto out; - priv->rx_bd_v[i].sw_id_offset = (u32)skb; - priv->rx_bd_v[i].phys = - dma_map_single(ndev->dev.parent, - skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); + nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], skb); + phys = dma_map_single(ndev->dev.parent, skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + + nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys); + priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; } @@ -312,18 +362,18 @@ static int nixge_hw_dma_bd_init(struct net_device *ndev) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p + (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. But only after we write to the * tail pointer register that the Tx channel will start transmitting. 
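The nixge descriptor rework above splits every stored address into a _lo/_hi pair so the same layout can hold a 64-bit dma_addr_t; on 32-bit configurations only the low word is written. The macros boil down to a shift-and-or round trip, sketched here in plain C for the 64-bit case (bd_set_phys/bd_get_phys are simplified stand-ins for the nixge_hw_dma_bd_* macros):

#include <stdint.h>
#include <stdio.h>

struct bd {		/* just the address pair from nixge_hw_dma_bd */
	uint32_t phys_lo;
	uint32_t phys_hi;
};

#define bd_set_phys(bd, a)						\
	do {								\
		(bd)->phys_lo = (uint32_t)((uint64_t)(a));		\
		(bd)->phys_hi = (uint32_t)((uint64_t)(a) >> 32);	\
	} while (0)

#define bd_get_phys(bd)							\
	(((uint64_t)(bd)->phys_hi << 32) | (bd)->phys_lo)

int main(void)
{
	struct bd bd;
	uint64_t dma = 0x12f000000ULL;		/* sits above 4 GiB */

	bd_set_phys(&bd, dma);
	printf("lo=%#x hi=%#x round-trip=%#llx\n",
	       (unsigned int)bd.phys_lo, (unsigned int)bd.phys_hi,
	       (unsigned long long)bd_get_phys(&bd));
	return 0;
}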
*/ - nixge_dma_write_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); @@ -451,7 +501,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct nixge_priv *priv = netdev_priv(ndev); struct nixge_hw_dma_bd *cur_p; struct nixge_tx_skb *tx_skb; - dma_addr_t tail_p; + dma_addr_t tail_p, cur_phys; skb_frag_t *frag; u32 num_frag; u32 ii; @@ -466,15 +516,16 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } - cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, - skb_headlen(skb), DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = dma_map_single(ndev->dev.parent, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto drop; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_headlen(skb); tx_skb->mapped_as_page = false; @@ -485,16 +536,17 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tx_skb = &priv->tx_skb[priv->tx_bd_tail]; frag = &skb_shinfo(skb)->frags[ii]; - cur_p->phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, - skb_frag_size(frag), - DMA_TO_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) + cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) goto frag_err; + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = skb_frag_size(frag); tx_skb->skb = NULL; - tx_skb->mapping = cur_p->phys; + tx_skb->mapping = cur_phys; tx_skb->size = skb_frag_size(frag); tx_skb->mapped_as_page = true; } @@ -506,7 +558,7 @@ static int nixge_start_xmit(struct sk_buff *skb, struct net_device *ndev) tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail; /* Start the transfer */ - nixge_dma_write_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p); ++priv->tx_bd_tail; priv->tx_bd_tail %= TX_BD_NUM; @@ -537,7 +589,7 @@ static int nixge_recv(struct net_device *ndev, int budget) struct nixge_priv *priv = netdev_priv(ndev); struct sk_buff *skb, *new_skb; struct nixge_hw_dma_bd *cur_p; - dma_addr_t tail_p = 0; + dma_addr_t tail_p = 0, cur_phys = 0; u32 packets = 0; u32 length = 0; u32 size = 0; @@ -549,13 +601,15 @@ static int nixge_recv(struct net_device *ndev, int budget) tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) * priv->rx_bd_ci; - skb = (struct sk_buff *)(cur_p->sw_id_offset); + skb = (struct sk_buff *)nixge_hw_dma_bd_get_addr(cur_p, + sw_id_offset); length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; if (length > NIXGE_MAX_JUMBO_FRAME_SIZE) length = NIXGE_MAX_JUMBO_FRAME_SIZE; - dma_unmap_single(ndev->dev.parent, cur_p->phys, + dma_unmap_single(ndev->dev.parent, + nixge_hw_dma_bd_get_addr(cur_p, phys), NIXGE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE); @@ -579,16 +633,17 @@ static int nixge_recv(struct net_device *ndev, int budget) if (!new_skb) return packets; - cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, - NIXGE_MAX_JUMBO_FRAME_SIZE, - DMA_FROM_DEVICE); - if (dma_mapping_error(ndev->dev.parent, cur_p->phys)) { + cur_phys = dma_map_single(ndev->dev.parent, 
new_skb->data, + NIXGE_MAX_JUMBO_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, cur_phys)) { /* FIXME: bail out and clean up */ netdev_err(ndev, "Failed to map ...\n"); } + nixge_hw_dma_bd_set_phys(cur_p, cur_phys); cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE; cur_p->status = 0; - cur_p->sw_id_offset = (u32)new_skb; + nixge_hw_dma_bd_set_offset(cur_p, new_skb); ++priv->rx_bd_ci; priv->rx_bd_ci %= RX_BD_NUM; @@ -599,7 +654,7 @@ static int nixge_recv(struct net_device *ndev, int budget) ndev->stats.rx_bytes += size; if (tail_p) - nixge_dma_write_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); + nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p); return packets; } @@ -637,6 +692,7 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET); @@ -650,9 +706,11 @@ static irqreturn_t nixge_tx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci], + phys); + netdev_err(ndev, "DMA Tx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->tx_bd_v[priv->tx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -678,6 +736,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) struct nixge_priv *priv = netdev_priv(_ndev); struct net_device *ndev = _ndev; unsigned int status; + dma_addr_t phys; u32 cr; status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET); @@ -697,9 +756,10 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev) return IRQ_NONE; } if (status & XAXIDMA_IRQ_ERROR_MASK) { + phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci], + phys); netdev_err(ndev, "DMA Rx error 0x%x\n", status); - netdev_err(ndev, "Current BD is at: 0x%x\n", - (priv->rx_bd_v[priv->rx_bd_ci]).phys); + netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys); cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET); /* Disable coalesce, delay timer and error interrupts */ @@ -735,10 +795,10 @@ static void nixge_dma_err_handler(unsigned long data) tx_skb = &lp->tx_skb[i]; nixge_tx_skb_unmap(lp, tx_skb); - cur_p->phys = 0; + nixge_hw_dma_bd_set_phys(cur_p, 0); cur_p->cntrl = 0; cur_p->status = 0; - cur_p->sw_id_offset = 0; + nixge_hw_dma_bd_set_offset(cur_p, 0); } for (i = 0; i < RX_BD_NUM; i++) { @@ -779,18 +839,18 @@ static void nixge_dma_err_handler(unsigned long data) /* Populate the tail pointer and bring the Rx Axi DMA engine out of * halted state. This will make the Rx side ready for reception. */ - nixge_dma_write_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); - nixge_dma_write_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + + nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); /* Write to the RS (Run-stop) bit in the Tx channel control register. * Tx channel is now ready to run. 
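The bring-up sequence repeated in nixge_hw_dma_bd_init() and nixge_dma_err_handler() above has a fixed order: program the current-descriptor register, set the RUNSTOP bit in the channel control register, and only then write the tail pointer, which is what actually starts the engine. A stub-register sketch of that order; the offsets are illustrative rather than the XAXIDMA_* values, and a 64-bit build would write each pointer as a lo/hi pair via nixge_dma_write_desc_reg():

#include <stdint.h>
#include <stdio.h>

#define RX_CR      0x00u	/* illustrative offsets */
#define RX_CDESC   0x08u
#define RX_TDESC   0x10u
#define CR_RUNSTOP 0x1u

static uint32_t regs[8];	/* fake register file */

static void wr(unsigned int off, uint32_t v) { regs[off / 4] = v; }
static uint32_t rd(unsigned int off)         { return regs[off / 4]; }

int main(void)
{
	uint32_t ring = 0x1000, bd_sz = 64, num_bd = 128;

	wr(RX_CDESC, ring);			/* 1. current descriptor */
	wr(RX_CR, rd(RX_CR) | CR_RUNSTOP);	/* 2. leave halted state */
	wr(RX_TDESC, ring + bd_sz * (num_bd - 1)); /* 3. tail pointer: go */

	printf("cr=%#x tail=%#x\n", (unsigned int)rd(RX_CR),
	       (unsigned int)rd(RX_TDESC));
	return 0;
}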
But only after we write to the * tail pointer register that the Tx channel will start transmitting */ - nixge_dma_write_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); + nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET); nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr | XAXIDMA_CR_RUNSTOP_MASK); diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 08381ef8bdb4..8b23d2848457 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -797,8 +797,7 @@ static int lpc_mii_probe(struct net_device *ndev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); phydev->advertising = phydev->supported; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index a60e1c8d470a..5f0962d353ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -623,6 +623,7 @@ struct qed_hwfn { void *unzip_buf; struct dbg_tools_data dbg_info; + void *dbg_user_info; /* PWM region specific data */ u16 wid_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index f1977aa440e5..dc1c1b616084 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -40,7 +40,6 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/string.h> -#include <linux/bitops.h> #include "qed.h" #include "qed_cxt.h" #include "qed_dev_api.h" diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 6bb76e6d3c14..6ce9a762cfc0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -253,8 +253,9 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, *type = DCBX_PROTOCOL_ROCE_V2; } else { *type = DCBX_MAX_PROTOCOL_TYPE; - DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n", - app_prio_bitmap); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); return false; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 1aa9fc1c5890..78a638ec7c0a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -3454,7 +3454,8 @@ static u32 qed_grc_dump_iors(struct qed_hwfn *p_hwfn, addr = BYTES_TO_DWORDS(storm->sem_fast_mem_addr + SEM_FAST_REG_STORM_REG_FILE) + IOR_SET_OFFSET(set_id); - buf[strlen(buf) - 1] = '0' + set_id; + if (strlen(buf) > 0) + buf[strlen(buf) - 1] = '0' + set_id; offset += qed_grc_dump_mem(p_hwfn, p_ptt, dump_buf + offset, @@ -5563,35 +5564,6 @@ struct block_info { enum block_id id; }; -struct mcp_trace_format { - u32 data; -#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff -#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 -#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 -#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 -#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 -#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 -#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 -#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 -#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 -#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 -#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 -#define MCP_TRACE_FORMAT_LEN_SHIFT 24 - - char *format_str; -}; - -/* Meta data structure, generated by a perl script during MFW build. 
therefore, - * the structs mcp_trace_meta and mcp_trace_format are duplicated in the perl - * script. - */ -struct mcp_trace_meta { - u32 modules_num; - char **modules; - u32 formats_num; - struct mcp_trace_format *formats; -}; - /* REG fifo element */ struct reg_fifo_element { u64 data; @@ -5714,6 +5686,20 @@ struct igu_fifo_addr_data { enum igu_fifo_addr_types type; }; +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + /******************************** Constants **********************************/ #define MAX_MSG_LEN 1024 @@ -6137,15 +6123,6 @@ static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { /******************************** Variables **********************************/ -/* MCP Trace meta data array - used in case the dump doesn't contain the - * meta data (e.g. due to no NVRAM access). - */ -static struct user_dbg_array s_mcp_trace_meta_arr = { NULL, 0 }; - -/* Parsed MCP Trace meta data info, based on MCP trace meta array */ -static struct mcp_trace_meta s_mcp_trace_meta; -static bool s_mcp_trace_meta_valid; - /* Temporary buffer, used for print size calculations */ static char s_temp_buf[MAX_MSG_LEN]; @@ -6311,6 +6288,12 @@ static u32 qed_print_section_params(u32 *dump_buf, return dump_offset; } +static struct dbg_tools_user_data * +qed_dbg_get_user_data(struct qed_hwfn *p_hwfn) +{ + return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; +} + /* Parses the idle check rules and returns the number of characters printed. * In case of parsing error, returns 0. */ @@ -6570,43 +6553,26 @@ static enum dbg_status qed_parse_idle_chk_dump(u32 *dump_buf, return DBG_STATUS_OK; } -/* Frees the specified MCP Trace meta data */ -static void qed_mcp_trace_free_meta(struct qed_hwfn *p_hwfn, - struct mcp_trace_meta *meta) -{ - u32 i; - - s_mcp_trace_meta_valid = false; - - /* Release modules */ - if (meta->modules) { - for (i = 0; i < meta->modules_num; i++) - kfree(meta->modules[i]); - kfree(meta->modules); - } - - /* Release formats */ - if (meta->formats) { - for (i = 0; i < meta->formats_num; i++) - kfree(meta->formats[i].format_str); - kfree(meta->formats); - } -} - /* Allocates and fills MCP Trace meta data based on the specified meta data * dump buffer. * Returns debug status code. */ -static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, - const u32 *meta_buf, - struct mcp_trace_meta *meta) +static enum dbg_status +qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - u8 *meta_buf_bytes = (u8 *)meta_buf; + struct dbg_tools_user_data *dev_user_data; u32 offset = 0, signature, i; + struct mcp_trace_meta *meta; + u8 *meta_buf_bytes; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + meta_buf_bytes = (u8 *)meta_buf; /* Free the previous meta before loading a new one. */ - if (s_mcp_trace_meta_valid) - qed_mcp_trace_free_meta(p_hwfn, meta); + if (meta->is_allocated) + qed_mcp_trace_free_meta_data(p_hwfn); memset(meta, 0, sizeof(*meta)); @@ -6674,7 +6640,7 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, format_len, format_ptr->format_str); } - s_mcp_trace_meta_valid = true; + meta->is_allocated = true; return DBG_STATUS_OK; } @@ -6687,21 +6653,26 @@ static enum dbg_status qed_mcp_trace_alloc_meta(struct qed_hwfn *p_hwfn, * buffer. 
* data_size - size in bytes of data to parse. * parsed_buf - destination buffer for parsed data. - * parsed_bytes - size of parsed data in bytes. + * parsed_results_bytes - size of parsed data in bytes. */ -static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, +static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, + u8 *trace_buf, u32 trace_buf_size, u32 data_offset, u32 data_size, char *parsed_buf, - u32 *parsed_bytes) + u32 *parsed_results_bytes) { + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; u32 param_mask, param_shift; enum dbg_status status; - *parsed_bytes = 0; + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + *parsed_results_bytes = 0; - if (!s_mcp_trace_meta_valid) + if (!meta->is_allocated) return DBG_STATUS_MCP_TRACE_BAD_DATA; status = DBG_STATUS_OK; @@ -6723,7 +6694,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, format_idx = header & MFW_TRACE_EVENTID_MASK; /* Skip message if its index doesn't exist in the meta data */ - if (format_idx >= s_mcp_trace_meta.formats_num) { + if (format_idx >= meta->formats_num) { u8 format_size = (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT); @@ -6738,7 +6709,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, continue; } - format_ptr = &s_mcp_trace_meta.formats[format_idx]; + format_ptr = &meta->formats[format_idx]; for (i = 0, param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, @@ -6783,19 +6754,20 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, return DBG_STATUS_MCP_TRACE_BAD_DATA; /* Print current message to results buffer */ - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, + *parsed_results_bytes), "%s %-8s: ", s_mcp_trace_level_str[format_level], - s_mcp_trace_meta.modules[format_module]); - *parsed_bytes += - sprintf(qed_get_buf_ptr(parsed_buf, *parsed_bytes), + meta->modules[format_module]); + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), format_ptr->format_str, params[0], params[1], params[2]); } /* Add string NULL terminator */ - (*parsed_bytes)++; + (*parsed_results_bytes)++; return status; } @@ -6803,24 +6775,25 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf, /* Parses an MCP Trace dump buffer. * If result_buf is not NULL, the MCP Trace results are printed to it. * In any case, the required results buffer size is assigned to - * parsed_bytes. + * parsed_results_bytes. * The parsing status is returned. 
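The renamed parsed_results_bytes out-parameter preserves the qed debug API's two-pass contract: callers first run the parser with a NULL results buffer purely to learn the required size, then allocate and parse again for real. A hedged usage sketch, assuming a trace dump has already been collected and that num_dumped_dwords is its length in dwords as elsewhere in this API:

/* Pass 1: NULL results buffer, only the required size is computed */
u32 size = 0;
char *results;
enum dbg_status rc;

rc = qed_get_mcp_trace_results_buf_size(p_hwfn, dump_buf,
					num_dumped_dwords, &size);
if (rc != DBG_STATUS_OK)
	return rc;

results = vzalloc(size);
if (!results)
	return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

/* Pass 2: same dump, now printed into the correctly sized buffer */
rc = qed_print_mcp_trace_results(p_hwfn, dump_buf,
				 num_dumped_dwords, results);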
*/ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *dump_buf, - char *parsed_buf, - u32 *parsed_bytes) + char *results_buf, + u32 *parsed_results_bytes, + bool free_meta_data) { const char *section_name, *param_name, *param_str_val; u32 data_size, trace_data_dwords, trace_meta_dwords; - u32 offset, results_offset, parsed_buf_bytes; + u32 offset, results_offset, results_buf_bytes; u32 param_num_val, num_section_params; struct mcp_trace *trace; enum dbg_status status; const u32 *meta_buf; u8 *trace_buf; - *parsed_bytes = 0; + *parsed_results_bytes = 0; /* Read global_params section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6831,7 +6804,7 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Print global params */ dump_buf += qed_print_section_params(dump_buf, num_section_params, - parsed_buf, &results_offset); + results_buf, &results_offset); /* Read trace_data section */ dump_buf += qed_read_section_hdr(dump_buf, @@ -6846,6 +6819,9 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Prepare trace info */ trace = (struct mcp_trace *)dump_buf; + if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + trace_buf = (u8 *)dump_buf + sizeof(*trace); offset = trace->trace_oldest; data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); @@ -6865,31 +6841,39 @@ static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, /* Choose meta data buffer */ if (!trace_meta_dwords) { /* Dump doesn't include meta data */ - if (!s_mcp_trace_meta_arr.ptr) + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + if (!dev_user_data->mcp_trace_user_meta_buf) return DBG_STATUS_MCP_TRACE_NO_META; - meta_buf = s_mcp_trace_meta_arr.ptr; + + meta_buf = dev_user_data->mcp_trace_user_meta_buf; } else { /* Dump includes meta data */ meta_buf = dump_buf; } /* Allocate meta data memory */ - status = qed_mcp_trace_alloc_meta(p_hwfn, meta_buf, &s_mcp_trace_meta); + status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf); if (status != DBG_STATUS_OK) return status; - status = qed_parse_mcp_trace_buf(trace_buf, + status = qed_parse_mcp_trace_buf(p_hwfn, + trace_buf, trace->size, offset, data_size, - parsed_buf ? - parsed_buf + results_offset : + results_buf ? 
+ results_buf + results_offset : NULL, - &parsed_buf_bytes); + &results_buf_bytes); if (status != DBG_STATUS_OK) return status; - *parsed_bytes = results_offset + parsed_buf_bytes; + if (free_meta_data) + qed_mcp_trace_free_meta_data(p_hwfn); + + *parsed_results_bytes = results_offset + results_buf_bytes; return DBG_STATUS_OK; } @@ -7361,6 +7345,16 @@ enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr) return DBG_STATUS_OK; } +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn) +{ + p_hwfn->dbg_user_info = kzalloc(sizeof(struct dbg_tools_user_data), + GFP_KERNEL); + if (!p_hwfn->dbg_user_info) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + return DBG_STATUS_OK; +} + const char *qed_dbg_get_status_str(enum dbg_status status) { return (status < @@ -7397,10 +7391,13 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, num_errors, num_warnings); } -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size) +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) { - s_mcp_trace_meta_arr.ptr = data; - s_mcp_trace_meta_arr.size_in_dwords = size; + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + dev_user_data->mcp_trace_user_meta_buf = meta_buf; } enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, @@ -7409,7 +7406,7 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size) { return qed_parse_mcp_trace_dump(p_hwfn, - dump_buf, NULL, results_buf_size); + dump_buf, NULL, results_buf_size, true); } enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, @@ -7421,20 +7418,61 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, - results_buf, &parsed_buf_size); + results_buf, &parsed_buf_size, true); +} + +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, + &parsed_buf_size, false); } -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf) { - u32 parsed_bytes; + u32 parsed_results_bytes; - return qed_parse_mcp_trace_buf(dump_buf, + return qed_parse_mcp_trace_buf(p_hwfn, + dump_buf, num_dumped_bytes, 0, num_dumped_bytes, - results_buf, &parsed_bytes); + results_buf, &parsed_results_bytes); +} + +/* Frees the specified MCP Trace meta data */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; + u32 i; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + if (!meta->is_allocated) + return; + + /* Release modules */ + if (meta->modules) { + for (i = 0; i < meta->modules_num; i++) + kfree(meta->modules[i]); + kfree(meta->modules); + } + + /* Release formats */ + if (meta->formats) { + for (i = 0; i < meta->formats_num; i++) + kfree(meta->formats[i].format_str); + kfree(meta->formats); + } + + meta->is_allocated = false; } enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 016ca8a7ec8a..128eb63ca54a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -144,6 +144,12 @@ static 
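The new _cont entry point exists for exactly one reason: it leaves the parsed MCP trace meta data cached in p_hwfn->dbg_user_info between calls, so a caller that polls the trace repeatedly does not re-parse the meta every time. A hedged sketch of that loop; poll_more_dumps() and collect_dump() are hypothetical stand-ins for the caller's own dump collection:

enum dbg_status rc = DBG_STATUS_OK;

while (poll_more_dumps()) {		/* hypothetical */
	collect_dump(dump_buf);		/* hypothetical */
	rc = qed_print_mcp_trace_results_cont(p_hwfn, dump_buf,
					      results_buf);
	if (rc != DBG_STATUS_OK)
		break;
}
/* Drop the cached meta once continuous parsing ends */
qed_mcp_trace_free_meta_data(p_hwfn);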
void qed_qm_info_free(struct qed_hwfn *p_hwfn) qm_info->wfq_data = NULL; } +static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; +} + void qed_resc_free(struct qed_dev *cdev) { int i; @@ -183,6 +189,7 @@ void qed_resc_free(struct qed_dev *cdev) qed_l2_free(p_hwfn); qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); } } @@ -1083,6 +1090,10 @@ int qed_resc_alloc(struct qed_dev *cdev) rc = qed_dcbx_info_alloc(p_hwfn); if (rc) goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn); + if (rc) + goto alloc_err; } cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 8faceb691657..21ec8091a24a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -274,7 +274,8 @@ struct core_rx_start_ramrod_data { u8 mf_si_mcast_accept_all; struct core_rx_action_on_error action_on_error; u8 gsi_offload_flag; - u8 reserved[6]; + u8 wipe_inner_vlan_pri_en; + u8 reserved[5]; }; /* Ramrod data for rx queue stop ramrod */ @@ -351,7 +352,8 @@ struct core_tx_start_ramrod_data { __le16 pbl_size; __le16 qm_pq_id; u8 gsi_offload_flag; - u8 resrved[3]; + u8 vport_id; + u8 resrved[2]; }; /* Ramrod data for tx queue stop ramrod */ @@ -914,6 +916,16 @@ struct eth_rx_rate_limit { __le16 reserved1; }; +/* Update RSS indirection table entry command */ +struct eth_tstorm_rss_update_data { + u8 valid; + u8 vport_id; + u8 ind_table_index; + u8 reserved; + __le16 ind_table_value; + __le16 reserved1; +}; + struct eth_ustorm_per_pf_stat { struct regpair rcv_lb_ucast_bytes; struct regpair rcv_lb_mcast_bytes; @@ -1241,6 +1253,10 @@ struct rl_update_ramrod_data { u8 rl_id_first; u8 rl_id_last; u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; __le32 rl_bc_rate; __le16 rl_max_rate; __le16 rl_r_ai; @@ -1249,7 +1265,7 @@ struct rl_update_ramrod_data { __le32 dcqcn_k_us; __le32 dcqcn_timeuot_us; __le32 qcn_timeuot_us; - __le32 reserved[2]; + __le32 reserved2; }; /* Slowpath Element (SPQE) */ @@ -3322,6 +3338,25 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_SHIFT 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_SHIFT 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_SHIFT 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_SHIFT 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_SHIFT 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_SHIFT 24 + char *format_str; +}; + /******************************** Constants **********************************/ #define MAX_NAME_LEN 16 @@ -3337,6 +3372,13 @@ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, enum dbg_status qed_dbg_user_set_bin_ptr(const u8 * const bin_ptr); /** + * @brief qed_dbg_alloc_user_data - Allocates user debug data. 
+ * + * @param p_hwfn - HW device data + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_dbg_get_status_str - Returns a string for the specified status. * * @param status - a debug status code. @@ -3381,8 +3423,7 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets a pointer to the MCP Trace - * meta data. + * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). @@ -3390,7 +3431,8 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, * @param data - pointer to MCP Trace meta data * @param size - size of MCP Trace meta data in dwords */ -void qed_dbg_mcp_trace_set_meta_data(u32 *data, u32 size); +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf); /** * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size @@ -3425,19 +3467,45 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** + * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * keeps the MCP trace meta data allocated, to support continuous MCP Trace + * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should + * be called to free the meta data. + * + * @param p_hwfn - HW device data + * @param dump_buf - mcp trace dump buffer, starting from the header. + * @param results_buf - buffer for printing the mcp trace results. + * + * @return error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf); + +/** * @brief print_mcp_trace_line - Prints MCP Trace results for a single line * + * @param p_hwfn - HW device data * @param dump_buf - mcp trace dump buffer, starting from the header. * @param num_dumped_bytes - number of bytes that were dumped. * @param results_buf - buffer for printing the mcp trace results. * * @return error if the parsing fails, ok otherwise. */ -enum dbg_status qed_print_mcp_trace_line(u8 *dump_buf, +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, u32 num_dumped_bytes, char *results_buf); /** + * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * Should be called after continuous MCP Trace parsing. + * + * @param p_hwfn - HW device data + */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size * for reg_fifo results (in bytes). * @@ -4303,154 +4371,161 @@ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, (IRO[29].base + ((pf_id) * IRO[29].m1)) #define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size) +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. 
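The new per-PF TSTORM slot gives the driver a way to retarget a single RSS indirection entry: fill an eth_tstorm_rss_update_data command and copy it into the PF's slot in TSTORM internal RAM. A hedged sketch; the storm RAM base symbol and the use of qed's internal-RAM copy helper are assumptions for illustration:

struct eth_tstorm_rss_update_data cmd = { 0 };
u32 addr;

cmd.vport_id = vport_id;
cmd.ind_table_index = entry;		/* indirection slot to change */
cmd.ind_table_value = cpu_to_le16(rxq_id);
cmd.valid = 1;				/* firmware consumes valid entries */

/* BAR0_MAP_REG_TSDM_RAM as the TSTORM RAM base is an assumption here */
addr = BAR0_MAP_REG_TSDM_RAM +
       TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);
qed_memcpy_to(p_hwfn, p_ptt, addr, &cmd, sizeof(cmd));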
+ */ +#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \ + (IRO[30].base + ((pf_id) * IRO[30].m1)) +#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size) + /* Xstorm queue zone */ #define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ - (IRO[30].base + ((queue_id) * IRO[30].m1)) -#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size) + (IRO[31].base + ((queue_id) * IRO[31].m1)) +#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size) /* Ystorm cqe producer */ #define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[31].base + ((rss_id) * IRO[31].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size) + (IRO[32].base + ((rss_id) * IRO[32].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size) /* Ustorm cqe producer */ #define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ - (IRO[32].base + ((rss_id) * IRO[32].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size) + (IRO[33].base + ((rss_id) * IRO[33].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size) /* Ustorm grq producer */ #define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ - (IRO[33].base + ((pf_id) * IRO[33].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size) + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size) /* Tstorm cmdq-cons of given command queue-id */ #define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ - (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size) + (IRO[35].base + ((cmdq_queue_id) * IRO[35].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size) /* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, * BDqueue-id. */ #define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2)) -#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size) + (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) /* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ #define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ - (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2)) -#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size) + (IRO[37].base + ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size) /* Tstorm iSCSI RX stats */ #define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[37].base + ((pf_id) * IRO[37].m1)) -#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size) + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) /* Mstorm iSCSI RX stats */ #define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[38].base + ((pf_id) * IRO[38].m1)) -#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size) + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) /* Ustorm iSCSI RX stats */ #define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ - (IRO[39].base + ((pf_id) * IRO[39].m1)) -#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size) + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size) /* Xstorm iSCSI TX stats */ #define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[40].base + ((pf_id) * IRO[40].m1)) -#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size) + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) /* Ystorm iSCSI TX stats */ #define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[41].base + ((pf_id) * IRO[41].m1)) -#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size) + (IRO[42].base + ((pf_id) * IRO[42].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) /* 
Pstorm iSCSI TX stats */ #define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ - (IRO[42].base + ((pf_id) * IRO[42].m1)) -#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size) + (IRO[43].base + ((pf_id) * IRO[43].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size) /* Tstorm FCoE RX stats */ #define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ - (IRO[43].base + ((pf_id) * IRO[43].m1)) -#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size) + (IRO[44].base + ((pf_id) * IRO[44].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size) /* Pstorm FCoE TX stats */ #define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ - (IRO[44].base + ((pf_id) * IRO[44].m1)) -#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size) + (IRO[45].base + ((pf_id) * IRO[45].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size) /* Pstorm RDMA queue statistics */ #define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1)) -#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size) + (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) /* Tstorm RDMA queue statistics */ #define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ - (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1)) -#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size) + (IRO[47].base + ((rdma_stat_counter_id) * IRO[47].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size) /* Xstorm error level for assert */ #define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[47].base + ((pf_id) * IRO[47].m1)) -#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[47].size) + (IRO[48].base + ((pf_id) * IRO[48].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) /* Ystorm error level for assert */ #define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[48].base + ((pf_id) * IRO[48].m1)) -#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size) + (IRO[49].base + ((pf_id) * IRO[49].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) /* Pstorm error level for assert */ #define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[49].base + ((pf_id) * IRO[49].m1)) -#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size) + (IRO[50].base + ((pf_id) * IRO[50].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) /* Tstorm error level for assert */ #define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[50].base + ((pf_id) * IRO[50].m1)) -#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size) + (IRO[51].base + ((pf_id) * IRO[51].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) /* Mstorm error level for assert */ #define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[51].base + ((pf_id) * IRO[51].m1)) -#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size) + (IRO[52].base + ((pf_id) * IRO[52].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) /* Ustorm error level for assert */ #define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ - (IRO[52].base + ((pf_id) * IRO[52].m1)) -#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size) + (IRO[53].base + ((pf_id) * IRO[53].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size) /* Xstorm iWARP rxmit stats */ #define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ - (IRO[53].base + ((pf_id) * IRO[53].m1)) -#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[53].size) + (IRO[54].base + ((pf_id) * IRO[54].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size) /* Tstorm RoCE Event Statistics */ #define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ - (IRO[54].base + ((roce_pf_id) * IRO[54].m1)) -#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[54].size) + (IRO[55].base + ((roce_pf_id) * 
IRO[55].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size) /* DCQCN Received Statistics */ #define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ - (IRO[55].base + ((roce_pf_id) * IRO[55].m1)) -#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[55].size) + (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size) /* RoCE Error Statistics */ #define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ - (IRO[56].base + ((roce_pf_id) * IRO[56].m1)) -#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[56].size) + (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size) /* DCQCN Sent Statistics */ #define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ - (IRO[57].base + ((roce_pf_id) * IRO[57].m1)) -#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[57].size) + (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size) /* RoCE CQEs Statistics */ #define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ - (IRO[58].base + ((roce_pf_id) * IRO[58].m1)) -#define USTORM_ROCE_CQE_STATS_SIZE (IRO[58].size) + (IRO[59].base + ((roce_pf_id) * IRO[59].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size) -static const struct iro iro_arr[59] = { +static const struct iro iro_arr[60] = { {0x0, 0x0, 0x0, 0x0, 0x8}, {0x4cb8, 0x88, 0x0, 0x0, 0x88}, {0x6530, 0x20, 0x0, 0x0, 0x20}, @@ -4461,14 +4536,14 @@ static const struct iro iro_arr[59] = { {0x84, 0x8, 0x0, 0x0, 0x2}, {0x4c48, 0x0, 0x0, 0x0, 0x78}, {0x3e38, 0x0, 0x0, 0x0, 0x78}, - {0x2b78, 0x0, 0x0, 0x0, 0x78}, + {0x3ef8, 0x0, 0x0, 0x0, 0x78}, {0x4c40, 0x0, 0x0, 0x0, 0x78}, {0x4998, 0x0, 0x0, 0x0, 0x78}, {0x7f50, 0x0, 0x0, 0x0, 0x78}, {0xa28, 0x8, 0x0, 0x0, 0x8}, {0x6210, 0x10, 0x0, 0x0, 0x10}, {0xb820, 0x30, 0x0, 0x0, 0x30}, - {0x96c0, 0x30, 0x0, 0x0, 0x30}, + {0xa990, 0x30, 0x0, 0x0, 0x30}, {0x4b68, 0x80, 0x0, 0x0, 0x40}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0x53a8, 0x80, 0x4, 0x0, 0x4}, @@ -4476,11 +4551,12 @@ static const struct iro iro_arr[59] = { {0x4ba8, 0x80, 0x0, 0x0, 0x20}, {0x8158, 0x40, 0x0, 0x0, 0x30}, {0xe770, 0x60, 0x0, 0x0, 0x60}, - {0x2d10, 0x80, 0x0, 0x0, 0x38}, - {0xf2b8, 0x78, 0x0, 0x0, 0x78}, + {0x4090, 0x80, 0x0, 0x0, 0x38}, + {0xfea8, 0x78, 0x0, 0x0, 0x78}, {0x1f8, 0x4, 0x0, 0x0, 0x4}, {0xaf20, 0x0, 0x0, 0x0, 0xf0}, {0xb010, 0x8, 0x0, 0x0, 0x8}, + {0xc00, 0x8, 0x0, 0x0, 0x8}, {0x1f8, 0x8, 0x0, 0x0, 0x8}, {0xac0, 0x8, 0x0, 0x0, 0x8}, {0x2578, 0x8, 0x0, 0x0, 0x8}, @@ -4492,23 +4568,23 @@ static const struct iro iro_arr[59] = { {0x12908, 0x18, 0x0, 0x0, 0x10}, {0x11aa8, 0x40, 0x0, 0x0, 0x18}, {0xa588, 0x50, 0x0, 0x0, 0x20}, - {0x8700, 0x40, 0x0, 0x0, 0x28}, - {0x10300, 0x18, 0x0, 0x0, 0x10}, + {0x8f00, 0x40, 0x0, 0x0, 0x28}, + {0x10e30, 0x18, 0x0, 0x0, 0x10}, {0xde48, 0x48, 0x0, 0x0, 0x38}, - {0x10768, 0x20, 0x0, 0x0, 0x20}, - {0x2d48, 0x80, 0x0, 0x0, 0x10}, + {0x11298, 0x20, 0x0, 0x0, 0x20}, + {0x40c8, 0x80, 0x0, 0x0, 0x10}, {0x5048, 0x10, 0x0, 0x0, 0x10}, {0xc748, 0x8, 0x0, 0x0, 0x1}, - {0xa128, 0x8, 0x0, 0x0, 0x1}, - {0x10f00, 0x8, 0x0, 0x0, 0x1}, + {0xa928, 0x8, 0x0, 0x0, 0x1}, + {0x11a30, 0x8, 0x0, 0x0, 0x1}, {0xf030, 0x8, 0x0, 0x0, 0x1}, {0x13028, 0x8, 0x0, 0x0, 0x1}, {0x12c58, 0x8, 0x0, 0x0, 0x1}, {0xc9b8, 0x30, 0x0, 0x0, 0x10}, {0xed90, 0x28, 0x0, 0x0, 0x28}, - {0xa520, 0x18, 0x0, 0x0, 0x18}, - {0xa6a0, 0x8, 0x0, 0x0, 0x8}, - {0x13108, 0x8, 0x0, 0x0, 0x8}, + {0xad20, 0x18, 0x0, 0x0, 0x18}, + {0xaea0, 0x8, 0x0, 0x0, 0x8}, + {0x13c38, 0x8, 0x0, 0x0, 0x8}, {0x13c50, 0x18, 0x0, 0x0, 0x18}, }; @@ -5661,6 +5737,14 @@ enum 
eth_filter_type { MAX_ETH_FILTER_TYPE }; +/* inner to inner vlan priority translation configurations */ +struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; +}; + /* Eth IPv4 Fragment Type */ enum eth_ipv4_frag_type { ETH_IPV4_NOT_FRAG, @@ -6018,6 +6102,14 @@ struct tx_queue_update_ramrod_data { struct regpair reserved1[5]; }; +/* Inner to Inner VLAN priority map update mode */ +enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + /* Ramrod data for vport update ramrod */ struct vport_filter_update_ramrod_data { struct eth_filter_cmd_header filter_cmd_hdr; @@ -6048,7 +6140,8 @@ struct vport_start_ramrod_data { u8 zero_placement_offset; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[1]; + u8 wipe_inner_vlan_pri_en; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; }; /* Ramrod data for vport stop ramrod */ @@ -6100,7 +6193,9 @@ struct vport_update_ramrod_data_cmn { u8 update_ctl_frame_checks_en_flg; u8 ctl_frame_mac_check_en; u8 ctl_frame_ethtype_check_en; - u8 reserved[15]; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 reserved[6]; }; struct vport_update_ramrod_mcast { @@ -6929,11 +7024,6 @@ struct mstorm_rdma_task_st_ctx { struct regpair temp[4]; }; -/* The roce task context of Ustorm */ -struct ustorm_rdma_task_st_ctx { - struct regpair temp[2]; -}; - struct e4_ustorm_rdma_task_ag_ctx { u8 reserved; u8 state; @@ -7007,8 +7097,6 @@ struct e4_rdma_task_context { struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context; struct mstorm_rdma_task_st_ctx mstorm_st_context; struct rdif_task_context rdif_context; - struct ustorm_rdma_task_st_ctx ustorm_st_context; - struct regpair ustorm_st_padding[2]; struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context; }; @@ -7388,7 +7476,7 @@ struct e4_ustorm_rdma_conn_ag_ctx { #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 #define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 u8 byte2; - u8 byte3; + u8 nvmf_only; __le16 conn_dpi; __le16 word1; __le32 cq_cons; @@ -7831,7 +7919,12 @@ struct roce_create_qp_req_ramrod_data { struct regpair qp_handle_for_cqe; struct regpair qp_handle_for_async; u8 stats_counter_id; - u8 reserved3[7]; + u8 reserved3[6]; + u8 flags2; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x7F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 1 __le16 regular_latency_phy_queue; __le16 dpi; }; @@ -7954,6 +8047,7 @@ enum roce_event_opcode { ROCE_EVENT_DESTROY_QP, ROCE_EVENT_CREATE_UD_QP, ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, MAX_ROCE_EVENT_OPCODE }; @@ -7962,7 +8056,13 @@ struct roce_init_func_params { u8 ll2_queue_id; u8 cnp_vlan_priority; u8 cnp_dscp; - u8 reserved; + u8 flags; +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 __le32 cnp_send_timeout; __le16 rl_offset; u8 rl_count_log; @@ -8109,9 +8209,24 @@ enum roce_ramrod_cmd_id { ROCE_RAMROD_DESTROY_QP, ROCE_RAMROD_CREATE_UD_QP, ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, 
MAX_ROCE_RAMROD_CMD_ID }; +/* RoCE func init ramrod data */ +struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; +}; + struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part { u8 reserved0; u8 state; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 77e386ebff09..f7c2f32237cb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -904,13 +904,11 @@ static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u32 size) { struct qlcnic_hardware_context *ahw = adapter->ahw; - u32 fw_mbx; u8 i, max = 2, hdr_size, j; hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); max = (size / sizeof(u32)) + hdr_size; - fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0)); for (i = 2, j = 0; j < hdr_size; i++, j++) *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i)); for (; j < max; i++, j++) @@ -936,7 +934,7 @@ static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf) static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) { struct qlcnic_vf_info *vf = trans->vf; - u32 pay_size, hdr_size; + u32 pay_size; u32 *hdr, *pay; int ret; u8 pci_func = trans->func_id; @@ -947,14 +945,12 @@ static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type) if (type == QLC_BC_COMMAND) { hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag); pay = (u32 *)(trans->req_pay + trans->curr_req_frag); - hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size, trans->curr_req_frag); pay_size = (pay_size / sizeof(u32)); } else { hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag); pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag); - hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32)); pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size, trans->curr_rsp_frag); pay_size = (pay_size / sizeof(u32)); diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index aa11b70b9ca4..04aa592f35c3 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1024,16 +1024,8 @@ static int r6040_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } - /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_10baseT_Half - | SUPPORTED_10baseT_Full - | SUPPORTED_100baseT_Half - | SUPPORTED_100baseT_Full - | SUPPORTED_Autoneg - | SUPPORTED_MII - | SUPPORTED_TP); - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, SPEED_100); + lp->old_link = 0; lp->old_duplex = -1; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index aff5516b781e..fb2a1125780d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1074,7 +1074,8 @@ static int ravb_phy_init(struct net_device *ndev) } /* 10BASE is not supported */ - phydev->supported &= ~PHY_10BT_FEATURES; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); phy_attached_info(phydev); diff 
--git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index a9da1ad4b4f2..690aee88f0eb 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -298,8 +298,8 @@ static int sxgbe_init_phy(struct net_device *ndev) /* Stop Advertising 1000BASE Capability if interface is not GMII */ if ((phy_iface == PHY_INTERFACE_MODE_MII) || (phy_iface == PHY_INTERFACE_MODE_RMII)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_1000); + if (phydev->phy_id == 0) { phy_disconnect(phydev); return -ENODEV; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index f0afb88d7bc2..c009407618d9 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1048,10 +1048,10 @@ static int smsc911x_mii_probe(struct net_device *dev) phy_attached_info(phydev); + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); pdata->last_duplex = -1; pdata->last_carrier = -1; diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index 2fa3c1d03abc..9b6366b20110 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1135,10 +1135,10 @@ static int smsc9420_mii_probe(struct net_device *dev) return PTR_ERR(phydev); } + phy_set_max_speed(phydev, SPEED_100); + /* mask with MAC supported features */ - phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause | - SUPPORTED_Asym_Pause); - phydev->advertising = phydev->supported; + phy_support_asym_pause(phydev); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index f7ecceeb1e28..2a156dcd4534 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -461,16 +461,7 @@ static int ave_ethtool_set_pauseparam(struct net_device *ndev, priv->pause_rx = pause->rx_pause; priv->pause_tx = pause->tx_pause; - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - if (pause->rx_pause) - phydev->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; - if (pause->tx_pause) - phydev->advertising ^= ADVERTISED_Asym_Pause; - - if (pause->autoneg) { - if (netif_running(ndev)) - phy_start_aneg(phydev); - } + phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause); return 0; } @@ -904,11 +895,11 @@ static void ave_rxfifo_reset(struct net_device *ndev) /* assert reset */ writel(AVE_GRR_RXFFR, priv->base + AVE_GRR); - usleep_range(40, 50); + udelay(50); /* negate reset */ writel(0, priv->base + AVE_GRR); - usleep_range(10, 20); + udelay(20); /* negate interrupt status */ writel(AVE_GI_RXOVF, priv->base + AVE_GISR); @@ -1223,11 +1214,10 @@ static int ave_init(struct net_device *ndev) phy_ethtool_get_wol(phydev, &wol); device_set_wakeup_capable(&ndev->dev, !!wol.supported); - if (!phy_interface_is_rgmii(phydev)) { - phydev->supported &= ~PHY_GBIT_FEATURES; - phydev->supported |= PHY_BASIC_FEATURES; - } - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + if (!phy_interface_is_rgmii(phydev)) + phy_set_max_speed(phydev, SPEED_100); + + phy_support_asym_pause(phydev); phy_attached_info(phydev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h 
b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h index 0a80fa25afe3..d6bb953685fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h @@ -119,11 +119,23 @@ #define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x))) #define XGMAC_TQS GENMASK(25, 16) #define XGMAC_TQS_SHIFT 16 +#define XGMAC_Q2TCMAP GENMASK(10, 8) +#define XGMAC_Q2TCMAP_SHIFT 8 #define XGMAC_TTC GENMASK(6, 4) #define XGMAC_TTC_SHIFT 4 #define XGMAC_TXQEN GENMASK(3, 2) #define XGMAC_TXQEN_SHIFT 2 #define XGMAC_TSF BIT(1) +#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x))) +#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x))) +#define XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x))) +#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x))) +#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x))) +#define XGMAC_CC BIT(3) +#define XGMAC_TSA GENMASK(1, 0) +#define XGMAC_SP (0x0 << 0) +#define XGMAC_CBS (0x1 << 0) +#define XGMAC_ETS (0x2 << 0) #define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x))) #define XGMAC_RQS GENMASK(25, 16) #define XGMAC_RQS_SHIFT 16 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index d182f82f7b58..64b8cb88ea45 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -177,6 +177,23 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue, writel(value, ioaddr + reg); } +static void dwxgmac2_config_cbs(struct mac_device_info *hw, + u32 send_slope, u32 idle_slope, + u32 high_credit, u32 low_credit, u32 queue) +{ + void __iomem *ioaddr = hw->pcsr; + u32 value; + + writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue)); + writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue)); + writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue)); + writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); + + value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); + value |= XGMAC_CC | XGMAC_CBS; + writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); +} + static int dwxgmac2_host_irq_status(struct mac_device_info *hw, struct stmmac_extra_stats *x) { @@ -316,7 +333,7 @@ const struct stmmac_ops dwxgmac210_ops = { .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms, .set_mtl_tx_queue_weight = NULL, .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma, - .config_cbs = NULL, + .config_cbs = dwxgmac2_config_cbs, .dump_regs = NULL, .host_irq_status = dwxgmac2_host_irq_status, .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 20909036e002..6c5092e7771c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -182,6 +182,9 @@ static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode, value |= 0x7 << XGMAC_TTC_SHIFT; } + /* Use static TC to Queue mapping */ + value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP; + value &= ~XGMAC_TXQEN; if (qmode != MTL_QUEUE_AVB) value |= 0x2 << XGMAC_TXQEN_SHIFT; @@ -374,6 +377,21 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan) writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)); } +static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) +{ + u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); + + value &= ~XGMAC_TXQEN; + if (qmode != MTL_QUEUE_AVB) { + 
value |= 0x2 << XGMAC_TXQEN_SHIFT; + writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); + } else { + value |= 0x1 << XGMAC_TXQEN_SHIFT; + } + + writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); +} + static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan) { u32 value; @@ -407,5 +425,6 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = { .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr, .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr, .enable_tso = dwxgmac2_enable_tso, + .qmode = dwxgmac2_qmode, .set_bfsize = dwxgmac2_set_bfsize, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c index 357309a6d6a5..81b966a8261b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.c +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c @@ -133,7 +133,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac4_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = stmmac_dwmac4_quirks, }, { @@ -150,7 +150,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -167,7 +167,7 @@ static const struct stmmac_hwif_entry { .mac = &dwmac410_ops, .hwtimestamp = &stmmac_ptp, .mode = &dwmac4_ring_mode_ops, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwmac4_setup, .quirks = NULL, }, { @@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry { .mac = &dwxgmac210_ops, .hwtimestamp = &stmmac_ptp, .mode = NULL, - .tc = NULL, + .tc = &dwmac510_tc_ops, .setup = dwxgmac2_setup, .quirks = NULL, }, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 9f458bb16f2a..3715a0a4af3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -987,17 +987,20 @@ static int stmmac_init_phy(struct net_device *dev) if ((interface == PHY_INTERFACE_MODE_MII) || (interface == PHY_INTERFACE_MODE_RMII) || (max_speed < 1000 && max_speed > 0)) - phydev->advertising &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full); + phy_set_max_speed(phydev, SPEED_100); /* * Half-duplex mode not supported with multiqueue * half-duplex can only works with single queue */ - if (tx_cnt > 1) - phydev->supported &= ~(SUPPORTED_1000baseT_Half | - SUPPORTED_100baseT_Half | - SUPPORTED_10baseT_Half); + if (tx_cnt > 1) { + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + } /* * Broken HW is sometimes missing the pull-up resistor on the diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 06da2f59fcbf..863fd602fd33 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2999,7 +2999,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, /* Now make sure pci_dev cookie is there. 
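The phylib conversions running through this series (stmmac above, and r6040/smsc/sxgbe/sni_ave earlier) all replace open-coded masking of phydev->supported with dedicated helpers. A hedged sketch of the combined pattern in a generic MII probe; the function and its parameters are illustrative, but the helpers themselves are the real phylib API used by these hunks:

static int example_mii_probe(struct phy_device *phydev, int tx_queues)
{
	/* MAC handles at most 100 Mb/s */
	phy_set_max_speed(phydev, SPEED_100);

	/* multi-queue MACs here cannot do half duplex */
	if (tx_queues > 1) {
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	}

	/* MAC can do asymmetric pause */
	phy_support_asym_pause(phydev);

	phy_attached_info(phydev);
	return 0;
}

Besides being shorter, the helpers operate on link-mode bitmaps rather than the legacy SUPPORTED_* u32 masks, which is what lets phylib grow past 32 link modes without touching every driver again.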
*/ #ifdef CONFIG_SPARC dp = pci_device_to_OF_node(pdev); - strcpy(prom_name, dp->name); + snprintf(prom_name, sizeof(prom_name), "%pOFn", dp); #else if (is_quattro_p(pdev)) strcpy(prom_name, "SUNW,qfe"); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index a1d335a3c5e4..1f612268c998 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -225,17 +225,6 @@ static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap) return 0; } -static const char *netcp_node_name(struct device_node *node) -{ - const char *name; - - if (of_property_read_string(node, "label", &name) < 0) - name = node->name; - if (!name) - name = "unknown"; - return name; -} - /* Module management routines */ static int netcp_register_interface(struct netcp_intf *netcp) { @@ -267,8 +256,13 @@ static int netcp_module_probe(struct netcp_device *netcp_device, } for_each_available_child_of_node(devices, child) { - const char *name = netcp_node_name(child); + const char *name; + char node_name[32]; + if (of_property_read_string(node, "label", &name) < 0) { + snprintf(node_name, sizeof(node_name), "%pOFn", child); + name = node_name; + } if (!strcasecmp(module->name, name)) break; } @@ -2209,8 +2203,8 @@ static int netcp_probe(struct platform_device *pdev) for_each_available_child_of_node(interfaces, child) { ret = netcp_create_interface(netcp_device, child); if (ret) { - dev_err(dev, "could not create interface(%s)\n", - child->name); + dev_err(dev, "could not create interface(%pOFn)\n", + child); goto probe_quit_interface; } } diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 72b98e27c992..0397ccb6597e 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3137,15 +3137,15 @@ static void init_secondary_ports(struct gbe_priv *gbe_dev, for_each_child_of_node(node, port) { slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL); if (!slave) { - dev_err(dev, "memory alloc failed for secondary port(%s), skipping...\n", - port->name); + dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n", + port); continue; } if (init_slave(gbe_dev, slave, port)) { dev_err(dev, - "Failed to initialize secondary port(%s), skipping...\n", - port->name); + "Failed to initialize secondary port(%pOFn), skipping...\n", + port); devm_kfree(dev, slave); continue; } @@ -3239,8 +3239,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) ss address at %d\n", - node->name, XGBE_SS_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) ss address at %d\n", + node, XGBE_SS_REG_INDEX); return ret; } @@ -3254,8 +3254,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe of node(%s) sm address at %d\n", - node->name, XGBE_SM_REG_INDEX); + "Can't xlate xgbe of node(%pOFn) sm address at %d\n", + node, XGBE_SM_REG_INDEX); return ret; } @@ -3269,8 +3269,8 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't xlate xgbe serdes of node(%s) address at %d\n", - node->name, XGBE_SERDES_REG_INDEX); + "Can't xlate xgbe serdes of node(%pOFn) address at %d\n", + node, XGBE_SERDES_REG_INDEX); return ret; } @@ 
-3347,8 +3347,8 @@ static int get_gbe_resource_version(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of node(%s) of gbe ss address at %d\n", - node->name, GBE_SS_REG_INDEX); + "Can't translate of node(%pOFn) of gbe ss address at %d\n", + node, GBE_SS_REG_INDEX); return ret; } @@ -3372,8 +3372,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SGMII34_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SGMII34_REG_INDEX); return ret; } @@ -3388,8 +3388,8 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbe node(%s) address at index %d\n", - node->name, GBE_SM_REG_INDEX); + "Can't translate of gbe node(%pOFn) address at index %d\n", + node, GBE_SM_REG_INDEX); return ret; } @@ -3498,8 +3498,8 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev, ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res); if (ret) { dev_err(gbe_dev->dev, - "Can't translate of gbenu node(%s) addr at index %d\n", - node->name, GBENU_SM_REG_INDEX); + "Can't translate of gbenu node(%pOFn) addr at index %d\n", + node, GBENU_SM_REG_INDEX); return ret; } @@ -3642,7 +3642,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, gbe_dev->ss_regs); } else { - dev_err(dev, "unknown GBE node(%s)\n", node->name); + dev_err(dev, "unknown GBE node(%pOFn)\n", node); ret = -ENODEV; } @@ -3667,8 +3667,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, for_each_child_of_node(interfaces, interface) { ret = of_property_read_u32(interface, "slave-port", &slave_num); if (ret) { - dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n", - interface->name); + dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n", + interface); continue; } gbe_dev->num_slaves++; diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index cce9c9ed46aa..7163a8d10dba 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -628,7 +628,7 @@ static int tc_mii_probe(struct net_device *dev) phy_attached_info(phydev); /* mask with MAC supported features */ - phydev->supported &= PHY_BASIC_FEATURES; + phy_set_max_speed(phydev, SPEED_100); dropmask = 0; if (options.speed == 10) dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 42f1f518dad6..46d3092b8aad 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -941,8 +941,7 @@ static int xemaclite_open(struct net_device *dev) } /* EmacLite doesn't support giga-bit speeds */ - lp->phy_dev->supported &= (PHY_BASIC_FEATURES); - lp->phy_dev->advertising = lp->phy_dev->supported; + phy_set_max_speed(lp->phy_dev, SPEED_100); /* Don't advertise 1000BASE-T Full/Half duplex speeds */ phy_write(lp->phy_dev, MII_CTRL1000, 0); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6acb6b5718b9..6625fabe2c88 100644 --- a/drivers/net/geneve.c 
+++ b/drivers/net/geneve.c @@ -69,6 +69,7 @@ struct geneve_dev { struct gro_cells gro_cells; bool collect_md; bool use_udp6_rx_checksums; + bool ttl_inherit; }; struct geneve_sock { @@ -843,7 +844,11 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = key->ttl; } else { tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); - ttl = key->ttl ? : ip4_dst_hoplimit(&rt->dst); + if (geneve->ttl_inherit) + ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); + else + ttl = key->ttl; + ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); } df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; @@ -889,7 +894,11 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, } else { prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel), ip_hdr(skb), skb); - ttl = key->ttl ? : ip6_dst_hoplimit(dst); + if (geneve->ttl_inherit) + ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb); + else + ttl = key->ttl; + ttl = ttl ? : ip6_dst_hoplimit(dst); } err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr)); if (unlikely(err)) @@ -1091,6 +1100,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, + [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1170,7 +1180,8 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, - bool metadata, bool ipv6_rx_csum) + bool metadata, bool ipv6_rx_csum, + bool ttl_inherit) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -1219,6 +1230,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->info = *info; geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = ipv6_rx_csum; + geneve->ttl_inherit = ttl_inherit; err = register_netdevice(dev); if (err) @@ -1237,7 +1249,8 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct ip_tunnel_info *info, bool *metadata, - bool *use_udp6_rx_checksums, bool changelink) + bool *use_udp6_rx_checksums, bool *ttl_inherit, + bool changelink) { int attrtype; @@ -1315,6 +1328,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_GENEVE_TTL]) info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + if (data[IFLA_GENEVE_TTL_INHERIT]) + *ttl_inherit = true; + if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); @@ -1438,17 +1454,18 @@ static int geneve_newlink(struct net *net, struct net_device *dev, { bool use_udp6_rx_checksums = false; struct ip_tunnel_info info; + bool ttl_inherit = false; bool metadata = false; int err; init_tnl_info(&info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, false); + &use_udp6_rx_checksums, &ttl_inherit, false); if (err) return err; err = geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums); + use_udp6_rx_checksums, ttl_inherit); if (err) return err; @@ -1511,6 +1528,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; + bool ttl_inherit; int 
err; /* If the geneve device is configured for metadata (or externally @@ -1523,8 +1541,9 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&info, &geneve->info, sizeof(info)); metadata = geneve->collect_md; use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; + ttl_inherit = geneve->ttl_inherit; err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, true); + &use_udp6_rx_checksums, &ttl_inherit, true); if (err) return err; @@ -1537,6 +1556,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], geneve->info = info; geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; + geneve->ttl_inherit = ttl_inherit; geneve_unquiesce(geneve, gs4, gs6); return 0; @@ -1562,6 +1582,7 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_TX */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_ZERO_CSUM6_RX */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL_INHERIT */ 0; } @@ -1569,6 +1590,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); struct ip_tunnel_info *info = &geneve->info; + bool ttl_inherit = geneve->ttl_inherit; bool metadata = geneve->collect_md; __u8 tmp_vni[3]; __u32 vni; @@ -1614,6 +1636,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; #endif + if (nla_put_u8(skb, IFLA_GENEVE_TTL_INHERIT, ttl_inherit)) + goto nla_put_failure; + return 0; nla_put_failure: @@ -1650,7 +1675,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, NULL, &info, true, true); + err = geneve_configure(net, dev, NULL, &info, true, true, false); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 30612497643c..a7207fa7e451 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -59,12 +59,6 @@ #include <net/net_namespace.h> #include <linux/u64_stats_sync.h> -struct pcpu_lstats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - /* The higher levels take care of making this non-reentrant (it's * called with bh's disabled). 
*/ diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index 7ae1856d1f18..e964d312f4ca 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -19,7 +19,6 @@ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/netdevice.h> #include <linux/netpoll.h> #include <linux/rtnetlink.h> #include <linux/if_vlan.h> @@ -603,6 +602,9 @@ static int net_failover_slave_unregister(struct net_device *slave_dev, primary_dev = rtnl_dereference(nfo_info->primary_dev); standby_dev = rtnl_dereference(nfo_info->standby_dev); + if (WARN_ON_ONCE(slave_dev != primary_dev && slave_dev != standby_dev)) + return -ENODEV; + vlan_vids_del_by_dev(slave_dev, failover_dev); dev_uc_unsync(slave_dev, failover_dev); dev_mc_unsync(slave_dev, failover_dev); @@ -762,8 +764,10 @@ struct failover *net_failover_create(struct net_device *standby_dev) netif_carrier_off(failover_dev); failover = failover_register(failover_dev, &net_failover_ops); - if (IS_ERR(failover)) + if (IS_ERR(failover)) { + err = PTR_ERR(failover); goto err_failover_register; + } return failover; diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c index 4b22955de191..dd0db7534cb3 100644 --- a/drivers/net/nlmon.c +++ b/drivers/net/nlmon.c @@ -6,12 +6,6 @@ #include <linux/if_arp.h> #include <net/rtnetlink.h> -struct pcpu_lstats { - u64 packets; - u64 bytes; - struct u64_stats_sync syncp; -}; - static netdev_tx_t nlmon_xmit(struct sk_buff *skb, struct net_device *dev) { int len = skb->len; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 82070792edbb..3d187cd50eb0 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -240,7 +240,7 @@ config AT803X_PHY config BCM63XX_PHY tristate "Broadcom 63xx SOCs internal PHY" - depends on BCM63XX + depends on BCM63XX || COMPILE_TEST select BCM_NET_PHYLIB ---help--- Currently supports the 6348 and 6358 PHYs. 
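The sunhme and ti/netcp hunks above all replace direct device_node ->name dereferences with the %pOFn vsprintf extension, which prints a device-tree node's name from the node pointer itself. A minimal sketch of the pattern; example_log_child is a hypothetical helper, not part of any patch above:

#include <linux/of.h>
#include <linux/device.h>

/* Hypothetical helper: log a child node by name. %pOFn takes the
 * struct device_node pointer and resolves the node's name for us,
 * so the caller never touches node->name directly. */
static void example_log_child(struct device *dev, struct device_node *child)
{
	dev_info(dev, "found child node %pOFn\n", child);
}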
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c index a9a4edfa23c8..565e49e7f76f 100644 --- a/drivers/net/phy/et1011c.c +++ b/drivers/net/phy/et1011c.c @@ -91,8 +91,7 @@ static struct phy_driver et1011c_driver[] = { { .phy_id = 0x0282f014, .name = "ET1011C", .phy_id_mask = 0xfffffff0, - .features = (PHY_BASIC_FEATURES | SUPPORTED_1000baseT_Full), - .flags = PHY_POLL, + .features = PHY_GBIT_FEATURES, .config_aneg = et1011c_config_aneg, .read_status = et1011c_read_status, } }; diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 98f4b1f706df..2e59a8419b17 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -38,7 +38,6 @@ #include <linux/phy.h> #include <linux/io.h> #include <linux/uaccess.h> -#include <linux/gpio/consumer.h> #include <asm/irq.h> diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 84ca9ff40ae0..2d9676d78d3f 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -54,9 +54,9 @@ enum rgmii_rx_clock_delay { #define HP_AUTO_MDIX_X_OVER_IND_MASK 0x2000 #define MSCC_PHY_LED_MODE_SEL 29 -#define LED_1_MODE_SEL_MASK 0x00F0 -#define LED_0_MODE_SEL_MASK 0x000F -#define LED_1_MODE_SEL_POS 4 +#define LED_MODE_SEL_POS(x) ((x) * 4) +#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x)) +#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x)) #define MSCC_EXT_PAGE_ACCESS 31 #define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */ @@ -103,16 +103,33 @@ enum rgmii_rx_clock_delay { #define DOWNSHIFT_COUNT_MAX 5 +#define MAX_LEDS 4 +#define VSC85XX_SUPP_LED_MODES (BIT(VSC8531_LINK_ACTIVITY) | \ + BIT(VSC8531_LINK_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_100_ACTIVITY) | \ + BIT(VSC8531_LINK_10_ACTIVITY) | \ + BIT(VSC8531_LINK_100_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_1000_ACTIVITY) | \ + BIT(VSC8531_LINK_10_100_ACTIVITY) | \ + BIT(VSC8531_DUPLEX_COLLISION) | \ + BIT(VSC8531_COLLISION) | \ + BIT(VSC8531_ACTIVITY) | \ + BIT(VSC8531_AUTONEG_FAULT) | \ + BIT(VSC8531_SERIAL_MODE) | \ + BIT(VSC8531_FORCE_LED_OFF) | \ + BIT(VSC8531_FORCE_LED_ON)) + struct vsc8531_private { int rate_magic; - u8 led_0_mode; - u8 led_1_mode; + u16 supp_led_modes; + u32 leds_mode[MAX_LEDS]; + u8 nleds; }; #ifdef CONFIG_OF_MDIO struct vsc8531_edge_rate_table { - u16 vddmac; - u8 slowdown[8]; + u32 vddmac; + u32 slowdown[8]; }; static const struct vsc8531_edge_rate_table edge_table[] = { @@ -140,14 +157,8 @@ static int vsc85xx_led_cntl_set(struct phy_device *phydev, mutex_lock(&phydev->lock); reg_val = phy_read(phydev, MSCC_PHY_LED_MODE_SEL); - if (led_num) { - reg_val &= ~LED_1_MODE_SEL_MASK; - reg_val |= (((u16)mode << LED_1_MODE_SEL_POS) & - LED_1_MODE_SEL_MASK); - } else { - reg_val &= ~LED_0_MODE_SEL_MASK; - reg_val |= ((u16)mode & LED_0_MODE_SEL_MASK); - } + reg_val &= ~LED_MODE_SEL_MASK(led_num); + reg_val |= LED_MODE_SEL(led_num, (u16)mode); rc = phy_write(phydev, MSCC_PHY_LED_MODE_SEL, reg_val); mutex_unlock(&phydev->lock); @@ -375,8 +386,7 @@ out_unlock: #ifdef CONFIG_OF_MDIO static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) { - u8 sd; - u16 vdd; + u32 vdd, sd; int rc, i, j; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; @@ -385,11 +395,11 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) if (!of_node) return -ENODEV; - rc = of_property_read_u16(of_node, "vsc8531,vddmac", &vdd); + rc = of_property_read_u32(of_node, "vsc8531,vddmac", &vdd); if (rc != 0) vdd = MSCC_VDDMAC_3300; - rc = 
of_property_read_u8(of_node, "vsc8531,edge-slowdown", &sd); + rc = of_property_read_u32(of_node, "vsc8531,edge-slowdown", &sd); if (rc != 0) sd = 0; @@ -404,19 +414,20 @@ static int vsc85xx_edge_rate_magic_get(struct phy_device *phydev) static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, char *led, - u8 default_mode) + u32 default_mode) { + struct vsc8531_private *priv = phydev->priv; struct device *dev = &phydev->mdio.dev; struct device_node *of_node = dev->of_node; - u8 led_mode; + u32 led_mode; int err; if (!of_node) return -ENODEV; led_mode = default_mode; - err = of_property_read_u8(of_node, led, &led_mode); - if (!err && (led_mode > 15 || led_mode == 7 || led_mode == 11)) { + err = of_property_read_u32(of_node, led, &led_mode); + if (!err && !(BIT(led_mode) & priv->supp_led_modes)) { phydev_err(phydev, "DT %s invalid\n", led); return -EINVAL; } @@ -438,6 +449,28 @@ static int vsc85xx_dt_led_mode_get(struct phy_device *phydev, } #endif /* CONFIG_OF_MDIO */ +static int vsc85xx_dt_led_modes_get(struct phy_device *phydev, + u32 *default_mode) +{ + struct vsc8531_private *priv = phydev->priv; + char led_dt_prop[19]; + int i, ret; + + for (i = 0; i < priv->nleds; i++) { + ret = sprintf(led_dt_prop, "vsc8531,led-%d-mode", i); + if (ret < 0) + return ret; + + ret = vsc85xx_dt_led_mode_get(phydev, led_dt_prop, + default_mode[i]); + if (ret < 0) + return ret; + priv->leds_mode[i] = ret; + } + + return 0; +} + static int vsc85xx_edge_rate_cntl_set(struct phy_device *phydev, u8 edge_rate) { int rc; @@ -545,7 +578,7 @@ static int vsc85xx_set_tunable(struct phy_device *phydev, static int vsc85xx_config_init(struct phy_device *phydev) { - int rc; + int rc, i; struct vsc8531_private *vsc8531 = phydev->priv; rc = vsc85xx_default_config(phydev); @@ -560,13 +593,11 @@ static int vsc85xx_config_init(struct phy_device *phydev) if (rc) return rc; - rc = vsc85xx_led_cntl_set(phydev, 1, vsc8531->led_1_mode); - if (rc) - return rc; - - rc = vsc85xx_led_cntl_set(phydev, 0, vsc8531->led_0_mode); - if (rc) - return rc; + for (i = 0; i < vsc8531->nleds; i++) { + rc = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]); + if (rc) + return rc; + } rc = genphy_config_init(phydev); @@ -626,7 +657,8 @@ static int vsc85xx_probe(struct phy_device *phydev) { struct vsc8531_private *vsc8531; int rate_magic; - int led_mode; + u32 default_mode[2] = {VSC8531_LINK_1000_ACTIVITY, + VSC8531_LINK_100_ACTIVITY}; rate_magic = vsc85xx_edge_rate_magic_get(phydev); if (rate_magic < 0) @@ -639,21 +671,10 @@ static int vsc85xx_probe(struct phy_device *phydev) phydev->priv = vsc8531; vsc8531->rate_magic = rate_magic; + vsc8531->nleds = 2; + vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES; - /* LED[0] and LED[1] mode */ - led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-0-mode", - VSC8531_LINK_1000_ACTIVITY); - if (led_mode < 0) - return led_mode; - vsc8531->led_0_mode = led_mode; - - led_mode = vsc85xx_dt_led_mode_get(phydev, "vsc8531,led-1-mode", - VSC8531_LINK_100_ACTIVITY); - if (led_mode < 0) - return led_mode; - vsc8531->led_1_mode = led_mode; - - return 0; + return vsc85xx_dt_led_modes_get(phydev, default_mode); } /* Microsemi VSC85xx PHYs */ diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index db1172db1e7c..af64a9320fb0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1765,6 +1765,124 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) } EXPORT_SYMBOL(phy_set_max_speed); +/** + * phy_remove_link_mode - Remove a supported link mode + * 
@phydev: phy_device structure to remove link mode from + * @link_mode: Link mode to be removed + * + * Description: Some MACs don't support all link modes which the PHY + * does. e.g. a 1G MAC often does not support 1000Half. Add a helper + * to remove a link mode. + */ +void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode) +{ + WARN_ON(link_mode > 31); + + phydev->supported &= ~BIT(link_mode); + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_remove_link_mode); + +/** + * phy_support_sym_pause - Enable support of symmetrical pause + * @phydev: target phy_device struct + * + * Description: Called by the MAC to indicate it supports symmetrical + * Pause, but not asym pause. + */ +void phy_support_sym_pause(struct phy_device *phydev) +{ + phydev->supported |= SUPPORTED_Pause; + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_support_sym_pause); + +/** + * phy_support_asym_pause - Enable support of asym pause + * @phydev: target phy_device struct + * + * Description: Called by the MAC to indicate it supports Asym Pause. + */ +void phy_support_asym_pause(struct phy_device *phydev) +{ + phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_support_asym_pause); + +/** + * phy_set_sym_pause - Configure symmetric Pause + * @phydev: target phy_device struct + * @rx: Receiver Pause is supported + * @tx: Transmit Pause is supported + * @autoneg: Auto neg should be used + * + * Description: Configure advertised Pause support depending on whether + * receiver pause and pause autoneg are supported. Generally called + * from the set_pauseparam .ndo. + */ +void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, + bool autoneg) +{ + phydev->supported &= ~SUPPORTED_Pause; + + if (rx && tx && autoneg) + phydev->supported |= SUPPORTED_Pause; + + phydev->advertising = phydev->supported; +} +EXPORT_SYMBOL(phy_set_sym_pause); + +/** + * phy_set_asym_pause - Configure Pause and Asym Pause + * @phydev: target phy_device struct + * @rx: Receiver Pause is supported + * @tx: Transmit Pause is supported + * + * Description: Configure advertised Pause support depending on whether + * transmit and receiver pause are supported. If there has been a + * change in advertising, trigger a new autoneg. Generally called from + * the set_pauseparam .ndo. + */ +void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx) +{ + u16 oldadv = phydev->advertising; + u16 newadv = oldadv & ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + + if (rx) + newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + if (tx) + newadv ^= SUPPORTED_Asym_Pause; + + if (oldadv != newadv) { + phydev->advertising = newadv; + + if (phydev->autoneg) + phy_start_aneg(phydev); + } +} +EXPORT_SYMBOL(phy_set_asym_pause); + +/** + * phy_validate_pause - Test if the PHY/MAC supports the pause configuration + * @phydev: phy_device struct + * @pp: requested pause configuration + * + * Description: Test if the PHY/MAC combination supports the Pause + * configuration the user is requesting. Returns true if it is + * supported, false otherwise. 
+ */ +bool phy_validate_pause(struct phy_device *phydev, + struct ethtool_pauseparam *pp) +{ + if (!(phydev->supported & SUPPORTED_Pause) || + (!(phydev->supported & SUPPORTED_Asym_Pause) && + pp->rx_pause != pp->tx_pause)) + return false; + return true; +} +EXPORT_SYMBOL(phy_validate_pause); + static void of_set_phy_supported(struct phy_device *phydev) { struct device_node *node = phydev->mdio.dev.of_node; diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index fbd548a1ad84..2fe9a87b55b5 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -86,7 +86,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE101P_PHY_ID, .phy_id_mask = 0xfffffff0, .name = "STe101p", - .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, @@ -97,7 +97,7 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id = STE100P_PHY_ID, .phy_id_mask = 0xffffffff, .name = "STe100p", - .features = PHY_BASIC_FEATURES | SUPPORTED_Pause, + .features = PHY_BASIC_FEATURES, .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 02ad03a2fab7..500bc0027c1b 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2400,7 +2400,7 @@ ppp_mp_reconstruct(struct ppp *ppp) if (ppp->mrru == 0) /* do nothing until mrru is set */ return NULL; - head = list->next; + head = __skb_peek(list); tail = NULL; skb_queue_walk_safe(list, p, tmp) { again: diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f0f7cd977667..a4ab4a791fe7 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -619,7 +619,7 @@ static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad, #define TAP_RESERVE HH_DATA_OFF(ETH_HLEN) /* Get packet from user space buffer */ -static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, +static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, struct iov_iter *from, int noblock) { int good_linear = SKB_MAX_HEAD(TAP_RESERVE); @@ -663,7 +663,7 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, if (unlikely(len < ETH_HLEN)) goto err; - if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { + if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { struct iov_iter i; copylen = vnet_hdr.hdr_len ? 
@@ -724,11 +724,11 @@ static ssize_t tap_get_user(struct tap_queue *q, struct msghdr *m, tap = rcu_dereference(q->tap); /* copy skb_ubuf_info for callback when skb has no error */ if (zerocopy) { - skb_shinfo(skb)->destructor_arg = m->msg_control; + skb_shinfo(skb)->destructor_arg = msg_control; skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; - } else if (m && m->msg_control) { - struct ubuf_info *uarg = m->msg_control; + } else if (msg_control) { + struct ubuf_info *uarg = msg_control; uarg->callback(uarg, false); } @@ -1146,11 +1146,87 @@ static const struct file_operations tap_fops = { #endif }; +static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) +{ + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + int buflen = hdr->buflen; + int vnet_hdr_len = 0; + struct tap_dev *tap; + struct sk_buff *skb; + int err, depth; + + if (q->flags & IFF_VNET_HDR) + vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); + + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto err; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + skb_set_network_header(skb, ETH_HLEN); + skb_reset_mac_header(skb); + skb->protocol = eth_hdr(skb)->h_proto; + + if (vnet_hdr_len) { + err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q)); + if (err) + goto err_kfree; + } + + skb_probe_transport_header(skb, ETH_HLEN); + + /* Move network header to the right position for VLAN tagged packets */ + if ((skb->protocol == htons(ETH_P_8021Q) || + skb->protocol == htons(ETH_P_8021AD)) && + __vlan_get_protocol(skb, skb->protocol, &depth) != 0) + skb_set_network_header(skb, depth); + + rcu_read_lock(); + tap = rcu_dereference(q->tap); + if (tap) { + skb->dev = tap->dev; + dev_queue_xmit(skb); + } else { + kfree_skb(skb); + } + rcu_read_unlock(); + + return 0; + +err_kfree: + kfree_skb(skb); +err: + rcu_read_lock(); + tap = rcu_dereference(q->tap); + if (tap && tap->count_tx_dropped) + tap->count_tx_dropped(tap); + rcu_read_unlock(); + return err; +} + static int tap_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { struct tap_queue *q = container_of(sock, struct tap_queue, sock); - return tap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp; + int i; + + if (ctl && (ctl->type == TUN_MSG_PTR)) { + for (i = 0; i < ctl->num; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tap_get_user_xdp(q, xdp); + } + return 0; + } + + return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter, + m->msg_flags & MSG_DONTWAIT); } static int tap_recvmsg(struct socket *sock, struct msghdr *m, diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index e0d6760f3219..c48c3a1eb1f8 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Networking over Thunderbolt cable using Apple ThunderboltIP protocol * @@ -5,10 +6,6 @@ * Authors: Amir Levy <amir.jer.levy@intel.com> * Michael Jamet <michael.jamet@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/atomic.h> diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ebd07ad82431..2a2cd35853b7 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -113,7 +113,6 @@ do { \ } while (0) #endif -#define TUN_HEADROOM 256 #define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) /* TUN device flags */ @@ -869,6 +868,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, tun_napi_init(tun, tfile, napi); } + if (rtnl_dereference(tun->xdp_prog)) + sock_set_flag(&tfile->sk, SOCK_XDP); + tun_set_real_num_queues(tun); /* device is allowed to go away first, so no need to hold extra @@ -1241,13 +1243,29 @@ static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct tun_struct *tun = netdev_priv(dev); + struct tun_file *tfile; struct bpf_prog *old_prog; + int i; old_prog = rtnl_dereference(tun->xdp_prog); rcu_assign_pointer(tun->xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); + for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + list_for_each_entry(tfile, &tun->disabled, next) { + if (prog) + sock_set_flag(&tfile->sk, SOCK_XDP); + else + sock_reset_flag(&tfile->sk, SOCK_XDP); + } + return 0; } @@ -1617,6 +1635,55 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, return true; } +static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf, + int buflen, int len, int pad) +{ + struct sk_buff *skb = build_skb(buf, buflen); + + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_reserve(skb, pad); + skb_put(skb, len); + + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + return skb; +} + +static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog, + struct xdp_buff *xdp, u32 act) +{ + int err; + + switch (act) { + case XDP_REDIRECT: + err = xdp_do_redirect(tun->dev, xdp, xdp_prog); + if (err) + return err; + break; + case XDP_TX: + err = tun_xdp_tx(tun->dev, xdp); + if (err < 0) + return err; + break; + case XDP_PASS: + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + this_cpu_inc(tun->pcpu_stats->rx_dropped); + break; + } + + return act; +} + static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct tun_file *tfile, struct iov_iter *from, @@ -1624,18 +1691,17 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, int len, int *skb_xdp) { struct page_frag *alloc_frag = ¤t->task_frag; - struct sk_buff *skb; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - unsigned int delta = 0; char *buf; size_t copied; - int err, pad = TUN_RX_PAD; + int pad = TUN_RX_PAD; + int err = 0; rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) - pad += TUN_HEADROOM; + pad += XDP_PACKET_HEADROOM; buflen += SKB_DATA_ALIGN(len + pad); rcu_read_unlock(); @@ -1654,17 +1720,18 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, * of xdp_prog above, this should be rare and for simplicity * we do XDP on skb in case the headroom is not enough. 
*/ - if (hdr->gso_type || !xdp_prog) + if (hdr->gso_type || !xdp_prog) { *skb_xdp = 1; - else - *skb_xdp = 0; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); + } + + *skb_xdp = 0; local_bh_disable(); rcu_read_lock(); xdp_prog = rcu_dereference(tun->xdp_prog); - if (xdp_prog && !*skb_xdp) { + if (xdp_prog) { struct xdp_buff xdp; - void *orig_data; u32 act; xdp.data_hard_start = buf; @@ -1672,66 +1739,33 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; xdp.rxq = &tfile->xdp_rxq; - orig_data = xdp.data; - act = bpf_prog_run_xdp(xdp_prog, &xdp); - switch (act) { - case XDP_REDIRECT: - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); - xdp_do_flush_map(); - if (err) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_TX: + act = bpf_prog_run_xdp(xdp_prog, &xdp); + if (act == XDP_REDIRECT || act == XDP_TX) { get_page(alloc_frag->page); alloc_frag->offset += buflen; - if (tun_xdp_tx(tun->dev, &xdp) < 0) - goto err_redirect; - rcu_read_unlock(); - local_bh_enable(); - return NULL; - case XDP_PASS: - delta = orig_data - xdp.data; - len = xdp.data_end - xdp.data; - break; - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(tun->dev, xdp_prog, act); - /* fall through */ - case XDP_DROP: - goto err_xdp; } - } + err = tun_xdp_act(tun, xdp_prog, &xdp, act); + if (err < 0) + goto err_xdp; + if (err == XDP_REDIRECT) + xdp_do_flush_map(); + if (err != XDP_PASS) + goto out; - skb = build_skb(buf, buflen); - if (!skb) { - rcu_read_unlock(); - local_bh_enable(); - return ERR_PTR(-ENOMEM); + pad = xdp.data - xdp.data_hard_start; + len = xdp.data_end - xdp.data; } - - skb_reserve(skb, pad - delta); - skb_put(skb, len); - get_page(alloc_frag->page); - alloc_frag->offset += buflen; - rcu_read_unlock(); local_bh_enable(); - return skb; + return __tun_build_skb(alloc_frag, buf, buflen, len, pad); -err_redirect: - put_page(alloc_frag->page); err_xdp: + put_page(alloc_frag->page); +out: rcu_read_unlock(); local_bh_enable(); - this_cpu_inc(tun->pcpu_stats->rx_dropped); return NULL; } @@ -2392,18 +2426,133 @@ static void tun_sock_write_space(struct sock *sk) kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } +static int tun_xdp_one(struct tun_struct *tun, + struct tun_file *tfile, + struct xdp_buff *xdp, int *flush) +{ + struct tun_xdp_hdr *hdr = xdp->data_hard_start; + struct virtio_net_hdr *gso = &hdr->gso; + struct tun_pcpu_stats *stats; + struct bpf_prog *xdp_prog; + struct sk_buff *skb = NULL; + u32 rxhash = 0, act; + int buflen = hdr->buflen; + int err = 0; + bool skb_xdp = false; + + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + if (gso->gso_type) { + skb_xdp = true; + goto build; + } + xdp_set_data_meta_invalid(xdp); + xdp->rxq = &tfile->xdp_rxq; + + act = bpf_prog_run_xdp(xdp_prog, xdp); + err = tun_xdp_act(tun, xdp_prog, xdp, act); + if (err < 0) { + put_page(virt_to_head_page(xdp->data)); + return err; + } + + switch (err) { + case XDP_REDIRECT: + *flush = true; + /* fall through */ + case XDP_TX: + return 0; + case XDP_PASS: + break; + default: + put_page(virt_to_head_page(xdp->data)); + return 0; + } + } + +build: + skb = build_skb(xdp->data_hard_start, buflen); + if (!skb) { + err = -ENOMEM; + goto out; + } + + skb_reserve(skb, xdp->data - xdp->data_hard_start); + skb_put(skb, xdp->data_end - xdp->data); + + if (virtio_net_hdr_to_skb(skb, gso, 
tun_is_little_endian(tun))) { + this_cpu_inc(tun->pcpu_stats->rx_frame_errors); + kfree_skb(skb); + err = -EINVAL; + goto out; + } + + skb->protocol = eth_type_trans(skb, tun->dev); + skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + + if (skb_xdp) { + err = do_xdp_generic(xdp_prog, skb); + if (err != XDP_PASS) + goto out; + } + + if (!rcu_dereference(tun->steering_prog)) + rxhash = __skb_get_hash_symmetric(skb); + + netif_receive_skb(skb); + + stats = get_cpu_ptr(tun->pcpu_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + put_cpu_ptr(stats); + + if (rxhash) + tun_flow_update(tun, rxhash, tfile); + +out: + return err; +} + static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) { - int ret; + int ret, i; struct tun_file *tfile = container_of(sock, struct tun_file, socket); struct tun_struct *tun = tun_get(tfile); + struct tun_msg_ctl *ctl = m->msg_control; + struct xdp_buff *xdp; if (!tun) return -EBADFD; - ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter, + if (ctl && (ctl->type == TUN_MSG_PTR)) { + int n = ctl->num; + int flush = 0; + + local_bh_disable(); + rcu_read_lock(); + + for (i = 0; i < n; i++) { + xdp = &((struct xdp_buff *)ctl->ptr)[i]; + tun_xdp_one(tun, tfile, xdp, &flush); + } + + if (flush) + xdp_do_flush_map(); + + rcu_read_unlock(); + local_bh_enable(); + + ret = total_len; + goto out; + } + + ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter, m->msg_flags & MSG_DONTWAIT, m->msg_flags & MSG_MORE); +out: tun_put(tun); return ret; } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index a9991c5f4736..04f0a094d864 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1,18 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2015 Microchip Technology - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ #include <linux/version.h> #include <linux/module.h> @@ -1027,7 +1015,7 @@ done: static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata, int index, u8 addr[ETH_ALEN]) { - u32 temp; + u32 temp; if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) { temp = addr[3]; @@ -1847,8 +1835,7 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) node = of_get_child_by_name(dev->udev->dev.of_node, "mdio"); ret = of_mdiobus_register(dev->mdiobus, node); - if (node) - of_node_put(node); + of_node_put(node); if (ret) { netdev_err(dev->net, "can't register MDIO bus\n"); goto exit1; @@ -2178,7 +2165,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) } /* MAC doesn't support 1000T Half */ - phydev->supported &= ~SUPPORTED_1000baseT_Half; + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); /* support both flow controls */ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); @@ -2702,7 +2689,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) static int lan78xx_stop(struct net_device *net) { - struct lan78xx_net *dev = netdev_priv(net); + struct lan78xx_net *dev = netdev_priv(net); if (timer_pending(&dev->stat_monitor)) del_timer_sync(&dev->stat_monitor); @@ -2952,6 +2939,11 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf) int i; ret = lan78xx_get_endpoints(dev, intf); + if (ret) { + netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n", + ret); + return ret; + } dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL); @@ -3080,7 +3072,7 @@ static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev, static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) { - int status; + int status; if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { skb_queue_tail(&dev->rxq_pause, skb); @@ -3347,9 +3339,9 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev) count = 0; length = 0; spin_lock_irqsave(&tqp->lock, flags); - for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { + skb_queue_walk(tqp, skb) { if (skb_is_gso(skb)) { - if (pkt_cnt) { + if (!skb_queue_is_first(tqp, skb)) { /* handle previous packets first */ break; } @@ -3640,10 +3632,10 @@ static void intr_complete(struct urb *urb) static void lan78xx_disconnect(struct usb_interface *intf) { - struct lan78xx_net *dev; - struct usb_device *udev; - struct net_device *net; - struct phy_device *phydev; + struct lan78xx_net *dev; + struct usb_device *udev; + struct net_device *net; + struct phy_device *phydev; dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); @@ -3761,7 +3753,6 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; - strcpy(netdev->name, "eth%d"); if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len)) netdev->mtu = dev->hard_mtu - netdev->hard_header_len; diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h index 25aa54611774..968e5e5faee0 100644 --- a/drivers/net/usb/lan78xx.h +++ b/drivers/net/usb/lan78xx.h @@ -1,18 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ /* * Copyright (C) 2015 Microchip Technology - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #ifndef _LAN78XX_H #define _LAN78XX_H diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 770aa624147f..73aa33364d80 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1527,6 +1527,7 @@ static void usbnet_bh (struct timer_list *t) continue; case tx_done: kfree(entry->urb->sg); + /* fall through */ case rx_cleanup: usb_free_urb (entry->urb); dev_kfree_skb (skb); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 41a00cd76955..8fc64b67f01e 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -24,6 +24,7 @@ #include <linux/filter.h> #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> +#include <linux/net_tstamp.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" @@ -114,6 +115,18 @@ static void veth_get_ethtool_stats(struct net_device *dev, data[0] = peer ? peer->ifindex : 0; } +static int veth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + info->so_timestamping = + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + + return 0; +} + static const struct ethtool_ops veth_ethtool_ops = { .get_drvinfo = veth_get_drvinfo, .get_link = ethtool_op_get_link, @@ -121,6 +134,7 @@ static const struct ethtool_ops veth_ethtool_ops = { .get_sset_count = veth_get_sset_count, .get_ethtool_stats = veth_get_ethtool_stats, .get_link_ksettings = veth_get_link_ksettings, + .get_ts_info = veth_get_ts_info, }; /* general routines */ @@ -201,6 +215,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_record_rx_queue(skb, rxq); } + skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, rcv_xdp) == NET_RX_SUCCESS)) { struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats); diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c index c28bdce14fd5..7bad5c95551f 100644 --- a/drivers/net/vsockmon.c +++ b/drivers/net/vsockmon.c @@ -11,12 +11,6 @@ #define DEFAULT_MTU (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + \ sizeof(struct af_vsockmon_hdr)) -struct pcpu_lstats { - u64 rx_packets; - u64 rx_bytes; - struct u64_stats_sync syncp; -}; - static int vsockmon_dev_init(struct net_device *dev) { dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); @@ -56,8 +50,8 @@ static netdev_tx_t vsockmon_xmit(struct sk_buff *skb, struct net_device *dev) struct pcpu_lstats *stats = this_cpu_ptr(dev->lstats); u64_stats_update_begin(&stats->syncp); - stats->rx_bytes += len; - stats->rx_packets++; + stats->bytes += len; + stats->packets++; u64_stats_update_end(&stats->syncp); dev_kfree_skb(skb); @@ -80,8 +74,8 @@ vsockmon_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) do { start = u64_stats_fetch_begin_irq(&vstats->syncp); - tbytes = vstats->rx_bytes; - tpackets = vstats->rx_packets; + tbytes = vstats->bytes; + tpackets = vstats->packets; } while (u64_stats_fetch_retry_irq(&vstats->syncp, start)); packets += tpackets; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ababba37d735..e5d236595206 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -464,7 +464,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev 
*vxlan, struct vxlan_fdb *f; f = __vxlan_find_mac(vxlan, mac, vni); - if (f) + if (f && f->used != jiffies) f->used = jiffies; return f; diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 5f0366a125e2..8523ade16030 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -97,6 +97,12 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) if (priv->tsa) { uf_info->tsa = 1; uf_info->ctsp = 1; + uf_info->cds = 1; + uf_info->ctss = 1; + } else { + uf_info->cds = 0; + uf_info->ctsp = 0; + uf_info->ctss = 0; } /* This sets HPM register in CMXUCR register which configures a @@ -265,7 +271,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr); iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr); iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt); - iowrite16be(DEFAULT_ADDR_MASK, &priv->ucc_pram->hmask); + iowrite16be(priv->hmask, &priv->ucc_pram->hmask); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2); iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3); @@ -375,6 +381,10 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; break; + case ARPHRD_ETHER: + dev->stats.tx_bytes += skb->len; + break; + default: dev->stats.tx_dropped++; dev_kfree_skb(skb); @@ -512,6 +522,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) break; case ARPHRD_PPP: + case ARPHRD_ETHER: length -= HDLC_CRC_SIZE; skb = dev_alloc_skb(length); @@ -780,6 +791,7 @@ static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding, if (parity != PARITY_NONE && parity != PARITY_CRC32_PR1_CCITT && + parity != PARITY_CRC16_PR0_CCITT && parity != PARITY_CRC16_PR1_CCITT) return -EINVAL; @@ -987,11 +999,17 @@ static const struct dev_pm_ops uhdlc_pm_ops = { #define HDLC_PM_OPS NULL #endif +static void uhdlc_tx_timeout(struct net_device *ndev) +{ + netdev_err(ndev, "%s\n", __func__); +} + static const struct net_device_ops uhdlc_ops = { .ndo_open = uhdlc_open, .ndo_stop = uhdlc_close, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = uhdlc_ioctl, + .ndo_tx_timeout = uhdlc_tx_timeout, }; static int ucc_hdlc_probe(struct platform_device *pdev) @@ -1015,7 +1033,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) } ucc_num = val - 1; - if ((ucc_num > 3) || (ucc_num < 0)) { + if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) { dev_err(&pdev->dev, ": Invalid UCC num\n"); return -EINVAL; } @@ -1090,6 +1108,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) goto free_utdm; } + if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask)) + uhdlc_priv->hmask = DEFAULT_ADDR_MASK; + ret = uhdlc_init(uhdlc_priv); if (ret) { dev_err(&pdev->dev, "Failed to init uhdlc\n"); @@ -1107,6 +1128,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) hdlc = dev_to_hdlc(dev); dev->tx_queue_len = 16; dev->netdev_ops = &uhdlc_ops; + dev->watchdog_timeo = 2 * HZ; hdlc->attach = ucc_hdlc_attach; hdlc->xmit = ucc_hdlc_tx; netif_napi_add(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32); diff --git a/drivers/net/wan/fsl_ucc_hdlc.h b/drivers/net/wan/fsl_ucc_hdlc.h index c21134c1f180..b99fa2f1cd99 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.h +++ b/drivers/net/wan/fsl_ucc_hdlc.h @@ -106,6 +106,7 @@ struct ucc_hdlc_private { unsigned short encoding; unsigned short parity; + unsigned short hmask; u32 clocking; spinlock_t lock; /* lock for Tx BD and Tx buffer */ #ifdef CONFIG_PM 
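The lan78xx_tx_bh hunk above swaps an open-coded walk of tqp->next for the skb_queue_walk iterator paired with skb_queue_is_first, so the loop never manipulates the queue's internal list pointers. A minimal sketch of the iterator under the queue lock, assuming a caller that only needs aggregate statistics (count_queued_bytes is hypothetical):

#include <linux/skbuff.h>

/* Read-only walk of an skb queue: entries must not be unlinked inside
 * the walk, and the queue lock is held across the traversal. */
static unsigned int count_queued_bytes(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned int bytes = 0;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb_queue_walk(q, skb)
		bytes += skb->len;
	spin_unlock_irqrestore(&q->lock, flags);

	return bytes;
}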
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 51c3330bc316..49533f884993 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1436,7 +1436,7 @@ static int wil_freq_debugfs_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; - u16 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0; + u32 freq = wdev->chandef.chan ? wdev->chandef.chan->center_freq : 0; seq_printf(s, "Freq = %d\n", freq); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index d2f788d88668..3e37c8cf82c6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -576,7 +576,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev, if (pktq->qlen == 1) err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, - pktq->next); + __skb_peek(pktq)); else if (!sdiodev->sg_support) { glom_skb = brcmu_pkt_buf_get_skb(totlen); if (!glom_skb) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index a907d7b065fa..1e2fd289323a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2189,7 +2189,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, * length of the chain (including padding) */ if (bus->txglom) - brcmf_sdio_update_hwhdr(pktq->next->data, total_len); + brcmf_sdio_update_hwhdr(__skb_peek(pktq)->data, total_len); return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 73969dbeb5c5..27db4a3ba1f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -476,30 +476,40 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE, + IEEE80211_HE_MAC_CAP0_HTC_HE | + IEEE80211_HE_MAC_CAP0_TWT_REQ, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = + IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | + IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, + .mac_cap_info[5] = + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + 
IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, .phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | @@ -511,18 +521,31 @@ static struct ieee80211_sband_iftype_data iwl_he_capa = { IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, .phy_cap_info[5] = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO | IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, .phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | - IEEE80211_HE_PHY_CAP7_MAX_NC_7, + IEEE80211_HE_PHY_CAP7_MAX_NC_1, .phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU, + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + .phy_cap_info[9] = + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, }, /* * Set default Tx/Rx HE MCS NSS Support field. 
Indicate support @@ -559,9 +582,11 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ if ((tx_chains & rx_chains) != ANT_AB) { iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &= - ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS; + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &= - ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS; + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[7] &= + ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b15b0d84bb7e..d46f3fbea46e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1978,10 +1978,6 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); } - if (sta->he_cap.he_cap_elem.mac_cap_info[2] & - IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED) - sta_ctxt_cmd.htc_flags |= - cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED); if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); if (sta->he_cap.he_cap_elem.mac_cap_info[3] & diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c index 3a4214d362ff..790784568ad2 100644 --- a/drivers/net/wireless/intersil/p54/txrx.c +++ b/drivers/net/wireless/intersil/p54/txrx.c @@ -121,8 +121,8 @@ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) } if (unlikely(!target_skb)) { if (priv->rx_end - last_addr >= len) { - target_skb = priv->tx_queue.prev; - if (!skb_queue_empty(&priv->tx_queue)) { + target_skb = skb_peek_tail(&priv->tx_queue); + if (target_skb) { info = IEEE80211_SKB_CB(target_skb); range = (void *)info->rate_driver_data; target_addr = range->end_addr; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 1068757ec42e..f3863101af78 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -3,6 +3,7 @@ * Copyright (c) 2008, Jouni Malinen <j@w1.fi> * Copyright (c) 2011, Javier Lopez <jlopex@gmail.com> * Copyright (c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -2529,23 +2530,20 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz = { IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] 
= IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | @@ -2579,18 +2577,16 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = { IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_DUAL_BAND | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, @@ -2598,7 +2594,7 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz = { IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 9a1d15b3ce45..cec37787ecf8 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -499,7 +499,7 @@ static void rtl8187b_status_cb(struct urb *urb) if (cmd_type == 1) { unsigned int pkt_rc, seq_no; bool tok; - struct sk_buff *skb; + struct sk_buff *skb, *iter; struct ieee80211_hdr *ieee80211hdr; unsigned long flags; @@ -508,8 +508,9 @@ static void rtl8187b_status_cb(struct urb *urb) seq_no = (val >> 16) & 0xFFF; spin_lock_irqsave(&priv->b_tx_status.queue.lock, flags); - skb_queue_reverse_walk(&priv->b_tx_status.queue, skb) { - ieee80211hdr = (struct ieee80211_hdr *)skb->data; + skb = NULL; + skb_queue_reverse_walk(&priv->b_tx_status.queue, iter) { + ieee80211hdr = (struct ieee80211_hdr *)iter->data; /* * While testing, it was discovered that the seq_no @@ -522,10 +523,12 @@ static void rtl8187b_status_cb(struct urb *urb) * it's unlikely we wrongly ack some sent data */ if ((le16_to_cpu(ieee80211hdr->seq_ctrl) - & 0xFFF) == seq_no) + & 0xFFF) == seq_no) { + skb = iter; break; + } } - if (skb != (struct sk_buff *) &priv->b_tx_status.queue) { + if (skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); __skb_unlink(skb, &priv->b_tx_status.queue); diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 3621e05a7494..80aae3a32c2a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1660,8 +1660,7 @@ module_init(netback_init); static void __exit netback_fini(void) { #ifdef CONFIG_DEBUG_FS - if (!IS_ERR_OR_NULL(xen_netback_dbg_root)) - debugfs_remove_recursive(xen_netback_dbg_root); + debugfs_remove_recursive(xen_netback_dbg_root); #endif /* CONFIG_DEBUG_FS */ xenvif_xenbus_fini(); } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index cd51492ae6c2..fe1d52247bbe 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -254,8 +254,7 @@ static void xenvif_debugfs_delif(struct xenvif *vif) if 
(IS_ERR_OR_NULL(xen_netback_dbg_root)) return; - if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) - debugfs_remove_recursive(vif->xenvif_dbg_root); + debugfs_remove_recursive(vif->xenvif_dbg_root); vif->xenvif_dbg_root = NULL; } #endif /* CONFIG_DEBUG_FS */
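The two xen-netback hunks above drop the IS_ERR_OR_NULL guard before debugfs_remove_recursive(), which already returns early for NULL and ERR_PTR dentries. A minimal sketch of the resulting cleanup shape; example_dbg_root and example_debugfs_exit are hypothetical stand-ins:

#include <linux/debugfs.h>

static struct dentry *example_dbg_root;	/* may be NULL or an ERR_PTR */

static void example_debugfs_exit(void)
{
	/* Safe to call unconditionally: debugfs_remove_recursive() is
	 * a no-op on NULL and ERR_PTR dentries. */
	debugfs_remove_recursive(example_dbg_root);
	example_dbg_root = NULL;
}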