Diffstat (limited to 'drivers/net/ethernet/broadcom'):

 drivers/net/ethernet/broadcom/Kconfig             |  12
 drivers/net/ethernet/broadcom/bcm63xx_enet.c      |   2
 drivers/net/ethernet/broadcom/bcmsysport.c        | 115
 drivers/net/ethernet/broadcom/bcmsysport.h        |  24
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c   |  14
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h   |   4
 drivers/net/ethernet/broadcom/bnxt/Makefile       |   2
 drivers/net/ethernet/broadcom/bnxt/bnxt.c         | 478
 drivers/net/ethernet/broadcom/bnxt/bnxt.h         |  95
 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c     |  17
 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h     |   1
 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |  38
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h     | 500
 drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c   |  15
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c      | 834
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h      | 158
 drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c     |   2
 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c     | 513
 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h     |  89
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c     |   4
 drivers/net/ethernet/broadcom/genet/bcmgenet.c    |  81
 drivers/net/ethernet/broadcom/genet/bcmgenet.h    |  14
 drivers/net/ethernet/broadcom/genet/bcmmii.c      | 256
 drivers/net/ethernet/broadcom/sb1250-mac.c        |  12
 drivers/net/ethernet/broadcom/tg3.c               |   8
 25 files changed, 2897 insertions(+), 391 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 96413808c726..67134ece1107 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -61,10 +61,12 @@ config BCM63XX_ENET
config BCMGENET
tristate "Broadcom GENET internal MAC support"
+ depends on OF && HAS_IOMEM
select MII
select PHYLIB
select FIXED_PHY
select BCM7XXX_PHY
+ select MDIO_BCM_UNIMAC
help
This driver supports the built-in Ethernet MACs found in the
Broadcom BCM7xxx Set Top Box family chipset.
@@ -193,6 +195,7 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
+ depends on MAY_USE_DEVLINK
select FW_LOADER
select LIBCRC32C
---help---
@@ -209,6 +212,15 @@ config BNXT_SRIOV
Virtualization support in the NetXtreme-C/E products. This
allows for virtual function acceleration in virtual environments.
+config BNXT_FLOWER_OFFLOAD
+ bool "TC Flower offload support for NetXtreme-C/E"
+ depends on BNXT
+ default y
+ ---help---
+ This option enables TC Flower packet classifier offload for
+ the SR-IOV switchdev eswitch of NetXtreme-C/E devices.
+
config BNXT_DCB
bool "Data Center Bridging (DCB) Support"
default n
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 61a88b64bd39..4f3845a58126 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -2674,7 +2674,7 @@ static int bcm_enetsw_set_ringparam(struct net_device *dev,
return 0;
}
-static struct ethtool_ops bcm_enetsw_ethtool_ops = {
+static const struct ethtool_ops bcm_enetsw_ethtool_ops = {
.get_strings = bcm_enetsw_get_strings,
.get_sset_count = bcm_enetsw_get_sset_count,
.get_ethtool_stats = bcm_enetsw_get_ethtool_stats,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dc3052751bc1..a6572b51435a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -32,13 +32,13 @@
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
{ \
- u32 reg = __raw_readl(priv->base + offset + off); \
+ u32 reg = readl_relaxed(priv->base + offset + off); \
return reg; \
} \
static inline void name##_writel(struct bcm_sysport_priv *priv, \
u32 val, u32 off) \
{ \
- __raw_writel(val, priv->base + offset + off); \
+ writel_relaxed(val, priv->base + offset + off); \
} \
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
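The switch from __raw_readl()/__raw_writel() to readl_relaxed()/writel_relaxed() above is not purely cosmetic: both families omit the heavy memory barriers of readl()/writel(), but the __raw_*() variants access registers in native CPU byte order while the *_relaxed() variants always use little-endian order. On big-endian hosts this changes what the hardware sees, which is presumably why this same patch makes the RSB/TSB byte-swap configuration further down explicitly dependent on CONFIG_CPU_BIG_ENDIAN.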
@@ -59,14 +59,14 @@ static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
if (priv->is_lite && off >= RDMA_STATUS)
off += 4;
- return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+ return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
if (priv->is_lite && off >= RDMA_STATUS)
off += 4;
- __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+ writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
@@ -110,10 +110,10 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
- __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
d + DESC_ADDR_HI_STATUS_LEN);
#endif
- __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+ writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
@@ -201,10 +201,10 @@ static int bcm_sysport_set_features(struct net_device *dev,
*/
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
/* general stats */
- STAT_NETDEV(rx_packets),
- STAT_NETDEV(tx_packets),
- STAT_NETDEV(rx_bytes),
- STAT_NETDEV(tx_bytes),
+ STAT_NETDEV64(rx_packets),
+ STAT_NETDEV64(tx_packets),
+ STAT_NETDEV64(rx_bytes),
+ STAT_NETDEV64(tx_bytes),
STAT_NETDEV(rx_errors),
STAT_NETDEV(tx_errors),
STAT_NETDEV(rx_dropped),
@@ -316,6 +316,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
switch (type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_NETDEV64:
case BCM_SYSPORT_STAT_RXCHK:
case BCM_SYSPORT_STAT_RBUF:
case BCM_SYSPORT_STAT_SOFT:
@@ -398,6 +399,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
s = &bcm_sysport_gstrings_stats[i];
switch (s->type) {
case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_NETDEV64:
case BCM_SYSPORT_STAT_SOFT:
continue;
case BCM_SYSPORT_STAT_MIB_RX:
@@ -434,7 +436,10 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_stats64 *stats64 = &priv->stats64;
+ struct u64_stats_sync *syncp = &priv->syncp;
struct bcm_sysport_tx_ring *ring;
+ unsigned int start;
int i, j;
if (netif_running(dev))
@@ -447,14 +452,22 @@ static void bcm_sysport_get_stats(struct net_device *dev,
s = &bcm_sysport_gstrings_stats[i];
if (s->type == BCM_SYSPORT_STAT_NETDEV)
p = (char *)&dev->stats;
+ else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
+ p = (char *)stats64;
else
p = (char *)priv;
if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
continue;
-
p += s->stat_offset;
- data[j] = *(unsigned long *)p;
+
+ if (s->stat_sizeof == sizeof(u64))
+ do {
+ start = u64_stats_fetch_begin_irq(syncp);
+ data[i] = *(u64 *)p;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
+ else
+ data[i] = *(u32 *)p;
j++;
}
@@ -597,7 +610,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
- dev_kfree_skb_any(cb->skb);
+ dev_consume_skb_any(cb->skb);
cb->skb = NULL;
dma_unmap_addr_set(cb, dma_addr, 0);
}
@@ -666,6 +679,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget)
{
+ struct bcm_sysport_stats64 *stats64 = &priv->stats64;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb;
@@ -769,6 +783,10 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
skb->protocol = eth_type_trans(skb, ndev);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
+ u64_stats_update_begin(&priv->syncp);
+ stats64->rx_packets++;
+ stats64->rx_bytes += len;
+ u64_stats_update_end(&priv->syncp);
napi_gro_receive(&priv->napi, skb);
next:
@@ -791,17 +809,15 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
struct device *kdev = &priv->pdev->dev;
if (cb->skb) {
- ring->bytes += cb->skb->len;
*bytes_compl += cb->skb->len;
dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len),
DMA_TO_DEVICE);
- ring->packets++;
(*pkts_compl)++;
bcm_sysport_free_cb(cb);
/* SKB fragment */
} else if (dma_unmap_addr(cb, dma_addr)) {
- ring->bytes += dma_unmap_len(cb, dma_len);
+ *bytes_compl += dma_unmap_len(cb, dma_len);
dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
dma_unmap_addr_set(cb, dma_addr, 0);
@@ -812,9 +828,9 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
- struct net_device *ndev = priv->netdev;
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct net_device *ndev = priv->netdev;
struct bcm_sysport_cb *cb;
u32 hw_ind;
@@ -853,6 +869,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
last_c_index &= (num_tx_cbs - 1);
}
+ u64_stats_update_begin(&priv->syncp);
+ ring->packets += pkts_compl;
+ ring->bytes += bytes_compl;
+ u64_stats_update_end(&priv->syncp);
+
ring->c_index = c_index;
netif_dbg(priv, tx_done, ndev,
@@ -1346,6 +1367,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc),
+ ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
@@ -1369,6 +1392,19 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+ /* Do not use tdma_control_bit() here because TSB_SWAP1 collides
+ * with the original definition of ACB_ALGO
+ */
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->is_lite)
+ reg &= ~BIT(TSB_SWAP1);
+ /* Set a correct TSB format based on host endian */
+ if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ reg |= tdma_control_bit(priv, TSB_SWAP0);
+ else
+ reg &= ~tdma_control_bit(priv, TSB_SWAP0);
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
/* Program the number of descriptors as MAX_THRESHOLD and half of
* its size for the hysteresis trigger
*/
@@ -1675,22 +1711,41 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
return 0;
}
-static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev)
+static void bcm_sysport_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- unsigned long tx_bytes = 0, tx_packets = 0;
+ struct bcm_sysport_stats64 *stats64 = &priv->stats64;
struct bcm_sysport_tx_ring *ring;
+ u64 tx_packets = 0, tx_bytes = 0;
+ unsigned int start;
unsigned int q;
+ netdev_stats_to_stats64(stats, &dev->stats);
+
for (q = 0; q < dev->num_tx_queues; q++) {
ring = &priv->tx_rings[q];
- tx_bytes += ring->bytes;
- tx_packets += ring->packets;
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->syncp);
+ tx_bytes = ring->bytes;
+ tx_packets = ring->packets;
+ } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+
+ stats->tx_bytes += tx_bytes;
+ stats->tx_packets += tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
- return &dev->stats;
+ /* lockless update of tx_bytes and tx_packets */
+ u64_stats_update_begin(&priv->syncp);
+ stats64->tx_bytes = stats->tx_bytes;
+ stats64->tx_packets = stats->tx_packets;
+ u64_stats_update_end(&priv->syncp);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->syncp);
+ stats->rx_packets = stats64->rx_packets;
+ stats->rx_bytes = stats64->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
}
static void bcm_sysport_netif_start(struct net_device *dev)
@@ -1722,10 +1777,14 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
reg = rbuf_readl(priv, RBUF_CONTROL);
reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
/* Set a correct RSB format on SYSTEMPORT Lite */
- if (priv->is_lite) {
+ if (priv->is_lite)
reg &= ~RBUF_RSB_SWAP1;
+
+ /* Set a correct RSB format based on host endian */
+ if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
reg |= RBUF_RSB_SWAP0;
- }
+ else
+ reg &= ~RBUF_RSB_SWAP0;
rbuf_writel(priv, reg, RBUF_CONTROL);
}
@@ -1954,7 +2013,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bcm_sysport_poll_controller,
#endif
- .ndo_get_stats = bcm_sysport_get_nstats,
+ .ndo_get_stats64 = bcm_sysport_get_stats64,
};
#define REV_FMT "v%2x.%02x"
@@ -2102,6 +2161,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* libphy will adjust the link state accordingly */
netif_carrier_off(dev);
+ u64_stats_init(&priv->syncp);
+
ret = register_netdev(dev);
if (ret) {
dev_err(&pdev->dev, "failed to register net_device\n");
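The counter handling added above follows the kernel's u64_stats_sync pattern: on 64-bit hosts the sync object compiles away, while on 32-bit hosts it is a seqcount that lets readers detect a torn 64-bit read and retry. A minimal, self-contained sketch of the pattern with illustrative names (not bcmsysport code):

#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 rx_packets;			/* 64-bit counter */
	struct u64_stats_sync syncp;	/* no-op on 64-bit hosts */
};

/* Writer side, e.g. from the NAPI poll loop */
static void demo_count_packet(struct demo_stats *s)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	u64_stats_update_end(&s->syncp);
}

/* Reader side, e.g. from .ndo_get_stats64: retry the snapshot
 * until no writer ran concurrently with the read.
 */
static u64 demo_read_packets(struct demo_stats *s)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		packets = s->rx_packets;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));

	return packets;
}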
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 77a51c167a69..82e401df199e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -449,7 +449,8 @@ struct bcm_rsb {
/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit, we
* keep the SYSTEMPORT layout here and adjust with tdma_control_bit()
*/
-#define TSB_SWAP 2
+#define TSB_SWAP0 2
+#define TSB_SWAP1 3
#define ACB_ALGO 3
#define BUF_DATA_OFFSET_SHIFT 4
#define BUF_DATA_OFFSET_MASK 0x3ff
@@ -603,6 +604,7 @@ struct bcm_sysport_mib {
/* HW maintains a large list of counters */
enum bcm_sysport_stat_type {
BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_NETDEV64,
BCM_SYSPORT_STAT_MIB_RX,
BCM_SYSPORT_STAT_MIB_TX,
BCM_SYSPORT_STAT_RUNT,
@@ -619,6 +621,13 @@ enum bcm_sysport_stat_type {
.type = BCM_SYSPORT_STAT_NETDEV, \
}
+#define STAT_NETDEV64(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct bcm_sysport_stats64 *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_stats64, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV64, \
+}
+
#define STAT_MIB(str, m, _type) { \
.stat_string = str, \
.stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
@@ -659,6 +668,14 @@ struct bcm_sysport_stats {
u16 reg_offset;
};
+struct bcm_sysport_stats64 {
+ /* 64-bit stats on both 32-bit and 64-bit machines */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+};
+
/* Software house keeping helper structure */
struct bcm_sysport_cb {
struct sk_buff *skb; /* SKB for RX packets */
@@ -743,5 +760,10 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
+
+ struct bcm_sysport_stats64 stats64;
+
+ /* For atomically updating the generic 64-bit stats on 32-bit machines */
+ struct u64_stats_sync syncp;
};
#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 67fe3d826566..1216c1f1e052 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4284,15 +4284,17 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
return 0;
}
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
- __be16 proto, struct tc_to_netdev *tc)
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
- if (tc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
+ struct tc_mqprio_qopt *mqprio = type_data;
+
+ if (type != TC_SETUP_MQPRIO)
+ return -EOPNOTSUPP;
- tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return bnx2x_setup_tc(dev, tc->mqprio->num_tc);
+ return bnx2x_setup_tc(dev, mqprio->num_tc);
}
/* called with rtnl_lock */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c26688d2f326..a5265e1344f1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -486,8 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
/* setup_tc callback */
int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
-int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
- __be16 proto, struct tc_to_netdev *tc);
+int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
int bnx2x_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivi);
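Both the bnx2x change above and the bnxt change below are part of the tree-wide rework of ndo_setup_tc: the old (handle, chain_index, protocol, struct tc_to_netdev *) argument list is replaced by an enum tc_setup_type plus an opaque type_data pointer, and unsupported types now return -EOPNOTSUPP rather than -EINVAL. A sketch of a callback under the new convention, for a hypothetical driver (demo_configure_tcs() is illustrative, not a real API):

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

/* hypothetical hardware-programming helper */
static int demo_configure_tcs(struct net_device *dev, u8 num_tc)
{
	return 0;
}

static int demo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_MQPRIO: {
		struct tc_mqprio_qopt *mqprio = type_data;

		/* report the TC-to-queue mapping as hardware offloaded */
		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
		return demo_configure_tcs(dev, mqprio->num_tc);
	}
	default:
		/* offload types the driver does not handle */
		return -EOPNOTSUPP;
	}
}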
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index a7ca45b251cb..4f0cb8e1ffc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e7c8539cbddf..aacec8bc19d5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -33,6 +33,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
@@ -48,6 +49,8 @@
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
+#include <linux/cpumask.h>
+#include <net/pkt_cls.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
@@ -56,6 +59,8 @@
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
+#include "bnxt_vfr.h"
+#include "bnxt_tc.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
@@ -101,6 +106,8 @@ enum board_idx {
BCM57416_NPAR,
BCM57452,
BCM57454,
+ BCM58802,
+ BCM58808,
NETXTREME_E_VF,
NETXTREME_C_VF,
};
@@ -109,39 +116,42 @@ enum board_idx {
static const struct {
char *name;
} board_info[] = {
- { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
- { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
- { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
- { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
- { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
- { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
- { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
- { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
- { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
- { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
- { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
- { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
- { "Broadcom NetXtreme-E Ethernet Virtual Function" },
- { "Broadcom NetXtreme-C Ethernet Virtual Function" },
+ [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
+ [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
+ [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
+ [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
+ [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
+ [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
+ [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
+ [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
+ [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
+ [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
+ [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
+ [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
+ [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
+ [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
+ [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
+ [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
+ [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
+ [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
+ [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+ [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
+ [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
+ [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
+ [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+ [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
+ [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
@@ -172,8 +182,9 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
- { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
+ { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
@@ -243,6 +254,16 @@ const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
+static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
+{
+ struct metadata_dst *md_dst = skb_metadata_dst(skb);
+
+ if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
+ return 0;
+
+ return md_dst->u.port_info.port_id;
+}
+
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
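bnxt_xmit_get_cfa_action() recovers the CFA action from a metadata destination that a VF representor attached before steering the skb through the PF's TX path. bnxt_vfr.c itself is not part of this excerpt; a hedged sketch of what the representor transmit side plausibly looks like, using the bnxt_vf_rep fields added to bnxt.h later in this diff:

#include <net/dst_metadata.h>

static netdev_tx_t demo_vf_rep_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

	/* vf_rep->dst is a METADATA_HW_PORT_MUX metadata_dst whose
	 * port_info.port_id was set to the VF's tx_cfa_action.
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)vf_rep->dst);
	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);

	/* hand the skb to the PF netdev; bnxt_start_xmit() picks the
	 * cfa_action back up via bnxt_xmit_get_cfa_action()
	 */
	skb->dev = vf_rep->bp->dev;
	return dev_queue_xmit(skb);
}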
@@ -287,7 +308,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->nr_frags = last_frag;
vlan_tag_flags = 0;
- cfa_action = 0;
+ cfa_action = bnxt_xmit_get_cfa_action(skb);
if (skb_vlan_tag_present(skb)) {
vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
skb_vlan_tag_get(skb);
@@ -322,7 +343,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_push1->tx_bd_hsize_lflags = 0;
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
- tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ tx_push1->tx_bd_cfa_action =
+ cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
end = pdata + length;
end = PTR_ALIGN(end, 8) - 1;
@@ -427,7 +449,8 @@ normal_tx:
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
- txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ txbd1->tx_bd_cfa_action =
+ cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1032,7 +1055,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
bnxt_sched_reset(bp, rxr);
return;
}
-
+ /* Store cfa_code in tpa_info to use in tpa_end
+ * completion processing.
+ */
+ tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
@@ -1267,6 +1293,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
return skb;
}
+/* Given the cfa_code of a received packet, determine which
+ * netdev (vf-rep or PF) the packet is destined for.
+ */
+static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
+{
+ struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
+
+ /* if vf-rep dev is NULL, the packet must belong to the PF */
+ return dev ? dev : bp->dev;
+}
+
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
@@ -1360,7 +1397,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
}
- skb->protocol = eth_type_trans(skb, bp->dev);
+
+ skb->protocol =
+ eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
@@ -1387,6 +1426,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return skb;
}
+static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct sk_buff *skb)
+{
+ if (skb->dev != bp->dev) {
+ /* this packet belongs to a vf-rep */
+ bnxt_vf_rep_rx(bp, skb);
+ return;
+ }
+ skb_record_rx_queue(skb, bnapi->index);
+ napi_gro_receive(&bnapi->napi, skb);
+}
+
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
@@ -1403,7 +1454,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
- u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
+ u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
@@ -1445,8 +1496,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
- skb_record_rx_queue(skb, bnapi->index);
- napi_gro_receive(&bnapi->napi, skb);
+ bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
}
*event |= BNXT_RX_EVENT;
@@ -1535,7 +1585,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
- skb->protocol = eth_type_trans(skb, dev);
+ cfa_code = RX_CMP_CFA_CODE(rxcmp1);
+ skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
@@ -1560,8 +1611,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
}
- skb_record_rx_queue(skb, bnapi->index);
- napi_gro_receive(&bnapi->napi, skb);
+ bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
next_rx:
@@ -1802,6 +1852,13 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
&event);
if (likely(rc >= 0))
rx_pkts += rc;
+ /* Increment rx_pkts when rc is -ENOMEM to count towards
+ * the NAPI budget. Otherwise, we may potentially loop
+ * here forever if we consistently cannot allocate
+ * buffers.
+ */
+ else if (rc == -ENOMEM)
+ rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
} else if (unlikely((TX_CMP_TYPE(txcmp) ==
@@ -4420,9 +4477,33 @@ static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
mutex_lock(&bp->hwrm_cmd_lock);
rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc)
+ bp->tx_reserved_rings = *tx_rings;
return rc;
}
+static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+ req.num_tx_rings = cpu_to_le16(tx_rings);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+}
+
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
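bnxt_hwrm_check_tx_rings() uses the new FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST flag which, judging by the flag name and the silent-send usage, asks the firmware to validate that the requested TX ring count could be satisfied without actually committing the resources. That lets ethtool/mqprio ring-change requests be rejected up front, while the real reservation is still performed by bnxt_hwrm_reserve_tx_rings() at init-chip time and remembered in tx_reserved_rings.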
@@ -4577,6 +4658,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 flags;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
@@ -4593,15 +4675,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
- if (BNXT_PF(bp)) {
- u16 flags = le16_to_cpu(resp->flags);
-
- if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
- FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
- bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
- if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
- bp->flags |= BNXT_FLAG_MULTI_HOST;
+ flags = le16_to_cpu(resp->flags);
+ if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
+ FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+ if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
+ bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
}
+ if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -4610,6 +4692,13 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
bp->port_partition_type = resp->port_partition_type;
break;
}
+ if (bp->hwrm_spec_code < 0x10707 ||
+ resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
+ bp->br_mode = BRIDGE_MODE_VEB;
+ else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
+ bp->br_mode = BRIDGE_MODE_VEPA;
+ else
+ bp->br_mode = BRIDGE_MODE_UNDEF;
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -4647,7 +4736,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->port_id = le16_to_cpu(resp->port_id);
bp->dev->dev_port = pf->port_id;
memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
- memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4775,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
- mutex_unlock(&bp->hwrm_cmd_lock);
-
- if (is_valid_ether_addr(vf->mac_addr)) {
- /* overwrite netdev dev_adr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(bp->dev);
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
- }
- return rc;
#endif
}
@@ -4911,6 +4989,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
}
}
+static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
+{
+ struct hwrm_func_cfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
+ if (br_mode == BRIDGE_MODE_VEB)
+ req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
+ else if (br_mode == BRIDGE_MODE_VEPA)
+ req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
+ else
+ return -EINVAL;
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
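For context on the two bridge modes programmed here: in VEB (Virtual Ethernet Bridge) mode the NIC switches VM-to-VM traffic internally, while in VEPA (Virtual Ethernet Port Aggregator) mode all traffic is hairpinned through the adjacent external switch, giving that switch visibility and policy control over inter-VM flows.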
@@ -5046,6 +5144,15 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc);
goto err_out;
}
+ if (bp->tx_reserved_rings != bp->tx_nr_rings) {
+ int tx = bp->tx_nr_rings;
+
+ if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
+ tx < bp->tx_nr_rings) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ }
}
rc = bnxt_hwrm_ring_alloc(bp);
@@ -5452,8 +5559,15 @@ static void bnxt_free_irq(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
irq = &bp->irq_tbl[i];
- if (irq->requested)
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+ free_cpumask_var(irq->cpu_mask);
+ irq->have_cpumask = 0;
+ }
free_irq(irq->vector, bp->bnapi[i]);
+ }
+
irq->requested = 0;
}
}
@@ -5486,6 +5600,21 @@ static int bnxt_request_irq(struct bnxt *bp)
break;
irq->requested = 1;
+
+ if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
+ int numa_node = dev_to_node(&bp->pdev->dev);
+
+ irq->have_cpumask = 1;
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ irq->cpu_mask);
+ rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
+ if (rc) {
+ netdev_warn(bp->dev,
+ "Set affinity failed, IRQ = %d\n",
+ irq->vector);
+ break;
+ }
+ }
}
return rc;
}
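cpumask_local_spread(i, numa_node) returns the i-th CPU, preferring CPUs local to the given NUMA node before falling back to remote ones, so each ring's IRQ affinity hint lands near the device's memory. The hint is advisory: userspace tools such as irqbalance may or may not honor it.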
@@ -5559,12 +5688,10 @@ void bnxt_tx_disable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
- struct netdev_queue *txq;
if (bp->tx_ring) {
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txq = netdev_get_tx_queue(bp->dev, i);
txr->dev_state = BNXT_DEV_STATE_CLOSING;
}
}
@@ -5577,11 +5704,9 @@ void bnxt_tx_enable(struct bnxt *bp)
{
int i;
struct bnxt_tx_ring_info *txr;
- struct netdev_queue *txq;
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
- txq = netdev_get_tx_queue(bp->dev, i);
txr->dev_state = 0;
}
netif_tx_wake_all_queues(bp->dev);
@@ -5646,7 +5771,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
- if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
+ if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
@@ -5661,6 +5786,8 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
+ bp->port_count = resp->port_cnt;
+
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -5686,13 +5813,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
link_info->phy_link_status = resp->link;
- link_info->duplex = resp->duplex;
+ link_info->duplex = resp->duplex_cfg;
+ if (bp->hwrm_spec_code >= 0x10800)
+ link_info->duplex = resp->duplex_state;
link_info->pause = resp->pause;
link_info->auto_mode = resp->auto_mode;
link_info->auto_pause_setting = resp->auto_pause;
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
- link_info->duplex_setting = resp->duplex;
+ link_info->duplex_setting = resp->duplex_cfg;
if (link_info->phy_link_status == BNXT_LINK_LINK)
link_info->link_speed = le16_to_cpu(resp->link_speed);
else
@@ -6214,6 +6343,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Poll link status and check for SFP+ module status */
bnxt_get_port_module_status(bp);
+ /* VF-reps may need to be re-opened after the PF is re-opened */
+ if (BNXT_PF(bp))
+ bnxt_vf_reps_open(bp);
return 0;
open_err:
@@ -6302,6 +6434,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
}
+
+ /* Close the VF-reps before closing PF */
+ if (BNXT_PF(bp))
+ bnxt_vf_reps_close(bp);
#endif
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
@@ -6813,7 +6949,8 @@ static void bnxt_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
- if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
+ if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
+ bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
}
@@ -6923,8 +7060,8 @@ static void bnxt_sp_task(struct work_struct *work)
}
/* Under rtnl_lock */
-int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
- int tx_xdp)
+int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+ int tx_xdp)
{
int max_rx, max_tx, tx_sets = 1;
int tx_rings_needed;
@@ -6944,10 +7081,7 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
if (max_tx < tx_rings_needed)
return -ENOMEM;
- if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
- tx_rings_needed < (tx * tx_sets + tx_xdp))
- return -ENOMEM;
- return 0;
+ return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
}
static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -7136,8 +7270,8 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
sh = true;
- rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
- sh, tc, bp->tx_nr_rings_xdp);
+ rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ sh, tc, bp->tx_nr_rings_xdp);
if (rc)
return rc;
@@ -7152,6 +7286,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
netdev_reset_tc(dev);
}
+ bp->tx_nr_rings += bp->tx_nr_rings_xdp;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7162,15 +7297,33 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
return 0;
}
-static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
- __be16 proto, struct tc_to_netdev *ntc)
+static int bnxt_setup_flower(struct net_device *dev,
+ struct tc_cls_flower_offload *cls_flower)
{
- if (ntc->type != TC_SETUP_MQPRIO)
- return -EINVAL;
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower);
+}
- ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_setup_flower(dev, type_data);
+ case TC_SETUP_MQPRIO: {
+ struct tc_mqprio_qopt *mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
+ return bnxt_setup_mq_tc(dev, mqprio->num_tc);
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
}
#ifdef CONFIG_RFS_ACCEL
@@ -7422,6 +7575,102 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
schedule_work(&bp->sp_task);
}
+static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
+ nlflags, filter_mask, NULL);
+}
+
+static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem, rc = 0;
+
+ if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ u16 mode;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ if (nla_len(attr) < sizeof(mode))
+ return -EINVAL;
+
+ mode = nla_get_u16(attr);
+ if (mode == bp->br_mode)
+ break;
+
+ rc = bnxt_hwrm_set_br_mode(bp, mode);
+ if (!rc)
+ bp->br_mode = mode;
+ break;
+ }
+ return rc;
+}
+
+static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
+ size_t len)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ int rc;
+
+ /* The PF and its VF-reps only support the switchdev framework */
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
+ rc = snprintf(buf, len, "p%d", bp->pf.port_id);
+
+ if (rc >= len)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
+{
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return -EOPNOTSUPP;
+
+ /* The PF and its VF-reps only support the switchdev framework */
+ if (!BNXT_PF(bp))
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ /* In SR-IOV, each PF-pool (PF + child VFs) serves as a
+ * switching domain; the PF's permanent MAC address can be
+ * used as the unique parent ID.
+ */
+ attr->u.ppid.id_len = ETH_ALEN;
+ ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int bnxt_swdev_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ return bnxt_port_attr_get(netdev_priv(dev), attr);
+}
+
+static const struct switchdev_ops bnxt_switchdev_ops = {
+ .switchdev_port_attr_get = bnxt_swdev_port_attr_get
+};
+
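With ndo_bridge_getlink/ndo_bridge_setlink wired up, the PF's eswitch bridging mode can be inspected and flipped from userspace with iproute2, e.g. "bridge link set dev <pf-netdev> hwmode vepa" and "bridge link show" (device name illustrative). ndo_get_phys_port_name additionally exposes the port as "p<N>", so tools such as udev can derive consistent switch-port names.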
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -7453,6 +7702,9 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_xdp = bnxt_xdp,
+ .ndo_bridge_getlink = bnxt_bridge_getlink,
+ .ndo_bridge_setlink = bnxt_bridge_setlink,
+ .ndo_get_phys_port_name = bnxt_get_phys_port_name
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -7460,11 +7712,14 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
bnxt_sriov_disable(bp);
+ bnxt_dl_unregister(bp);
+ }
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ bnxt_shutdown_tc(bp);
cancel_work_sync(&bp->sp_task);
bp->sp_event = 0;
@@ -7633,6 +7888,9 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
if (sh)
bp->flags |= BNXT_FLAG_SHARED_RINGS;
dflt_rings = netif_get_num_default_rss_queues();
+ /* Reduce the default ring count on multi-port cards to save memory */
+ if (bp->port_count > 1)
+ dflt_rings = min_t(int, dflt_rings, 4);
rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc)
return rc;
@@ -7661,6 +7919,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
}
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+ int rc = 0;
+
+ if (BNXT_PF(bp)) {
+ memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ } else {
+#ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+ /* overwrite netdev dev_addr with the admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
+#endif
+ }
+ return rc;
+}
+
static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7710,6 +7990,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
+ SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -7764,6 +8045,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
+ mutex_init(&bp->sriov_lock);
#endif
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4_PLUS(bp))
@@ -7789,7 +8071,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
-
+ rc = bnxt_init_mac_addr(bp);
+ if (rc) {
+ dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+ rc = -EADDRNOTAVAIL;
+ goto init_err_pci_clean;
+ }
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
@@ -7803,6 +8090,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_ethtool_init(bp);
bnxt_dcb_init(bp);
+ rc = bnxt_probe_phy(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
@@ -7837,10 +8128,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
bp->flags |= BNXT_FLAG_STRIP_VLAN;
- rc = bnxt_probe_phy(bp);
- if (rc)
- goto init_err_pci_clean;
-
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
@@ -7851,9 +8138,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
+ if (BNXT_PF(bp))
+ bnxt_init_tc(bp);
+
rc = register_netdev(dev);
if (rc)
- goto init_err_clr_int;
+ goto init_err_cleanup_tc;
+
+ if (BNXT_PF(bp))
+ bnxt_dl_register(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
@@ -7863,7 +8156,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-init_err_clr_int:
+init_err_cleanup_tc:
+ bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
init_err_pci_clean:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f34691f85602..7b888d4b2b55 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -12,13 +12,17 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.7.0"
+#define DRV_MODULE_VERSION "1.8.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 7
+#define DRV_VER_MIN 8
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
+#include <linux/rhashtable.h>
+#include <net/devlink.h>
+#include <net/dst_metadata.h>
+#include <net/switchdev.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
@@ -242,6 +246,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
+#define RX_CMP_CFA_CODE(rxcmpl1) \
+ ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
+ RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
+
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
@@ -311,6 +319,10 @@ struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_hdr_info;
};
+#define TPA_START_CFA_CODE(rx_tpa_start) \
+ ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
+ RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
+
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
@@ -618,6 +630,8 @@ struct bnxt_tpa_info {
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
+
+ u16 cfa_code; /* cfa_code in TPA start compl */
};
struct bnxt_rx_ring_info {
@@ -688,8 +702,10 @@ struct bnxt_napi {
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
- u8 requested;
+ u8 requested:1;
+ u8 have_cpumask:1;
char name[IFNAMSIZ + 2];
+ cpumask_var_t cpu_mask;
};
#define HWRM_RING_ALLOC_TX 0x1
@@ -825,8 +841,8 @@ struct bnxt_link_info {
u8 loop_back;
u8 link_up;
u8 duplex;
-#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
-#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
+#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
+#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 pause;
#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
@@ -928,6 +944,45 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+struct bnxt_tc_info {
+ bool enabled;
+
+ /* hash table to store TC offloaded flows */
+ struct rhashtable flow_table;
+ struct rhashtable_params flow_ht_params;
+
+ /* hash table to store L2 keys of TC flows */
+ struct rhashtable l2_table;
+ struct rhashtable_params l2_ht_params;
+
+ /* lock to atomically add/del an l2 node when a flow is
+ * added or deleted.
+ */
+ struct mutex lock;
+
+ /* Stat counter mask (width) */
+ u64 bytes_mask;
+ u64 packets_mask;
+};
+
+struct bnxt_vf_rep_stats {
+ u64 packets;
+ u64 bytes;
+ u64 dropped;
+};
+
+struct bnxt_vf_rep {
+ struct bnxt *bp;
+ struct net_device *dev;
+ struct metadata_dst *dst;
+ u16 vf_idx;
+ u16 tx_cfa_action;
+ u16 rx_cfa_code;
+
+ struct bnxt_vf_rep_stats rx_stats;
+ struct bnxt_vf_rep_stats tx_stats;
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -957,6 +1012,9 @@ struct bnxt {
#define CHIP_NUM_5745X 0xd730
+#define CHIP_NUM_58802 0xd802
+#define CHIP_NUM_58808 0xd808
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -988,6 +1046,10 @@ struct bnxt {
#define BNXT_CHIP_NUM_57X1X(chip_num) \
(BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
+#define BNXT_CHIP_NUM_588XX(chip_num) \
+ ((chip_num) == CHIP_NUM_58802 || \
+ (chip_num) == CHIP_NUM_58808)
+
struct net_device *dev;
struct pci_dev *pdev;
@@ -1027,6 +1089,7 @@ struct bnxt {
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
@@ -1045,6 +1108,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
BNXT_CHIP_NUM_5745X((bp)->chip_num) || \
+ BNXT_CHIP_NUM_588XX((bp)->chip_num) || \
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
@@ -1086,6 +1150,7 @@ struct bnxt {
int tx_nr_rings;
int tx_nr_rings_per_tc;
int tx_nr_rings_xdp;
+ int tx_reserved_rings;
int tx_wake_thresh;
int tx_push_thresh;
@@ -1164,6 +1229,8 @@ struct bnxt {
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
u8 port_partition_type;
+ u8 port_count;
+ u16 br_mode;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
@@ -1206,6 +1273,12 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
+
+ /* lock to protect VF-rep creation/cleanup via
+ * multiple paths such as ->sriov_configure() and
+ * devlink ->eswitch_mode_set()
+ */
+ struct mutex sriov_lock;
#endif
#define BNXT_NTP_FLTR_MAX_FLTR 4096
@@ -1232,6 +1305,13 @@ struct bnxt {
struct bnxt_led_info leds[BNXT_MAX_LED];
struct bpf_prog *xdp_prog;
+
+ /* devlink interface and vf-rep structs */
+ struct devlink *dl;
+ enum devlink_eswitch_mode eswitch_mode;
+ struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
+ u16 *cfa_code_map; /* cfa_code -> vf_idx map */
+ struct bnxt_tc_info tc_info;
};
#define BNXT_RX_STATS_OFFSET(counter) \
@@ -1301,9 +1381,10 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
-int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
- int tx_xdp);
+int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
+ int tx_xdp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
#endif
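The new bnxt_tc_info above tracks offloaded flows and their L2 keys in resizable hash tables. bnxt_tc.c is not included in this excerpt; as a rough illustration of how the rhashtable members are typically used, with hypothetical types keyed by the TC filter cookie:

#include <linux/rhashtable.h>

struct demo_tc_flow {
	struct rhash_head	node;
	unsigned long		cookie;		/* hash key */
	/* ... matched fields and actions ... */
};

static const struct rhashtable_params demo_flow_ht_params = {
	.head_offset = offsetof(struct demo_tc_flow, node),
	.key_offset  = offsetof(struct demo_tc_flow, cookie),
	.key_len     = sizeof(unsigned long),
	.automatic_shrinking = true,
};

/* table setup: rhashtable_init(ht, &demo_flow_ht_params); */

static int demo_flow_add(struct rhashtable *ht, struct demo_tc_flow *flow)
{
	return rhashtable_insert_fast(ht, &flow->node, demo_flow_ht_params);
}

static struct demo_tc_flow *demo_flow_find(struct rhashtable *ht,
					   unsigned long cookie)
{
	return rhashtable_lookup_fast(ht, &cookie, demo_flow_ht_params);
}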
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 5c6dd0ce209f..aa1f3a2c7a78 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -93,6 +93,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
cos2bw.bw_weight = ets->tc_tx_bw[i];
+ /* older firmware requires min_bw to be set to the
+ * same weight value in percent.
+ */
+ cos2bw.min_bw =
+ cpu_to_le32((ets->tc_tx_bw[i] * 100) |
+ BW_VALUE_UNIT_PERCENT1_100);
}
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (i == 0) {
@@ -549,13 +555,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct bnxt *bp = netdev_priv(dev);
- /* only support IEEE */
- if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
+ /* All firmware DCBX settings are set in NVRAM */
+ if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
return 1;
+
+ /* only support IEEE */
+ if ((mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_VER_IEEE))
+ return 1;
}
if (mode == bp->dcbx_cap)
@@ -584,7 +595,7 @@ void bnxt_dcb_init(struct bnxt *bp)
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
- else
+ else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index ecd0a5e46a49..d2e0af960bf5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg {
u8 queue_id;
__le32 min_bw;
__le32 max_bw;
+#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
u8 tsa;
u8 pri_lvl;
u8 bw_weight;
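BW_VALUE_UNIT_PERCENT1_100 selects a bandwidth unit of 1/100th of a percent, which is why the cos2bw code earlier in this diff multiplies the ETS weight (a percentage) by 100 before OR-ing in the unit bit: older firmware that reads min_bw then sees the same value the bw_weight field expresses in percent.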
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index be6acadcb202..8eff05a3e0e4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -86,9 +86,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
- stats_ticks = clamp_t(u32, stats_ticks,
- BNXT_MIN_STATS_COAL_TICKS,
- BNXT_MAX_STATS_COAL_TICKS);
+ /* Allow 0, which means disable. */
+ if (stats_ticks)
+ stats_ticks = clamp_t(u32, stats_ticks,
+ BNXT_MIN_STATS_COAL_TICKS,
+ BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
update_stats = true;
@@ -198,19 +200,23 @@ static const struct {
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+static int bnxt_get_num_stats(struct bnxt *bp)
+{
+ int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
+ num_stats += BNXT_NUM_PORT_STATS;
+
+ return num_stats;
+}
+
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
struct bnxt *bp = netdev_priv(dev);
switch (sset) {
- case ETH_SS_STATS: {
- int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
-
- if (bp->flags & BNXT_FLAG_PORT_STATS)
- num_stats += BNXT_NUM_PORT_STATS;
-
- return num_stats;
- }
+ case ETH_SS_STATS:
+ return bnxt_get_num_stats(bp);
case ETH_SS_TEST:
if (!bp->num_tests)
return -EOPNOTSUPP;
@@ -225,11 +231,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
- u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
- memset(buf, 0, buf_size);
-
if (!bp->bnapi)
return;
@@ -432,8 +435,7 @@ static int bnxt_set_channels(struct net_device *dev,
}
tx_xdp = req_rx_rings;
}
- rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs,
- tx_xdp);
+ rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to allocate the requested rings\n");
return rc;
@@ -520,7 +522,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
struct flow_keys *fkeys;
int i, rc = -EINVAL;
- if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
+ if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
return rc;
for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
@@ -835,7 +837,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
strlcpy(info->fw_version, bp->fw_ver_str,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+ info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
info->eedump_len = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 7dc71bb95837..cb04cc76e8ad 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -11,14 +11,14 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.7.6 */
+/* HSI and HWRM Specification 1.8.1 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 7
-#define HWRM_VERSION_UPDATE 6
+#define HWRM_VERSION_MINOR 8
+#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */
+#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */
-#define HWRM_VERSION_STR "1.7.6.2"
+#define HWRM_VERSION_STR "1.8.1.4"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -813,7 +813,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -835,9 +835,8 @@ struct hwrm_func_qcfg_output {
u8 port_pf_cnt;
#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
- u8 host_cnt;
- #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
u8 unused_0;
+ u8 unused_1;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
@@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
- u8 unused_1;
+ u8 unused_2;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_func_vlan_cfg */
+/* Input (48 bytes) */
+struct hwrm_func_vlan_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0;
+ u8 unused_1;
+ __le32 enables;
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+ __le16 stag_vid;
+ u8 stag_pcp;
+ u8 unused_2;
+ __be16 stag_tpid;
+ __le16 ctag_vid;
+ u8 ctag_pcp;
+ u8 unused_3;
+ __be16 ctag_tpid;
+ __le32 rsvd1;
+ __le32 rsvd2;
+ __le32 unused_4;
+};
+
+/* Output (16 bytes) */
+struct hwrm_func_vlan_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
u8 unused_2;
+ u8 unused_3;
u8 valid;
};
@@ -902,6 +945,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1456,9 +1501,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
- u8 duplex;
- #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
- #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
+ u8 duplex_cfg;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
@@ -1573,6 +1618,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -1651,14 +1699,16 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 duplex_state;
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
u8 unused_1;
- u8 unused_2;
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- __le32 unused_3;
+ __le32 unused_2;
+ u8 unused_3;
u8 unused_4;
u8 unused_5;
- u8 unused_6;
u8 valid;
};
@@ -1744,6 +1794,51 @@ struct hwrm_port_mac_cfg_output {
u8 valid;
};
+/* hwrm_port_mac_ptp_qcfg */
+/* Input (24 bytes) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ u8 unused_0;
+ __le16 unused_1;
+ __le32 rx_ts_reg_off_lower;
+ __le32 rx_ts_reg_off_upper;
+ __le32 rx_ts_reg_off_seq_id;
+ __le32 rx_ts_reg_off_src_id_0;
+ __le32 rx_ts_reg_off_src_id_1;
+ __le32 rx_ts_reg_off_src_id_2;
+ __le32 rx_ts_reg_off_domain_id;
+ __le32 rx_ts_reg_off_fifo;
+ __le32 rx_ts_reg_off_fifo_adv;
+ __le32 rx_ts_reg_off_granularity;
+ __le32 tx_ts_reg_off_lower;
+ __le32 tx_ts_reg_off_upper;
+ __le32 tx_ts_reg_off_seq_id;
+ __le32 tx_ts_reg_off_fifo;
+ __le32 tx_ts_reg_off_granularity;
+ __le32 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 unused_5;
+ u8 valid;
+};
+
/* hwrm_port_qstats */
/* Input (40 bytes) */
struct hwrm_port_qstats_input {
@@ -1874,11 +1969,16 @@ struct hwrm_port_phy_qcaps_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 eee_supported;
- #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
- #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
- u8 unused_0;
+ u8 flags;
+ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+ u8 port_cnt;
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
@@ -3152,6 +3252,95 @@ struct hwrm_queue_cos2bw_cfg_output {
u8 valid;
};
+/* hwrm_queue_dscp_qcaps */
+/* Input (24 bytes) */
+struct hwrm_queue_dscp_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 port_id;
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_dscp_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_dscp_bits;
+ u8 unused_0;
+ __le16 max_entries;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_qcfg */
+/* Input (32 bytes) */
+struct hwrm_queue_dscp2pri_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 dest_data_addr;
+ u8 port_id;
+ u8 unused_0;
+ __le16 dest_data_buffer_size;
+ __le32 unused_1;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_dscp2pri_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 entry_cnt;
+ u8 default_pri;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_queue_dscp2pri_cfg */
+/* Input (40 bytes) */
+struct hwrm_queue_dscp2pri_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 src_data_addr;
+ __le32 flags;
+ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+ __le32 enables;
+ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+ u8 port_id;
+ u8 default_pri;
+ __le16 entry_cnt;
+ __le32 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_queue_dscp2pri_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_vnic_alloc */
/* Input (24 bytes) */
struct hwrm_vnic_alloc_input {
@@ -4038,7 +4227,7 @@ struct hwrm_cfa_encap_record_alloc_input {
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
- __le32 encap_data[16];
+ __le32 encap_data[20];
};
/* Output (16 bytes) */
@@ -4120,8 +4309,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
@@ -4224,6 +4413,216 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};
+/* hwrm_cfa_flow_alloc */
+/* Input (128 bytes) */
+struct hwrm_cfa_flow_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flags;
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+ __le16 src_fid;
+ __le32 tunnel_handle;
+ __le16 action_flags;
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ __le16 dst_fid;
+ __be16 l2_rewrite_vlan_tpid;
+ __be16 l2_rewrite_vlan_tci;
+ __le16 act_meter_id;
+ __le16 ref_flow_handle;
+ __be16 ethertype;
+ __be16 outer_vlan_tci;
+ __be16 dmac[3];
+ __be16 inner_vlan_tci;
+ __be16 smac[3];
+ u8 ip_dst_mask_len;
+ u8 ip_src_mask_len;
+ __be32 ip_dst[4];
+ __be32 ip_src[4];
+ __be16 l4_src_port;
+ __be16 l4_src_port_mask;
+ __be16 l4_dst_port;
+ __be16 l4_dst_port_mask;
+ __be32 nat_ip_address[4];
+ __be16 l2_rewrite_dmac[3];
+ __be16 nat_port;
+ __be16 l2_rewrite_smac[3];
+ u8 ip_proto;
+ u8 unused_0;
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_flow_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 flow_handle;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_flow_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 flow_handle;
+ __le16 unused_0[3];
+};
+
+/* Output (32 bytes) */
+struct hwrm_cfa_flow_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet;
+ __le64 byte;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_flow_stats */
+/* Input (40 bytes) */
+struct hwrm_cfa_flow_stats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 num_flows;
+ __le16 flow_handle_0;
+ __le16 flow_handle_1;
+ __le16 flow_handle_2;
+ __le16 flow_handle_3;
+ __le16 flow_handle_4;
+ __le16 flow_handle_5;
+ __le16 flow_handle_6;
+ __le16 flow_handle_7;
+ __le16 flow_handle_8;
+ __le16 flow_handle_9;
+ __le16 unused_0;
+};
+
+/* Output (176 bytes) */
+struct hwrm_cfa_flow_stats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 packet_0;
+ __le64 packet_1;
+ __le64 packet_2;
+ __le64 packet_3;
+ __le64 packet_4;
+ __le64 packet_5;
+ __le64 packet_6;
+ __le64 packet_7;
+ __le64 packet_8;
+ __le64 packet_9;
+ __le64 byte_0;
+ __le64 byte_1;
+ __le64 byte_2;
+ __le64 byte_3;
+ __le64 byte_4;
+ __le64 byte_5;
+ __le64 byte_6;
+ __le64 byte_7;
+ __le64 byte_8;
+ __le64 byte_9;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_alloc */
+/* Input (32 bytes) */
+struct hwrm_cfa_vfr_alloc_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 vf_id;
+ __le16 reserved;
+ __le32 unused_0;
+ char vfr_name[32];
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vfr_alloc_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 rx_cfa_code;
+ __le16 tx_cfa_action;
+ u8 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 valid;
+};
+
+/* hwrm_cfa_vfr_free */
+/* Input (24 bytes) */
+struct hwrm_cfa_vfr_free_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ char vfr_name[32];
+};
+
+/* Output (16 bytes) */
+struct hwrm_cfa_vfr_free_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
/* hwrm_tunnel_dst_port_query */
/* Input (24 bytes) */
struct hwrm_tunnel_dst_port_query_input {
@@ -4448,12 +4847,13 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
+ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- __le16 unused_0[3];
+ u8 host_idx;
+ u8 unused_0[5];
};
/* Output (16 bytes) */
@@ -4487,7 +4887,7 @@ struct hwrm_fw_qstatus_input {
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
u8 unused_0[7];
};
@@ -4572,6 +4972,16 @@ struct hwrm_fw_set_structured_data_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_fw_set_structured_data_cmd_err {
+ u8 code;
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
+ #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ u8 unused_0[7];
+};
+
/* hwrm_fw_get_structured_data */
/* Input (32 bytes) */
struct hwrm_fw_get_structured_data_input {
@@ -4611,6 +5021,14 @@ struct hwrm_fw_get_structured_data_output {
u8 valid;
};
+/* Command specific Error Codes (8 bytes) */
+struct hwrm_fw_get_structured_data_cmd_err {
+ u8 code;
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
+ u8 unused_0[7];
+};
+
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
@@ -5280,11 +5698,15 @@ struct hwrm_selftest_qlist_output {
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 offline_tests;
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL
+ #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0;
__le16 test_timeout;
u8 unused_1;
@@ -5312,6 +5734,8 @@ struct hwrm_selftest_exec_input {
#define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL
+ #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL
u8 unused_0[7];
};
@@ -5326,11 +5750,15 @@ struct hwrm_selftest_exec_output {
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL
u8 test_success;
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL
#define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL
+ #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL
__le16 unused_0[3];
};
@@ -5411,7 +5839,7 @@ struct cmd_nums {
#define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
#define HWRM_PORT_PHY_QCFG (0x27UL)
#define HWRM_PORT_MAC_QCFG (0x28UL)
- #define RESERVED7 (0x29UL)
+ #define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
#define HWRM_PORT_PHY_QCAPS (0x2aUL)
#define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
#define HWRM_PORT_PHY_I2C_READ (0x2cUL)
@@ -5421,14 +5849,17 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
- #define RESERVED2 (0x33UL)
- #define RESERVED3 (0x34UL)
+ #define HWRM_FUNC_VLAN_CFG (0x33UL)
+ #define HWRM_FUNC_VLAN_QCFG (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
#define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
#define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
#define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
+ #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
+ #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
+ #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
#define HWRM_VNIC_ALLOC (0x40UL)
#define HWRM_VNIC_FREE (0x41UL)
#define HWRM_VNIC_CFG (0x42UL)
@@ -5455,7 +5886,7 @@ struct cmd_nums {
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
- #define RESERVED4 (0x94UL)
+ #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
@@ -5494,6 +5925,8 @@ struct cmd_nums {
#define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
#define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
#define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
+ #define HWRM_CFA_VFR_ALLOC (0xfdUL)
+ #define HWRM_CFA_VFR_FREE (0xfeUL)
#define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
#define HWRM_CFA_VF_PAIR_FREE (0x101UL)
#define HWRM_CFA_VF_PAIR_INFO (0x102UL)
@@ -5502,14 +5935,20 @@ struct cmd_nums {
#define HWRM_CFA_FLOW_FLUSH (0x105UL)
#define HWRM_CFA_FLOW_STATS (0x106UL)
#define HWRM_CFA_FLOW_INFO (0x107UL)
+ #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
+ #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
+ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
+ #define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
+ #define HWRM_DBG_ERASE_NVM (0xff15UL)
+ #define HWRM_DBG_CFG (0xff16UL)
#define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
@@ -5720,6 +6159,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
+ #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
__le16 len;
u8 version;
u8 count;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index b8e7248294d9..d37925a8a65b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -18,6 +18,7 @@
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
+#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
@@ -587,6 +588,10 @@ void bnxt_sriov_disable(struct bnxt *bp)
if (!num_vfs)
return;
+ /* synchronize VF and VF-rep create and destroy */
+ mutex_lock(&bp->sriov_lock);
+ bnxt_vf_reps_destroy(bp);
+
if (pci_vfs_assigned(bp->pdev)) {
bnxt_hwrm_fwd_async_event_cmpl(
bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
@@ -597,6 +602,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
+ mutex_unlock(&bp->sriov_lock);
bnxt_free_vf_resources(bp);
@@ -794,8 +800,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
PORT_PHY_QCFG_RESP_LINK_LINK;
phy_qcfg_resp.link_speed = cpu_to_le16(
PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
- phy_qcfg_resp.duplex =
- PORT_PHY_QCFG_RESP_DUPLEX_FULL;
+ phy_qcfg_resp.duplex_cfg =
+ PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
+ phy_qcfg_resp.duplex_state =
+ PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
phy_qcfg_resp.pause =
(PORT_PHY_QCFG_RESP_PAUSE_TX |
PORT_PHY_QCFG_RESP_PAUSE_RX);
@@ -804,7 +812,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
/* force link down */
phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
phy_qcfg_resp.link_speed = 0;
- phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
+ phy_qcfg_resp.duplex_state =
+ PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
phy_qcfg_resp.pause = 0;
}
rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
new file mode 100644
index 000000000000..ccd699fb2d70
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -0,0 +1,834 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/if_vlan.h>
+#include <net/flow_dissector.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_skbedit.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_sriov.h"
+#include "bnxt_tc.h"
+#include "bnxt_vfr.h"
+
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+
+#define BNXT_FID_INVALID 0xffff
+#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
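+/* e.g. VLAN_TCI(5, 7) = 0xe005: VID in bits 0-11, PCP in bits 13-15 */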
+
+/* Return the dst fid of the func for flow forwarding
+ * For PFs: src_fid is the fid of the PF
+ * For VF-reps: src_fid is the fid of the VF
+ */
+static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
+{
+ struct bnxt *bp;
+
+ /* check if dev belongs to the same switch */
+ if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
+ netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
+ dev->ifindex);
+ return BNXT_FID_INVALID;
+ }
+
+ /* Is dev a VF-rep? */
+ if (dev != pf_bp->dev)
+ return bnxt_vf_rep_get_fid(dev);
+
+ bp = netdev_priv(dev);
+ return bp->pf.fw_fid;
+}
+
+static int bnxt_tc_parse_redir(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
+{
+ int ifindex = tcf_mirred_ifindex(tc_act);
+ struct net_device *dev;
+ u16 dst_fid;
+
+ dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
+ if (!dev) {
+ netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
+ return -EINVAL;
+ }
+
+ /* find the FID from dev */
+ dst_fid = bnxt_flow_get_dst_fid(bp, dev);
+ if (dst_fid == BNXT_FID_INVALID) {
+ netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
+ return -EINVAL;
+ }
+
+ actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
+ actions->dst_fid = dst_fid;
+ actions->dst_dev = dev;
+ return 0;
+}
+
+static void bnxt_tc_parse_vlan(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
+{
+ if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+ actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
+ } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+ actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
+ actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
+ actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ }
+}
+
+static int bnxt_tc_parse_actions(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ struct tcf_exts *tc_exts)
+{
+ const struct tc_action *tc_act;
+ LIST_HEAD(tc_actions);
+ int rc;
+
+ if (!tcf_exts_has_actions(tc_exts)) {
+ netdev_info(bp->dev, "no actions");
+ return -EINVAL;
+ }
+
+ tcf_exts_to_list(tc_exts, &tc_actions);
+ list_for_each_entry(tc_act, &tc_actions, list) {
+ /* Drop action */
+ if (is_tcf_gact_shot(tc_act)) {
+ actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
+ return 0; /* don't bother with other actions */
+ }
+
+ /* Redirect action */
+ if (is_tcf_mirred_egress_redirect(tc_act)) {
+ rc = bnxt_tc_parse_redir(bp, actions, tc_act);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ /* Push/pop VLAN */
+ if (is_tcf_vlan(tc_act)) {
+ bnxt_tc_parse_vlan(bp, actions, tc_act);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+#define GET_KEY(flow_cmd, key_type) \
+ skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
+ (flow_cmd)->key)
+#define GET_MASK(flow_cmd, key_type) \
+ skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
+ (flow_cmd)->mask)
+
+static int bnxt_tc_parse_flow(struct bnxt *bp,
+ struct tc_cls_flower_offload *tc_flow_cmd,
+ struct bnxt_tc_flow *flow)
+{
+ struct flow_dissector *dissector = tc_flow_cmd->dissector;
+ u16 addr_type = 0;
+
+ /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
+ if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
+ (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
+ netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
+
+ addr_type = key->addr_type;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
+ struct flow_dissector_key_basic *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
+
+ flow->l2_key.ether_type = key->n_proto;
+ flow->l2_mask.ether_type = mask->n_proto;
+
+ if (key->n_proto == htons(ETH_P_IP) ||
+ key->n_proto == htons(ETH_P_IPV6)) {
+ flow->l4_key.ip_proto = key->ip_proto;
+ flow->l4_mask.ip_proto = mask->ip_proto;
+ }
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ struct flow_dissector_key_eth_addrs *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
+ ether_addr_copy(flow->l2_key.dmac, key->dst);
+ ether_addr_copy(flow->l2_mask.dmac, mask->dst);
+ ether_addr_copy(flow->l2_key.smac, key->src);
+ ether_addr_copy(flow->l2_mask.smac, mask->src);
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
+ struct flow_dissector_key_vlan *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
+
+ flow->l2_key.inner_vlan_tci =
+ cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
+ flow->l2_mask.inner_vlan_tci =
+ cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
+ flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
+ flow->l2_mask.inner_vlan_tpid = htons(0xffff);
+ flow->l2_key.num_vlans = 1;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
+ flow->l3_key.ipv4.daddr.s_addr = key->dst;
+ flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
+ flow->l3_key.ipv4.saddr.s_addr = key->src;
+ flow->l3_mask.ipv4.saddr.s_addr = mask->src;
+ } else if (dissector_uses_key(dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
+ flow->l3_key.ipv6.daddr = key->dst;
+ flow->l3_mask.ipv6.daddr = mask->dst;
+ flow->l3_key.ipv6.saddr = key->src;
+ flow->l3_mask.ipv6.saddr = mask->src;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
+ struct flow_dissector_key_ports *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
+ flow->l4_key.ports.dport = key->dst;
+ flow->l4_mask.ports.dport = mask->dst;
+ flow->l4_key.ports.sport = key->src;
+ flow->l4_mask.ports.sport = mask->src;
+ }
+
+ if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_dissector_key_icmp *key =
+ GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
+ struct flow_dissector_key_icmp *mask =
+ GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
+
+ flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
+ flow->l4_key.icmp.type = key->type;
+ flow->l4_key.icmp.code = key->code;
+ flow->l4_mask.icmp.type = mask->type;
+ flow->l4_mask.icmp.code = mask->code;
+ }
+
+ return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
+}
+
+static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
+{
+ struct hwrm_cfa_flow_free_input req = { 0 };
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
+ req.flow_handle = flow_handle;
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
+ __func__, flow_handle, rc);
+ return rc;
+}
+
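+/* Prefix length of an IPv6 mask, summed 32 bits at a time */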
+static int ipv6_mask_len(struct in6_addr *mask)
+{
+ int mask_len = 0, i;
+
+ for (i = 0; i < 4; i++)
+ mask_len += inet_mask_len(mask->s6_addr32[i]);
+
+ return mask_len;
+}
+
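+/* An all-zero mask matches anything, i.e. the field is fully wildcarded */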
+static bool is_wildcard(void *mask, int len)
+{
+ const u8 *p = mask;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (p[i] != 0)
+ return false;
+ }
+ return true;
+}
+
+static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ __le16 ref_flow_handle, __le16 *flow_handle)
+{
+ struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_tc_actions *actions = &flow->actions;
+ struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
+ struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
+ struct hwrm_cfa_flow_alloc_input req = { 0 };
+ u16 flow_flags = 0, action_flags = 0;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
+
+ req.src_fid = cpu_to_le16(flow->src_fid);
+ req.ref_flow_handle = ref_flow_handle;
+ req.ethertype = flow->l2_key.ether_type;
+ req.ip_proto = flow->l4_key.ip_proto;
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
+ memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
+ memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
+ }
+
+ if (flow->l2_key.num_vlans > 0) {
+ flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
+ /* FW expects the inner_vlan_tci value to be set
+ * in outer_vlan_tci when num_vlans is 1 (which is
+		 * always the case in TC).
+ */
+ req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
+ }
+
+ /* If all IP and L4 fields are wildcarded then this is an L2 flow */
+	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
+ is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
+ flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
+ } else {
+ flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
+ CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
+ CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
+ req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
+ req.ip_dst_mask_len =
+ inet_mask_len(l3_mask->ipv4.daddr.s_addr);
+ req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
+ req.ip_src_mask_len =
+ inet_mask_len(l3_mask->ipv4.saddr.s_addr);
+ } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
+ memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
+ sizeof(req.ip_dst));
+ req.ip_dst_mask_len =
+ ipv6_mask_len(&l3_mask->ipv6.daddr);
+ memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
+ sizeof(req.ip_src));
+ req.ip_src_mask_len =
+ ipv6_mask_len(&l3_mask->ipv6.saddr);
+ }
+ }
+
+ if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
+ req.l4_src_port = flow->l4_key.ports.sport;
+ req.l4_src_port_mask = flow->l4_mask.ports.sport;
+ req.l4_dst_port = flow->l4_key.ports.dport;
+ req.l4_dst_port_mask = flow->l4_mask.ports.dport;
+ } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
+ /* l4 ports serve as type/code when ip_proto is ICMP */
+ req.l4_src_port = htons(flow->l4_key.icmp.type);
+ req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
+ req.l4_dst_port = htons(flow->l4_key.icmp.code);
+ req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
+ }
+ req.flags = cpu_to_le16(flow_flags);
+
+ if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
+ action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
+ } else {
+ if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
+ action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
+ req.dst_fid = cpu_to_le16(actions->dst_fid);
+ }
+ if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+ req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
+ req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
+ memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
+ memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+ }
+ if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
+ action_flags |=
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
+ /* Rewrite config with tpid = 0 implies vlan pop */
+ req.l2_rewrite_vlan_tpid = 0;
+ memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
+ memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
+ }
+ }
+ req.action_flags = cpu_to_le16(action_flags);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ *flow_handle = resp->flow_handle;
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ return rc;
+}
+
+/* Add val to accum while handling a possible wraparound
+ * of val. Even though val is of type u64, its actual width
+ * is given by mask, and val wraps around beyond that width.
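+ * For example, with a hypothetical 8-bit counter (mask = 0xff):
+ * if *accum = 0x2f0 and the HW reports val = 0x08, then
+ * val < low_bits(*accum) = 0xf0, so the counter wrapped and
+ * *accum becomes 0x200 + 0x08 + 0x100 = 0x308.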
+ */
+static void accumulate_val(u64 *accum, u64 val, u64 mask)
+{
+#define low_bits(x, mask) ((x) & (mask))
+#define high_bits(x, mask) ((x) & ~(mask))
+ bool wrapped = val < low_bits(*accum, mask);
+
+ *accum = high_bits(*accum, mask) + val;
+ if (wrapped)
+ *accum += (mask + 1);
+}
+
+/* The HW counters' width is much less than 64bits.
+ * Handle possible wrap-around while updating the stat counters
+ */
+static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
+ struct bnxt_tc_flow_stats *stats,
+ struct bnxt_tc_flow_stats *hw_stats)
+{
+ accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
+ accumulate_val(&stats->packets, hw_stats->packets,
+ tc_info->packets_mask);
+}
+
+/* Fix possible wraparound of the stats queried from HW, calculate
+ * the delta from prev_stats, and also update the prev_stats.
+ * The HW flow stats are fetched under the hwrm_cmd_lock mutex.
+ * This routine is best called while under the mutex so that the
+ * stats processing happens atomically.
+ */
+static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_stats *stats)
+{
+ struct bnxt_tc_flow_stats *acc_stats, *prev_stats;
+
+ acc_stats = &flow->stats;
+ bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);
+
+ prev_stats = &flow->prev_stats;
+ stats->bytes = acc_stats->bytes - prev_stats->bytes;
+ stats->packets = acc_stats->packets - prev_stats->packets;
+ *prev_stats = *acc_stats;
+}
+
+static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
+ __le16 flow_handle,
+ struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_stats *stats)
+{
+ struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_flow_stats_input req = { 0 };
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
+ req.num_flows = cpu_to_le16(1);
+ req.flow_handle_0 = flow_handle;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ stats->packets = le64_to_cpu(resp->packet_0);
+ stats->bytes = le64_to_cpu(resp->byte_0);
+ bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
+ } else {
+ netdev_info(bp->dev, "error rc=%d", rc);
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_tc_put_l2_node(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
+{
+ struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ int rc;
+
+ /* remove flow_node from the L2 shared flow list */
+ list_del(&flow_node->l2_list_node);
+ if (--l2_node->refcount == 0) {
+ rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
+ tc_info->l2_ht_params);
+ if (rc)
+ netdev_err(bp->dev,
+ "Error: %s: rhashtable_remove_fast: %d",
+ __func__, rc);
+ kfree_rcu(l2_node, rcu);
+ }
+ return 0;
+}
+
+static struct bnxt_tc_l2_node *
+bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
+ struct rhashtable_params ht_params,
+ struct bnxt_tc_l2_key *l2_key)
+{
+ struct bnxt_tc_l2_node *l2_node;
+ int rc;
+
+ l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
+ if (!l2_node) {
+ l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
+ if (!l2_node) {
+ rc = -ENOMEM;
+ return NULL;
+ }
+
+ l2_node->key = *l2_key;
+ rc = rhashtable_insert_fast(l2_table, &l2_node->node,
+ ht_params);
+ if (rc) {
+ kfree(l2_node);
+ netdev_err(bp->dev,
+ "Error: %s: rhashtable_insert_fast: %d",
+ __func__, rc);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&l2_node->common_l2_flows);
+ }
+ return l2_node;
+}
+
+/* Get the ref_flow_handle for a flow by checking if there are any other
+ * flows that share the same L2 key as this flow.
+ */
+static int
+bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
+ struct bnxt_tc_flow_node *flow_node,
+ __le16 *ref_flow_handle)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_flow_node *ref_flow_node;
+ struct bnxt_tc_l2_node *l2_node;
+
+ l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
+ tc_info->l2_ht_params,
+ &flow->l2_key);
+ if (!l2_node)
+ return -1;
+
+	/* If any other flow is using this l2_node, use its flow_handle
+ * as the ref_flow_handle
+ */
+ if (l2_node->refcount > 0) {
+ ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
+ struct bnxt_tc_flow_node,
+ l2_list_node);
+ *ref_flow_handle = ref_flow_node->flow_handle;
+ } else {
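+		/* this is the first flow with this L2 key; 0xffff tells
+		 * the FW that there is no reference flow yet
+		 */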
+ *ref_flow_handle = cpu_to_le16(0xffff);
+ }
+
+ /* Insert the l2_node into the flow_node so that subsequent flows
+ * with a matching l2 key can use the flow_handle of this flow
+ * as their ref_flow_handle
+ */
+ flow_node->l2_node = l2_node;
+ list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
+ l2_node->refcount++;
+ return 0;
+}
+
+/* After the flow parsing is done, this routine is used for checking
+ * if there are any aspects of the flow that prevent it from being
+ * offloaded.
+ */
+static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
+{
+ /* If L4 ports are specified then ip_proto must be TCP or UDP */
+ if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
+ (flow->l4_key.ip_proto != IPPROTO_TCP &&
+ flow->l4_key.ip_proto != IPPROTO_UDP)) {
+ netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
+ flow->l4_key.ip_proto);
+ return false;
+ }
+
+ return true;
+}
+
+static int __bnxt_tc_del_flow(struct bnxt *bp,
+ struct bnxt_tc_flow_node *flow_node)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ int rc;
+
+ /* send HWRM cmd to free the flow-id */
+ bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
+
+ mutex_lock(&tc_info->lock);
+
+ /* release reference to l2 node */
+ bnxt_tc_put_l2_node(bp, flow_node);
+
+ mutex_unlock(&tc_info->lock);
+
+ rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
+ tc_info->flow_ht_params);
+ if (rc)
+ netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
+ __func__, rc);
+
+ kfree_rcu(flow_node, rcu);
+ return 0;
+}
+
+/* Add a new flow or replace an existing flow.
+ * Notes on locking:
+ * There are essentially two critical sections here.
+ * 1. while adding a new flow
+ * a) lookup l2-key
+ * b) issue HWRM cmd and get flow_handle
+ * c) link l2-key with flow
+ * 2. while deleting a flow
+ * a) unlinking l2-key from flow
+ * A lock is needed to protect these two critical sections.
+ *
+ * The hash-tables are already protected by the rhashtable API.
+ */
+static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
+ struct tc_cls_flower_offload *tc_flow_cmd)
+{
+ struct bnxt_tc_flow_node *new_node, *old_node;
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_flow *flow;
+ __le16 ref_flow_handle;
+ int rc;
+
+	/* allocate memory for the new flow and its node */
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node) {
+ rc = -ENOMEM;
+ goto done;
+ }
+ new_node->cookie = tc_flow_cmd->cookie;
+ flow = &new_node->flow;
+
+ rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
+ if (rc)
+ goto free_node;
+ flow->src_fid = src_fid;
+
+ if (!bnxt_tc_can_offload(bp, flow)) {
+ rc = -ENOSPC;
+ goto free_node;
+ }
+
+ /* If a flow exists with the same cookie, delete it */
+ old_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (old_node)
+ __bnxt_tc_del_flow(bp, old_node);
+
+ /* Check if the L2 part of the flow has been offloaded already.
+	 * If so, bump up its refcnt and get its reference handle.
+ */
+ mutex_lock(&tc_info->lock);
+ rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
+ if (rc)
+ goto unlock;
+
+ /* send HWRM cmd to alloc the flow */
+ rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
+ &new_node->flow_handle);
+ if (rc)
+ goto put_l2;
+
+ /* add new flow to flow-table */
+ rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
+ tc_info->flow_ht_params);
+ if (rc)
+ goto hwrm_flow_free;
+
+ mutex_unlock(&tc_info->lock);
+ return 0;
+
+hwrm_flow_free:
+ bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+put_l2:
+ bnxt_tc_put_l2_node(bp, new_node);
+unlock:
+ mutex_unlock(&tc_info->lock);
+free_node:
+ kfree(new_node);
+done:
+ netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
+ __func__, tc_flow_cmd->cookie, rc);
+ return rc;
+}
+
+static int bnxt_tc_del_flow(struct bnxt *bp,
+ struct tc_cls_flower_offload *tc_flow_cmd)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_flow_node *flow_node;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
+ tc_flow_cmd->cookie);
+ return -EINVAL;
+ }
+
+ return __bnxt_tc_del_flow(bp, flow_node);
+}
+
+static int bnxt_tc_get_flow_stats(struct bnxt *bp,
+ struct tc_cls_flower_offload *tc_flow_cmd)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ struct bnxt_tc_flow_node *flow_node;
+ struct bnxt_tc_flow_stats stats;
+ int rc;
+
+ flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
+ &tc_flow_cmd->cookie,
+ tc_info->flow_ht_params);
+ if (!flow_node) {
+ netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
+ tc_flow_cmd->cookie);
+ return -1;
+ }
+
+ rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
+ &flow_node->flow, &stats);
+ if (rc)
+ return rc;
+
+ tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
+ return 0;
+}
+
+int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ int rc = 0;
+
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
+ break;
+
+ case TC_CLSFLOWER_DESTROY:
+ rc = bnxt_tc_del_flow(bp, cls_flower);
+ break;
+
+ case TC_CLSFLOWER_STATS:
+ rc = bnxt_tc_get_flow_stats(bp, cls_flower);
+ break;
+ }
+ return rc;
+}
+
+static const struct rhashtable_params bnxt_tc_flow_ht_params = {
+ .head_offset = offsetof(struct bnxt_tc_flow_node, node),
+ .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
+ .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
+ .automatic_shrinking = true
+};
+
+static const struct rhashtable_params bnxt_tc_l2_ht_params = {
+ .head_offset = offsetof(struct bnxt_tc_l2_node, node),
+ .key_offset = offsetof(struct bnxt_tc_l2_node, key),
+ .key_len = BNXT_TC_L2_KEY_LEN,
+ .automatic_shrinking = true
+};
+
+/* convert counter width in bits to a mask */
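+/* e.g. mask(28) = 0xfffffff, mask(36) = 0xfffffffff */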
+#define mask(width) ((u64)~0 >> (64 - (width)))
+
+int bnxt_init_tc(struct bnxt *bp)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+ int rc;
+
+ if (bp->hwrm_spec_code < 0x10800) {
+ netdev_warn(bp->dev,
+ "Firmware does not support TC flower offload.\n");
+ return -ENOTSUPP;
+ }
+ mutex_init(&tc_info->lock);
+
+ /* Counter widths are programmed by FW */
+ tc_info->bytes_mask = mask(36);
+ tc_info->packets_mask = mask(28);
+
+ tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
+ rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
+ if (rc)
+ return rc;
+
+ tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
+ rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
+ if (rc)
+ goto destroy_flow_table;
+
+ tc_info->enabled = true;
+ bp->dev->hw_features |= NETIF_F_HW_TC;
+ bp->dev->features |= NETIF_F_HW_TC;
+ return 0;
+
+destroy_flow_table:
+ rhashtable_destroy(&tc_info->flow_table);
+ return rc;
+}
+
+void bnxt_shutdown_tc(struct bnxt *bp)
+{
+ struct bnxt_tc_info *tc_info = &bp->tc_info;
+
+ if (!tc_info->enabled)
+ return;
+
+ rhashtable_destroy(&tc_info->flow_table);
+ rhashtable_destroy(&tc_info->l2_table);
+}
+
+#else
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
new file mode 100644
index 000000000000..6c4c1ed279ef
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -0,0 +1,158 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_TC_H
+#define BNXT_TC_H
+
+#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
+
+/* Structs used for storing the filter/actions of the TC cmd.
+ */
+struct bnxt_tc_l2_key {
+ u8 dmac[ETH_ALEN];
+ u8 smac[ETH_ALEN];
+ __be16 inner_vlan_tpid;
+ __be16 inner_vlan_tci;
+ __be16 ether_type;
+ u8 num_vlans;
+};
+
+struct bnxt_tc_l3_key {
+ union {
+ struct {
+ struct in_addr daddr;
+ struct in_addr saddr;
+ } ipv4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } ipv6;
+ };
+};
+
+struct bnxt_tc_l4_key {
+ u8 ip_proto;
+ union {
+ struct {
+ __be16 sport;
+ __be16 dport;
+ } ports;
+ struct {
+ u8 type;
+ u8 code;
+ } icmp;
+ };
+};
+
+struct bnxt_tc_actions {
+ u32 flags;
+#define BNXT_TC_ACTION_FLAG_FWD BIT(0)
+#define BNXT_TC_ACTION_FLAG_FWD_VXLAN BIT(1)
+#define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3)
+#define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4)
+#define BNXT_TC_ACTION_FLAG_DROP BIT(5)
+
+ u16 dst_fid;
+ struct net_device *dst_dev;
+ __be16 push_vlan_tpid;
+ __be16 push_vlan_tci;
+};
+
+struct bnxt_tc_flow_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+struct bnxt_tc_flow {
+ u32 flags;
+#define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1)
+#define BNXT_TC_FLOW_FLAGS_IPV4_ADDRS BIT(2)
+#define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3)
+#define BNXT_TC_FLOW_FLAGS_PORTS BIT(4)
+#define BNXT_TC_FLOW_FLAGS_ICMP BIT(5)
+
+ /* flow applicable to pkts ingressing on this fid */
+ u16 src_fid;
+ struct bnxt_tc_l2_key l2_key;
+ struct bnxt_tc_l2_key l2_mask;
+ struct bnxt_tc_l3_key l3_key;
+ struct bnxt_tc_l3_key l3_mask;
+ struct bnxt_tc_l4_key l4_key;
+ struct bnxt_tc_l4_key l4_mask;
+
+ struct bnxt_tc_actions actions;
+
+ /* updated stats accounting for hw-counter wrap-around */
+ struct bnxt_tc_flow_stats stats;
+ /* previous snap-shot of stats */
+ struct bnxt_tc_flow_stats prev_stats;
+ unsigned long lastused; /* jiffies */
+};
+
+/* L2 hash table
+ * This data-struct is used for L2-flow table.
+ * The L2 part of a flow is stored in a hash table.
+ * A flow that shares the same L2 key/mask with an
+ * already existing flow must refer to its flow handle.
+ */
+struct bnxt_tc_l2_node {
+ /* hash key: first 16b of key */
+#define BNXT_TC_L2_KEY_LEN 16
+ struct bnxt_tc_l2_key key;
+ struct rhash_head node;
+
+ /* a linked list of flows that share the same l2 key */
+ struct list_head common_l2_flows;
+
+ /* number of flows sharing the l2 key */
+ u16 refcount;
+
+ struct rcu_head rcu;
+};
+
+struct bnxt_tc_flow_node {
+ /* hash key: provided by TC */
+ unsigned long cookie;
+ struct rhash_head node;
+
+ struct bnxt_tc_flow flow;
+
+ __le16 flow_handle;
+
+ /* L2 node in l2 hashtable that shares flow's l2 key */
+ struct bnxt_tc_l2_node *l2_node;
+ /* for the shared_flows list maintained in l2_node */
+ struct list_head l2_list_node;
+
+ struct rcu_head rcu;
+};
+
+int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
+ struct tc_cls_flower_offload *cls_flower);
+int bnxt_init_tc(struct bnxt *bp);
+void bnxt_shutdown_tc(struct bnxt *bp);
+
+#else /* CONFIG_BNXT_FLOWER_OFFLOAD */
+
+static inline int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bnxt_init_tc(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_shutdown_tc(struct bnxt *bp)
+{
+}
+#endif /* CONFIG_BNXT_FLOWER_OFFLOAD */
+#endif /* BNXT_TC_H */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 77da75a55c02..997e10e8b863 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+ if (ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
new file mode 100644
index 000000000000..e75db04c6cdc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -0,0 +1,513 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/jhash.h>
+#include <net/pkt_cls.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_vfr.h"
+#include "bnxt_tc.h"
+
+#ifdef CONFIG_BNXT_SRIOV
+
+#define CFA_HANDLE_INVALID 0xffff
+#define VF_IDX_INVALID 0xffff
+
+static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
+ u16 *tx_cfa_action, u16 *rx_cfa_code)
+{
+ struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_cfa_vfr_alloc_input req = { 0 };
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
+ req.vf_id = cpu_to_le16(vf_idx);
+ sprintf(req.vfr_name, "vfr%d", vf_idx);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
+ *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
+ netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
+ *tx_cfa_action, *rx_cfa_code);
+ } else {
+ netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ }
+
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
+{
+ struct hwrm_cfa_vfr_free_input req = { 0 };
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
+ sprintf(req.vfr_name, "vfr%d", vf_idx);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ return rc;
+}
+
+static int bnxt_vf_rep_open(struct net_device *dev)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ struct bnxt *bp = vf_rep->bp;
+
+ /* Enable link and TX only if the parent PF is open. */
+ if (netif_running(bp->dev)) {
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ }
+ return 0;
+}
+
+static int bnxt_vf_rep_close(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+
+ return 0;
+}
+
+static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ int rc, len = skb->len;
+
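+	/* Redirect the skb to the PF: attach the VF-rep's metadata dst and
+	 * hand the packet to the lower dev recorded in it, so the HW can
+	 * switch it to the VF.
+	 */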
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)vf_rep->dst);
+ skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
+ skb->dev = vf_rep->dst->u.port_info.lower_dev;
+
+ rc = dev_queue_xmit(skb);
+ if (!rc) {
+ vf_rep->tx_stats.packets++;
+ vf_rep->tx_stats.bytes += len;
+ }
+ return rc;
+}
+
+static void
+bnxt_vf_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+
+ stats->rx_packets = vf_rep->rx_stats.packets;
+ stats->rx_bytes = vf_rep->rx_stats.bytes;
+ stats->tx_packets = vf_rep->tx_stats.packets;
+ stats->tx_bytes = vf_rep->tx_stats.bytes;
+}
+
+static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ struct bnxt *bp = vf_rep->bp;
+ int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return bnxt_tc_setup_flower(bp, vf_fid, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
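+/* Map the cfa_code of an RX completion to the corresponding VF-rep
+ * netdev; returns NULL if the packet should be received on the PF
+ * netdev itself.
+ */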
+struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
+{
+ u16 vf_idx;
+
+ if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) {
+ vf_idx = bp->cfa_code_map[cfa_code];
+ if (vf_idx != VF_IDX_INVALID)
+ return bp->vf_reps[vf_idx]->dev;
+ }
+ return NULL;
+}
+
+void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
+ struct bnxt_vf_rep_stats *rx_stats;
+
+ rx_stats = &vf_rep->rx_stats;
+	rx_stats->bytes += skb->len;
+	rx_stats->packets++;
+
+ netif_receive_skb(skb);
+}
+
+static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
+ size_t len)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ struct pci_dev *pf_pdev = vf_rep->bp->pdev;
+ int rc;
+
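+	/* yields names like "pf0vf3" */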
+ rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn),
+ vf_rep->vf_idx);
+ if (rc >= len)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+}
+
+static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
+ struct switchdev_attr *attr)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+
+	/* Only PORT_PARENT_ID is supported currently, so use the common
+	 * code shared between the PF and VF-rep for now.
+	 */
+ return bnxt_port_attr_get(vf_rep->bp, attr);
+}
+
+static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
+ .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get
+};
+
+static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
+ .get_drvinfo = bnxt_vf_rep_get_drvinfo
+};
+
+static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
+ .ndo_open = bnxt_vf_rep_open,
+ .ndo_stop = bnxt_vf_rep_close,
+ .ndo_start_xmit = bnxt_vf_rep_xmit,
+ .ndo_get_stats64 = bnxt_vf_rep_get_stats64,
+ .ndo_setup_tc = bnxt_vf_rep_setup_tc,
+ .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
+};
+
+/* Called when the parent PF interface is closed:
+ * Since the mode transition from SWITCHDEV to LEGACY
+ * happens under the rtnl_lock(), this routine is safe
+ * to call while holding the rtnl_lock().
+ */
+void bnxt_vf_reps_close(struct bnxt *bp)
+{
+ struct bnxt_vf_rep *vf_rep;
+ u16 num_vfs, i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ num_vfs = pci_num_vf(bp->pdev);
+ for (i = 0; i < num_vfs; i++) {
+ vf_rep = bp->vf_reps[i];
+ if (netif_running(vf_rep->dev))
+ bnxt_vf_rep_close(vf_rep->dev);
+ }
+}
+
+/* Called when the parent PF interface is opened (re-opened):
+ * Since the mode transition from SWITCHDEV to LEGACY
+ * happens under the rtnl_lock(), this routine is safe
+ * to call while holding the rtnl_lock().
+ */
+void bnxt_vf_reps_open(struct bnxt *bp)
+{
+ int i;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ for (i = 0; i < pci_num_vf(bp->pdev); i++)
+ bnxt_vf_rep_open(bp->vf_reps[i]->dev);
+}
+
+static void __bnxt_vf_reps_destroy(struct bnxt *bp)
+{
+ u16 num_vfs = pci_num_vf(bp->pdev);
+ struct bnxt_vf_rep *vf_rep;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_rep = bp->vf_reps[i];
+ if (vf_rep) {
+ dst_release((struct dst_entry *)vf_rep->dst);
+
+ if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
+ hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
+
+ if (vf_rep->dev) {
+ /* if register_netdev failed, then netdev_ops
+ * would have been set to NULL
+ */
+ if (vf_rep->dev->netdev_ops)
+ unregister_netdev(vf_rep->dev);
+ free_netdev(vf_rep->dev);
+ }
+ }
+ }
+
+ kfree(bp->vf_reps);
+ bp->vf_reps = NULL;
+}
+
+void bnxt_vf_reps_destroy(struct bnxt *bp)
+{
+ bool closed = false;
+
+ if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ if (!bp->vf_reps)
+ return;
+
+ /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
+ * before proceeding with VF-rep cleanup.
+ */
+ rtnl_lock();
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ closed = true;
+ }
+ /* un-publish cfa_code_map so that RX path can't see it anymore */
+ kfree(bp->cfa_code_map);
+ bp->cfa_code_map = NULL;
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+
+ if (closed)
+ bnxt_open_nic(bp, false, false);
+ rtnl_unlock();
+
+	/* Need to call vf_reps_destroy() outside of rtnl_lock()
+	 * as unregister_netdev() takes the rtnl_lock()
+	 */
+ __bnxt_vf_reps_destroy(bp);
+}
+
+/* Use the OUI of the PF's perm addr and report the same mac addr
+ * for the same VF-rep each time
+ */
+static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac)
+{
+ u32 addr;
+
+ ether_addr_copy(mac, src_mac);
+
+ addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx;
+ mac[3] = (u8)(addr & 0xFF);
+ mac[4] = (u8)((addr >> 8) & 0xFF);
+ mac[5] = (u8)((addr >> 16) & 0xFF);
+}
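
Because ether_addr_copy() runs first and only mac[3..5] are overwritten, the
PF's OUI (bytes 0-2) survives while the low three bytes become
jhash(PF MAC) + vf_idx; e.g. (illustrative values only) a PF address of
00:0a:f7:aa:bb:cc yields rep addresses of the form 00:0a:f7:xx:yy:zz that are
stable across driver reloads and vary deterministically with vf_idx.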
+
+static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
+ struct net_device *dev)
+{
+ struct net_device *pf_dev = bp->dev;
+
+ dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
+ dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
+ SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops);
+	/* Just inherit all the features of the parent PF as the VF-rep
+	 * uses the RX/TX rings of the parent PF
+	 */
+ dev->hw_features = pf_dev->hw_features;
+ dev->gso_partial_features = pf_dev->gso_partial_features;
+ dev->vlan_features = pf_dev->vlan_features;
+ dev->hw_enc_features = pf_dev->hw_enc_features;
+ dev->features |= pf_dev->features;
+ bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
+ dev->perm_addr);
+ ether_addr_copy(dev->dev_addr, dev->perm_addr);
+}
+
+static int bnxt_vf_reps_create(struct bnxt *bp)
+{
+ u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
+ struct bnxt_vf_rep *vf_rep;
+ struct net_device *dev;
+ int rc, i;
+
+ bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
+ if (!bp->vf_reps)
+ return -ENOMEM;
+
+ /* storage for cfa_code to vf-idx mapping */
+	cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map),
+				     GFP_KERNEL);
+ if (!cfa_code_map) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ for (i = 0; i < MAX_CFA_CODE; i++)
+ cfa_code_map[i] = VF_IDX_INVALID;
+
+ for (i = 0; i < num_vfs; i++) {
+ dev = alloc_etherdev(sizeof(*vf_rep));
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ vf_rep = netdev_priv(dev);
+ bp->vf_reps[i] = vf_rep;
+ vf_rep->dev = dev;
+ vf_rep->bp = bp;
+ vf_rep->vf_idx = i;
+ vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
+
+ /* get cfa handles from FW */
+ rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
+ &vf_rep->tx_cfa_action,
+ &vf_rep->rx_cfa_code);
+ if (rc) {
+ rc = -ENOLINK;
+ goto err;
+ }
+ cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
+
+ vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!vf_rep->dst) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ /* only cfa_action is needed to mux a packet while TXing */
+ vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
+ vf_rep->dst->u.port_info.lower_dev = bp->dev;
+
+ bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
+ rc = register_netdev(dev);
+ if (rc) {
+ /* no need for unregister_netdev in cleanup */
+ dev->netdev_ops = NULL;
+ goto err;
+ }
+ }
+
+ /* publish cfa_code_map only after all VF-reps have been initialized */
+ bp->cfa_code_map = cfa_code_map;
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ netif_keep_dst(bp->dev);
+ return 0;
+
+err:
+	netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
+ kfree(cfa_code_map);
+ __bnxt_vf_reps_destroy(bp);
+ return rc;
+}
+
+/* Devlink related routines */
+static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
+
+ *mode = bp->eswitch_mode;
+ return 0;
+}
+
+static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
+ int rc = 0;
+
+ mutex_lock(&bp->sriov_lock);
+ if (bp->eswitch_mode == mode) {
+		netdev_info(bp->dev, "already in %s eswitch mode\n",
+			    mode == DEVLINK_ESWITCH_MODE_LEGACY ?
+			    "legacy" : "switchdev");
+ rc = -EINVAL;
+ goto done;
+ }
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ bnxt_vf_reps_destroy(bp);
+ break;
+
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ if (pci_num_vf(bp->pdev) == 0) {
+			netdev_info(bp->dev,
+				    "Enable VFs before setting switchdev mode\n");
+ rc = -EPERM;
+ goto done;
+ }
+ rc = bnxt_vf_reps_create(bp);
+ break;
+
+ default:
+ rc = -EINVAL;
+ goto done;
+ }
+done:
+ mutex_unlock(&bp->sriov_lock);
+ return rc;
+}
+
+static const struct devlink_ops bnxt_dl_ops = {
+ .eswitch_mode_set = bnxt_dl_eswitch_mode_set,
+ .eswitch_mode_get = bnxt_dl_eswitch_mode_get
+};
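
Once these ops are registered, the transition is driven from userspace with
the devlink tool; for example, assuming a PF at PCI address 0000:03:00.0
(the address is illustrative):

	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
	devlink dev eswitch show pci/0000:03:00.0

The set request lands in bnxt_dl_eswitch_mode_set() above, which creates or
tears down the VF-reps accordingly.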
+
+int bnxt_dl_register(struct bnxt *bp)
+{
+ struct devlink *dl;
+ int rc;
+
+ if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10800) {
+ netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
+ return -ENOTSUPP;
+ }
+
+ dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
+ if (!dl) {
+		netdev_warn(bp->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ bnxt_link_bp_to_dl(bp, dl);
+ bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ rc = devlink_register(dl, &bp->pdev->dev);
+ if (rc) {
+ bnxt_link_bp_to_dl(bp, NULL);
+ devlink_free(dl);
+		netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnxt_dl_unregister(struct bnxt *bp)
+{
+ struct devlink *dl = bp->dl;
+
+ if (!dl)
+ return;
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
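
These register/unregister entry points are meant to be wired into the PF's
probe and remove paths; a sketch under that assumption (the real call sites
are in bnxt.c, outside this hunk, and the exact ordering there may differ):

	/* probe: expose the devlink instance once the PF netdev is up */
	rc = bnxt_dl_register(bp);
	if (rc)
		goto err;	/* hypothetical error label */

	/* remove: tear down the reps first, then the devlink instance */
	bnxt_vf_reps_destroy(bp);
	bnxt_dl_unregister(bp);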
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
new file mode 100644
index 000000000000..7787cd24606a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h
@@ -0,0 +1,89 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016-2017 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_VFR_H
+#define BNXT_VFR_H
+
+#ifdef CONFIG_BNXT_SRIOV
+
+#define MAX_CFA_CODE 65536
+
+/* Struct to hold housekeeping info needed by devlink interface */
+struct bnxt_dl {
+ struct bnxt *bp; /* back ptr to the controlling dev */
+};
+
+static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->bp;
+}
+
+/* To clear devlink pointer from bp, pass NULL dl */
+static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
+{
+ bp->dl = dl;
+
+ /* add a back pointer in dl to bp */
+ if (dl) {
+ struct bnxt_dl *bp_dl = devlink_priv(dl);
+
+ bp_dl->bp = bp;
+ }
+}
+
+int bnxt_dl_register(struct bnxt *bp);
+void bnxt_dl_unregister(struct bnxt *bp);
+void bnxt_vf_reps_destroy(struct bnxt *bp);
+void bnxt_vf_reps_close(struct bnxt *bp);
+void bnxt_vf_reps_open(struct bnxt *bp);
+void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
+struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
+
+static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
+{
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+ struct bnxt *bp = vf_rep->bp;
+
+ return bp->pf.vf[vf_rep->vf_idx].fw_fid;
+}
+
+#else
+
+static inline int bnxt_dl_register(struct bnxt *bp)
+{
+ return 0;
+}
+
+static inline void bnxt_dl_unregister(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_vf_reps_close(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_vf_reps_open(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
+{
+}
+
+static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
+{
+ return NULL;
+}
+
+static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_BNXT_SRIOV */
+#endif /* BNXT_VFR_H */
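
For reference, the helpers above lean on fields that the bnxt.h hunk of this
series (not shown here) adds to struct bnxt; reconstructed from their usage
in bnxt_vfr.c, roughly:

	#if defined(CONFIG_BNXT_SRIOV)
		u16			*cfa_code_map;	/* cfa_code -> vf_idx */
		struct bnxt_vf_rep	**vf_reps;
	#endif
		struct devlink		*dl;
		u16			eswitch_mode;
		struct mutex		sriov_lock;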
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 3961a6807454..d8f0c837b72c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -169,8 +169,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
tc = netdev_get_num_tc(dev);
if (!tc)
tc = 1;
- rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
- true, tc, tx_xdp);
+ rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
+ true, tc, tx_xdp);
if (rc) {
netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
return rc;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a981c4ee9d72..9cebca896913 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -72,23 +72,42 @@
#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
TOTAL_DESC * DMA_DESC_SIZE)
+static inline void bcmgenet_writel(u32 value, void __iomem *offset)
+{
+ /* MIPS chips strapped for BE will automagically configure the
+ * peripheral registers for CPU-native byte order.
+ */
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ __raw_writel(value, offset);
+ else
+ writel_relaxed(value, offset);
+}
+
+static inline u32 bcmgenet_readl(void __iomem *offset)
+{
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ return __raw_readl(offset);
+ else
+ return readl_relaxed(offset);
+}
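
On non-MIPS (and little-endian MIPS) builds this trades the __raw accessors
for the _relaxed variants, keeping the no-barrier semantics while gaining
defined byte order. A hypothetical helper, purely to illustrate how the pair
composes (not part of this patch):

	static inline void bcmgenet_set_bits(void __iomem *reg, u32 mask)
	{
		u32 val = bcmgenet_readl(reg);

		bcmgenet_writel(val | mask, reg);
	}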
+
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
void __iomem *d, u32 value)
{
- __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+ bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}
static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
void __iomem *d)
{
- return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+ return bcmgenet_readl(d + DMA_DESC_LENGTH_STATUS);
}
static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
void __iomem *d,
dma_addr_t addr)
{
- __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+ bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
/* Register writes to GISB bus can take a couple hundred nanoseconds
* and are done for each packet, save these expensive writes unless
@@ -96,7 +115,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (priv->hw_params->flags & GENET_HAS_40BITS)
- __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+ bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
@@ -113,7 +132,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
{
dma_addr_t addr;
- addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+ addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);
/* Register writes to GISB bus can take a couple hundred nanoseconds
* and are done for each packet, save these expensive writes unless
@@ -121,7 +140,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
*/
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (priv->hw_params->flags & GENET_HAS_40BITS)
- addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+ addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
return addr;
}
@@ -156,8 +175,8 @@ static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
if (GENET_IS_V1(priv))
return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
else
- return __raw_readl(priv->base +
- priv->hw_params->tbuf_offset + TBUF_CTRL);
+ return bcmgenet_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
}
static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
@@ -165,7 +184,7 @@ static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
if (GENET_IS_V1(priv))
bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
else
- __raw_writel(val, priv->base +
+ bcmgenet_writel(val, priv->base +
priv->hw_params->tbuf_offset + TBUF_CTRL);
}
@@ -174,8 +193,8 @@ static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
if (GENET_IS_V1(priv))
return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
else
- return __raw_readl(priv->base +
- priv->hw_params->tbuf_offset + TBUF_BP_MC);
+ return bcmgenet_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
@@ -183,7 +202,7 @@ static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
if (GENET_IS_V1(priv))
bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
else
- __raw_writel(val, priv->base +
+ bcmgenet_writel(val, priv->base +
priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
@@ -326,28 +345,28 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
enum dma_reg r)
{
- return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
- DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+ return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
u32 val, enum dma_reg r)
{
- __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
enum dma_reg r)
{
- return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
- DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+ return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
u32 val, enum dma_reg r)
{
- __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
@@ -418,16 +437,16 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
unsigned int ring,
enum dma_ring_reg r)
{
- return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
- (DMA_RING_SIZE * ring) +
- genet_dma_ring_regs[r]);
+ return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
}
static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
unsigned int ring, u32 val,
enum dma_ring_reg r)
{
- __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
genet_dma_ring_regs[r]);
}
@@ -436,16 +455,16 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
unsigned int ring,
enum dma_ring_reg r)
{
- return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
- (DMA_RING_SIZE * ring) +
- genet_dma_ring_regs[r]);
+ return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
}
static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
unsigned int ring, u32 val,
enum dma_ring_reg r)
{
- __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
(DMA_RING_SIZE * ring) +
genet_dma_ring_regs[r]);
}
@@ -991,12 +1010,12 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
/* Enable EEE and switch to a 27Mhz clock automatically */
- reg = __raw_readl(priv->base + off);
+ reg = bcmgenet_readl(priv->base + off);
if (enable)
reg |= TBUF_EEE_EN | TBUF_PM_EN;
else
reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
- __raw_writel(reg, priv->base + off);
+ bcmgenet_writel(reg, priv->base + off);
/* Do the same thing for RBUF */
reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
@@ -1360,7 +1379,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
if (skb) {
pkts_compl++;
bytes_compl += GENET_CB(skb)->bytes_sent;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
txbds_processed++;
@@ -1875,7 +1894,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
cb = ring->cbs + i;
skb = bcmgenet_rx_refill(priv, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
if (!cb->skb)
return -ENOMEM;
}
@@ -1894,7 +1913,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
if (skb)
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 3a34fdba5301..4c49d0b97748 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -657,6 +657,7 @@ struct bcmgenet_priv {
struct clk *clk;
struct platform_device *pdev;
+ struct platform_device *mii_pdev;
/* WOL */
struct clk *clk_wol;
@@ -671,12 +672,21 @@ struct bcmgenet_priv {
static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
u32 off) \
{ \
- return __raw_readl(priv->base + offset + off); \
+ /* MIPS chips strapped for BE will automagically configure the \
+ * peripheral registers for CPU-native byte order. \
+ */ \
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \
+ return __raw_readl(priv->base + offset + off); \
+ else \
+ return readl_relaxed(priv->base + offset + off); \
} \
static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
u32 val, u32 off) \
{ \
- __raw_writel(val, priv->base + offset + off); \
+ if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) \
+ __raw_writel(val, priv->base + offset + off); \
+ else \
+ writel_relaxed(val, priv->base + offset + off); \
}
GENET_IO_MACRO(ext, GENET_EXT_OFF);
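
Each GENET_IO_MACRO(name, offset) instantiation therefore expands to a
byte-order-aware accessor pair; e.g. GENET_IO_MACRO(ext, GENET_EXT_OFF)
yields:

	u32 bcmgenet_ext_readl(struct bcmgenet_priv *priv, u32 off);
	void bcmgenet_ext_writel(struct bcmgenet_priv *priv, u32 val, u32 off);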
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 30cb97b4a1d7..18f5723be2c9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -24,62 +24,10 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/bcmgenet.h>
+#include <linux/platform_data/mdio-bcm-unimac.h>
#include "bcmgenet.h"
-/* read a value from the MII */
-static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
-{
- int ret;
- struct net_device *dev = bus->priv;
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
-
- bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
- (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
- /* Start MDIO transaction*/
- reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
- reg |= MDIO_START_BUSY;
- bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
- wait_event_timeout(priv->wq,
- !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
- & MDIO_START_BUSY),
- HZ / 100);
- ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
-
- /* Some broken devices are known not to release the line during
- * turn-around, e.g: Broadcom BCM53125 external switches, so check for
- * that condition here and ignore the MDIO controller read failure
- * indication.
- */
- if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL))
- return -EIO;
-
- return ret & 0xffff;
-}
-
-/* write a value to the MII */
-static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
- int location, u16 val)
-{
- struct net_device *dev = bus->priv;
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
-
- bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
- (location << MDIO_REG_SHIFT) | (0xffff & val)),
- UMAC_MDIO_CMD);
- reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
- reg |= MDIO_START_BUSY;
- bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
- wait_event_timeout(priv->wq,
- !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
- MDIO_START_BUSY),
- HZ / 100);
-
- return 0;
-}
-
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
@@ -393,104 +341,121 @@ int bcmgenet_mii_probe(struct net_device *dev)
return 0;
}
-/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
- * their internal MDIO management controller making them fail to successfully
- * be read from or written to for the first transaction. We insert a dummy
- * BMSR read here to make sure that phy_get_device() and get_phy_id() can
- * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
- * PHY device for this peripheral.
- *
- * Once the PHY driver is registered, we can workaround subsequent reads from
- * there (e.g: during system-wide power management).
- *
- * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
- * therefore the right location to stick that workaround. Since we do not want
- * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
- * Device Tree scan to limit the search area.
- */
-static int bcmgenet_mii_bus_reset(struct mii_bus *bus)
+static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
{
- struct net_device *dev = bus->priv;
- struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *np = priv->mdio_dn;
- struct device_node *child = NULL;
- u32 read_mask = 0;
- int addr = 0;
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ char *compat;
- if (!np) {
- read_mask = 1 << priv->phy_addr;
- } else {
- for_each_available_child_of_node(np, child) {
- addr = of_mdio_parse_addr(&dev->dev, child);
- if (addr < 0)
- continue;
+ compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+ if (!compat)
+ return NULL;
- read_mask |= 1 << addr;
- }
+ priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ kfree(compat);
+ if (!priv->mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+ return NULL;
}
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- if (read_mask & 1 << addr) {
- dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr);
- mdiobus_read(bus, addr, MII_BMSR);
- }
+ return priv->mdio_dn;
+}
+
+static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv,
+ struct unimac_mdio_pdata *ppd)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct bcmgenet_platform_data *pd = kdev->platform_data;
+
+ if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
+		/* Internal or external PHY with MDIO access */
+ if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
+ ppd->phy_mask = 1 << pd->phy_address;
+ else
+ ppd->phy_mask = 0;
}
+}
+static int bcmgenet_mii_wait(void *wait_func_data)
+{
+ struct bcmgenet_priv *priv = wait_func_data;
+
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+ & MDIO_START_BUSY),
+ HZ / 100);
return 0;
}
-static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
{
- struct mii_bus *bus;
+ struct platform_device *pdev = priv->pdev;
+ struct bcmgenet_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *dn = pdev->dev.of_node;
+ struct unimac_mdio_pdata ppd;
+ struct platform_device *ppdev;
+ struct resource *pres, res;
+ int id, ret;
+
+	pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!pres) {
+		dev_err(&pdev->dev, "Invalid resource\n");
+		return -EINVAL;
+	}
+	memset(&res, 0, sizeof(res));
+	memset(&ppd, 0, sizeof(ppd));
+
+ ppd.wait_func = bcmgenet_mii_wait;
+ ppd.wait_func_data = priv;
+ ppd.bus_name = "bcmgenet MII bus";
+
+	/* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD
+	 * and is 2 * 32-bit words, 8 bytes total; resource end addresses
+	 * are inclusive, hence start + 7.
+	 */
+	res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD;
+	res.end = res.start + 7;
+ res.flags = IORESOURCE_MEM;
- if (priv->mii_bus)
- return 0;
+ if (dn)
+ id = of_alias_get_id(dn, "eth");
+ else
+ id = pdev->id;
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- pr_err("failed to allocate\n");
+ ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id);
+ if (!ppdev)
return -ENOMEM;
- }
- bus = priv->mii_bus;
- bus->priv = priv->dev;
- bus->name = "bcmgenet MII bus";
- bus->parent = &priv->pdev->dev;
- bus->read = bcmgenet_mii_read;
- bus->write = bcmgenet_mii_write;
- bus->reset = bcmgenet_mii_bus_reset;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
- priv->pdev->name, priv->pdev->id);
+ /* Retain this platform_device pointer for later cleanup */
+ priv->mii_pdev = ppdev;
+ ppdev->dev.parent = &pdev->dev;
+ ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv);
+ if (pdata)
+ bcmgenet_mii_pdata_init(priv, &ppd);
+
+ ret = platform_device_add_resources(ppdev, &res, 1);
+ if (ret)
+ goto out;
+
+ ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd));
+ if (ret)
+ goto out;
+
+ ret = platform_device_add(ppdev);
+ if (ret)
+ goto out;
return 0;
+out:
+ platform_device_put(ppdev);
+ return ret;
}
static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
struct device_node *dn = priv->pdev->dev.of_node;
struct device *kdev = &priv->pdev->dev;
- struct phy_device *phydev = NULL;
- char *compat;
+ struct phy_device *phydev;
int phy_mode;
int ret;
- compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
- if (!compat)
- return -ENOMEM;
-
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
- kfree(compat);
- if (!priv->mdio_dn) {
- dev_err(kdev, "unable to find MDIO bus node\n");
- return -ENODEV;
- }
-
- ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn);
- if (ret) {
- dev_err(kdev, "failed to register MDIO bus\n");
- return ret;
- }
-
/* Fetch the PHY phandle */
priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
@@ -537,33 +502,23 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
{
struct device *kdev = &priv->pdev->dev;
struct bcmgenet_platform_data *pd = kdev->platform_data;
- struct mii_bus *mdio = priv->mii_bus;
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ char mdio_bus_id[MII_BUS_ID_SIZE];
struct phy_device *phydev;
- int ret;
+
+ snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d",
+ UNIMAC_MDIO_DRV_NAME, priv->pdev->id);
if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) {
+ snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, pd->phy_address);
+
/*
* Internal or external PHY with MDIO access
*/
- if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
- mdio->phy_mask = ~(1 << pd->phy_address);
- else
- mdio->phy_mask = 0;
-
- ret = mdiobus_register(mdio);
- if (ret) {
- dev_err(kdev, "failed to register MDIO bus\n");
- return ret;
- }
-
- if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR)
- phydev = mdiobus_get_phy(mdio, pd->phy_address);
- else
- phydev = phy_find_first(mdio);
-
+ phydev = phy_attach(priv->dev, phy_name, pd->phy_interface);
if (!phydev) {
dev_err(kdev, "failed to register PHY device\n");
- mdiobus_unregister(mdio);
return -ENODEV;
}
} else {
@@ -609,10 +564,9 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv)
int bcmgenet_mii_init(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
- struct device_node *dn = priv->pdev->dev.of_node;
int ret;
- ret = bcmgenet_mii_alloc(priv);
+ ret = bcmgenet_mii_register(priv);
if (ret)
return ret;
@@ -623,11 +577,7 @@ int bcmgenet_mii_init(struct net_device *dev)
return 0;
out:
- if (of_phy_is_fixed_link(dn))
- of_phy_deregister_fixed_link(dn);
- of_node_put(priv->phy_dn);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+ bcmgenet_mii_exit(dev);
return ret;
}
@@ -639,6 +589,6 @@ void bcmgenet_mii_exit(struct net_device *dev)
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
of_node_put(priv->phy_dn);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+ platform_device_unregister(priv->mii_pdev);
+ platform_device_put(priv->mii_pdev);
}
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 16a0f192daec..ecdef42f0ae6 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -1367,15 +1367,11 @@ static int sbmac_initctx(struct sbmac_softc *s)
static void sbdma_uninitctx(struct sbmacdma *d)
{
- if (d->sbdma_dscrtable_unaligned) {
- kfree(d->sbdma_dscrtable_unaligned);
- d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
- }
+ kfree(d->sbdma_dscrtable_unaligned);
+ d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
- if (d->sbdma_ctxtable) {
- kfree(d->sbdma_ctxtable);
- d->sbdma_ctxtable = NULL;
- }
+ kfree(d->sbdma_ctxtable);
+ d->sbdma_ctxtable = NULL;
}
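
This simplification relies on kfree(NULL) being a defined no-op, so the
removed NULL checks were redundant; the pointers are still cleared afterwards
so a repeated uninit cannot double-free.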
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d600c41fb1dc..af33dc15c55f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6587,7 +6587,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -7829,7 +7829,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
}
}
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
*pskb = new_skb;
return ret;
}
@@ -7882,7 +7882,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
} while (segs);
tg3_tso_bug_end:
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -8543,7 +8543,7 @@ static void tg3_free_rings(struct tg3 *tp)
tg3_tx_skb_unmap(tnapi, i,
skb_shinfo(skb)->nr_frags - 1);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
}
netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
}
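
A recurring theme in the tg3 and bcmgenet hunks: dev_consume_skb_any()
replaces dev_kfree_skb_any() on paths where the skb completed successfully.
Both free the skb; the difference is tracing — kfree is reported as a drop,
consume as normal consumption — so tools like dropwatch and the
skb:kfree_skb tracepoint stop flagging healthy TX completions. Schematically:

	if (tx_completed_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* real drop: visible to drop monitors */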